content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{check_function_layout}
\alias{check_function_layout}
\title{Check a Function's Layout}
\usage{
check_function_layout(
object,
function_name = NULL,
max_lines_of_code = get_cleanr_options("max_lines_of_code"),
max_lines = get_cleanr_options("max_lines"),
max_num_arguments = get_cleanr_options("max_num_arguments"),
max_nesting_depth = get_cleanr_options("max_nesting_depth"),
max_line_width = get_cleanr_options("max_line_width"),
check_return = get_cleanr_options("check_return")
)
}
\arguments{
\item{object}{The function to be checked.}
\item{function_name}{The name to be used for reporting. Stick with the
default: If NULL, it is taken from the \code{object} given.
Argument is used internally to pass function names
retrieved via \code{\link{get}} in the wrapper function
\code{\link{check_functions_in_file}}.}
\item{max_lines_of_code}{See \code{\link{check_num_lines_of_code}}.}
\item{max_lines}{See \code{\link{check_num_lines}}.}
\item{max_num_arguments}{See \code{\link{check_num_arguments}}.}
\item{max_nesting_depth}{See \code{\link{check_nesting_depth}}.}
\item{max_line_width}{See \code{\link{check_line_width}}.}
\item{check_return}{See \code{\link{check_return}}.}
}
\value{
\code{\link[base:invisible]{Invisibly}} \code{\link{TRUE}},
but see \emph{Details}.
}
\description{
Run all \code{\link{function_checks}} on a function.
}
\details{
The function catches the messages of "cleanr"-conditions
\code{\link{throw}}n by \code{\link{function_checks}} and, if it caught any,
\code{\link{throw}}s them.
}
\examples{
print(cleanr::check_function_layout(cleanr::check_num_lines))
}
\seealso{
Other wrappers:
\code{\link{check_directory}()},
\code{\link{check_file_layout}()},
\code{\link{check_file}()},
\code{\link{check_functions_in_file}()},
\code{\link{check_package}()}
}
\concept{wrappers}
| /man/check_function_layout.Rd | no_license | cran/cleanr | R | false | true | 1,929 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{check_function_layout}
\alias{check_function_layout}
\title{Check a Function's Layout}
\usage{
check_function_layout(
object,
function_name = NULL,
max_lines_of_code = get_cleanr_options("max_lines_of_code"),
max_lines = get_cleanr_options("max_lines"),
max_num_arguments = get_cleanr_options("max_num_arguments"),
max_nesting_depth = get_cleanr_options("max_nesting_depth"),
max_line_width = get_cleanr_options("max_line_width"),
check_return = get_cleanr_options("check_return")
)
}
\arguments{
\item{object}{The function to be checked.}
\item{function_name}{The name to be used for reporting. Stick with the
default: If NULL, it is taken from the \code{object} given.
Argument is used internally to pass function names
retrieved via \code{\link{get}} in the wrapper function
\code{\link{check_functions_in_file}}.}
\item{max_lines_of_code}{See \code{\link{check_num_lines_of_code}}.}
\item{max_lines}{See \code{\link{check_num_lines}}.}
\item{max_num_arguments}{See \code{\link{check_num_arguments}}.}
\item{max_nesting_depth}{See \code{\link{check_nesting_depth}}.}
\item{max_line_width}{See \code{\link{check_line_width}}.}
\item{check_return}{See \code{\link{check_return}}.}
}
\value{
\code{\link[base:invisible]{Invisibly}} \code{\link{TRUE}},
but see \emph{Details}.
}
\description{
Run all \code{\link{function_checks}} on a function.
}
\details{
The function catches the messages of "cleanr"-conditions
\code{\link{throw}}n by \code{\link{function_checks}} and, if it caught any,
\code{\link{throw}}s them.
}
\examples{
print(cleanr::check_function_layout(cleanr::check_num_lines))
}
\seealso{
Other wrappers:
\code{\link{check_directory}()},
\code{\link{check_file_layout}()},
\code{\link{check_file}()},
\code{\link{check_functions_in_file}()},
\code{\link{check_package}()}
}
\concept{wrappers}
|
## File Name: tam.mml.mfr.R
## File Version: 9.955
tam.mml.mfr <- function( resp, Y=NULL, group=NULL, irtmodel="1PL",
formulaY=NULL, dataY=NULL, ndim=1, pid=NULL, xsi.fixed=NULL,
xsi.setnull=NULL, xsi.inits=NULL, beta.fixed=NULL, beta.inits=NULL,
variance.fixed=NULL, variance.inits=NULL, est.variance=TRUE,
formulaA=~item+item:step, constraint="cases", A=NULL, B=NULL,
B.fixed=NULL, Q=NULL, facets=NULL, est.slopegroups=NULL, E=NULL,
pweights=NULL, verbose=TRUE, control=list(), delete.red.items=TRUE )
{
CALL <- match.call()
a0 <- Sys.time()
s1 <- Sys.time()
prior_list_xsi=NULL
mstep_intercept_method <- "R"
# display
disp <- "....................................................\n"
increment.factor <- progress <- nodes <- snodes <- ridge <- xsi.start0 <- QMC <- NULL
maxiter <- conv <- convD <- min.variance <- max.increment <- Msteps <- convM <- NULL
resp_orig <- resp
B00 <- B
B <- trim_increment <- NULL
fac.oldxsi <- acceleration <- NULL
#**** handle verbose argument
args_CALL <- as.list( sys.call() )
if ( ! tam_in_names_list( list=control, variable="progress" ) ){
control$progress <- verbose
}
#--- attach control elements
e1 <- environment()
tam_fct <- "tam.mml.mfr"
res <- tam_mml_control_list_define(control=control, envir=e1, tam_fct=tam_fct,
prior_list_xsi=prior_list_xsi)
con <- res$con
con1a <- res$con1a
#- check constraint
constraint <- tam_mml_constraint_check(constraint=constraint)
# userfct.variance is not allowed in tam.mml.mfr
userfct.variance <- NULL
fac.oldxsi <- max( 0, min( c( fac.oldxsi, .95 ) ) )
if ( constraint=="items" ){ beta.fixed <- FALSE }
pid0 <- pid <- unname(c(unlist(pid)))
if (progress){
cat(disp)
cat("Processing Data ", paste(Sys.time()), "\n") ; utils::flush.console()
}
if ( ! is.null(group) ){
con1a$QMC <- QMC <- FALSE
con1a$snodes <- snodes <- 0
}
resp <- as.matrix(resp)
resp <- add.colnames.resp(resp)
itemnames <- colnames(resp)
nullY <- is.null(Y)
if ( ! is.null(facets) ){
facets <- as.data.frame(facets)
}
# cat("read data" ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
#--- compute maxKi
res <- tam_mml_mfr_proc_compute_maxKi(resp=resp, facets=facets)
maxKi <- res$maxKi
#--- handle formula and facets
resp00 <- resp
res <- tam_mml_mfr_dataprep( formulaA=formulaA, xsi.setnull=xsi.setnull, B=B,
Q=Q, resp=resp, pid=pid, facets=facets, beta.fixed=beta.fixed )
formulaA <- res$formula_update
xsi.setnull <- res$xsi.setnull
beta.fixed <- res$beta.fixed
facets <- res$facets
PSF <- res$PSF
pid <- res$pid
#cat(" mml mfr dataprep " ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
#--- create design matrices
res <- tam_mml_mfr_proc_create_design_matrices( pid=pid, maxKi=maxKi, resp=resp,
formulaA=formulaA, facets=facets, constraint=constraint, ndim=ndim, Q=Q,
A=A, B=B, progress=progress, xsi.fixed=xsi.fixed, resp00=resp00, B00=B00,
beta.fixed=beta.fixed )
pid <- res$pid
diffKi <- res$diffKi
var_ki <- res$var_ki
xsi.fixed <- res$xsi.fixed
xsi.elim <- res$xsi.elim
beta.fixed <- res$beta.fixed
A <- res$A
cA <- res$cA
B <- res$B
Q <- res$Q
X <- res$X
X.red <- res$X.red
gresp <- res$gresp
gresp.noStep <- res$gresp.noStep
xsi.constr <- res$xsi.constr
design <- res$design
# cat(" --- design matrix ready" ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
#--- processing in case of multiple person IDs in a dataset
tp <- max(table(pid))
if ( tp > 1){
res <- tam_mml_mfr_proc_multiple_person_ids( pid=pid, tp=tp, gresp=gresp,
gresp.noStep=gresp.noStep, progress=progress, group=group, Y=Y,
pweights=pweights)
pid <- res$pid
gresp <- res$gresp
gresp.noStep <- res$gresp.noStep
group <- res$group
Y <- res$Y
pweights <- res$pweights
}
# cat("process data in case of multiple persons" ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
#--- set some xsi effects to zero
res <- tam_mml_mfr_proc_xsi_setnull( xsi.setnull=xsi.setnull, A=A, xsi.fixed=xsi.fixed )
xsi.fixed <- res$xsi.fixed
xsi0 <- res$xsi0
nitems <- nrow(X.red)
nstud <- nrow(gresp) # number of students
if ( is.null(pweights) ){
pweights <- rep(1,nstud) # weights of response pattern
}
if (progress){
cat(" * Response Data:", nstud, "Persons and ",
ncol(gresp.noStep), "Generalized Items (", paste(Sys.time()),")\n" ) ;
utils::flush.console()
}
if ( is.null(pid) ){
pid <- seq(1,nstud)
}
# normalize person weights to sum up to nstud
pweights <- nstud * pweights / sum(pweights)
# a matrix version of person weights
pweightsM <- outer( pweights, rep(1,nitems) )
# calculate ndim if only B or Q are supplied
if ( ! is.null(B) ){ ndim <- dim(B)[3] }
if ( ! is.null(Q) ){ ndim <- dim(Q)[2] }
betaConv <- FALSE #flag of regression coefficient convergence
varConv <- FALSE #flag of variance convergence
nnodes <- length(nodes)^ndim
if ( snodes > 0 ){ nnodes <- snodes }
#--- print information about nodes
res <- tam_mml_progress_proc_nodes( progress=progress, snodes=snodes, nnodes=nnodes,
skillspace="normal", QMC=QMC)
#--- maximum no. of categories per item. Assuming dichotomous
maxK <- max( resp, na.rm=TRUE ) + 1
#--- number of parameters
np <- dim(A)[[3]]
#--- xsi parameter index
res <- tam_mml_proc_est_xsi_index(A, xsi.inits, xsi.fixed)
np <- res$np
xsi <- res$xsi
est.xsi.index0 <- est.xsi.index <- res$est.xsi.index
#--- inits variance
res <- tam_mml_inits_variance( variance.inits=variance.inits, ndim=ndim, variance.fixed=variance.fixed )
variance <- res$variance
#--- inits group
res <- tam_mml_inits_groups( group=group )
G <- res$G
groups <- res$groups
group <- res$group
var.indices <- res$var.indices
#--- inits beta
res <- tam_mml_mfr_inits_beta( Y=Y, formulaY=formulaY, dataY=dataY, G=G, group=group, groups=groups,
nstud=nstud, pweights=pweights, ridge=ridge, beta.fixed=beta.fixed, xsi.fixed=xsi.fixed,
constraint=constraint, ndim=ndim, beta.inits=beta.inits, tp=tp, gresp=gresp,
pid0=pid0 )
Y <- res$Y
nullY <- res$nullY
formulaY <- res$formulaY
nreg <- res$nreg
W <- res$W
YYinv <- res$YYinv
beta.fixed <- res$beta.fixed
beta <- res$beta
#--- response indicators
res <- tam_mml_mfr_proc_response_indicators(nitems, gresp, gresp.noStep)
resp.ind.list <- res$resp.ind.list
gresp.ind <- res$gresp.ind
gresp.noStep.ind <- res$gresp.noStep.ind
resp.ind <- res$resp.ind
nomiss <- res$nomiss
miss.items <- res$miss.items
gresp0.noStep <- res$gresp0.noStep
gresp <- res$gresp
gresp.noStep <- res$gresp.noStep
#-- delete items with only missing responses
res <- tam_mml_mfr_proc_delete_missing_items( miss.items=miss.items,
delete.red.items=delete.red.items, maxK=maxK,
gresp=gresp, gresp.noStep=gresp.noStep, gresp.noStep.ind=gresp.noStep.ind,
A=A, B=B, resp.ind.list=resp.ind.list, resp.ind=resp.ind, nitems=nitems,
pweightsM=pweightsM, pweights=pweights, nstud=nstud, progress=progress )
miss.itemsK <- res$miss.itemsK
miss.items <- res$miss.items
delete.red.items <- res$delete.red.items
A <- res$A
B <- res$B
gresp <- res$gresp
gresp.noStep <- res$gresp.noStep
gresp.noStep.ind <- res$gresp.noStep.ind
resp.ind.list <- res$resp.ind.list
resp.ind <- res$resp.ind
nitems <- res$nitems
pweightsM <- res$pweightsM
#-- AXsi
AXsi <- matrix(0,nrow=nitems,ncol=maxK ) #A times xsi
#--- parameter indices xsi parameters
res <- tam_mml_proc_xsi_parameter_index_A(A=A, np=np)
indexIP <- res$indexIP
indexIP.list <- res$indexIP.list
indexIP.list2 <- res$indexIP.list2
indexIP.no <- res$indexIP.no
#--- sufficient statistics for item parameters
cA <- t( matrix( aperm( A, c(2,1,3) ), nrow=dim(A)[3], byrow=TRUE ) )
res <- tam_mml_sufficient_statistics( nitems=nitems, maxK=maxK, resp=gresp.noStep,
resp.ind=gresp.noStep.ind, pweights=pweights, cA=cA, progress=progress )
ItemScore <- res$ItemScore
cResp <- res$cResp
col.index <- res$col.index
#--- inits xsi
res <- tam_mml_mfr_inits_xsi( gresp.noStep.ind=gresp.noStep.ind, col.index=col.index, cA=cA,
pweights=pweights, xsi=xsi, xsi.start0=xsi.start0, resp=resp, A=A,
xsi.inits=xsi.inits, xsi.fixed=xsi.fixed, ItemScore=ItemScore, est.xsi.index=est.xsi.index )
xsi <- res$xsi
ItemMax <- res$ItemMax
#--- prior distribution xsi
prior_list_xsi <- tam_mml_proc_prior_list_xsi( prior_list_xsi=prior_list_xsi, xsi=xsi )
xsi.min.deviance <- xsi
beta.min.deviance <- beta
variance.min.deviance <- variance
#--- create grid of nodes for numeric or stochastic integration
res <- tam_mml_create_nodes( snodes=snodes, nodes=nodes, ndim=ndim, QMC=QMC )
theta <- res$theta
theta2 <- res$theta2
thetawidth <- res$thetawidth
theta0.samp <- res$theta0.samp
thetasamp.density <- res$thetasamp.density
deviance <- 0
deviance.history <- tam_deviance_history_init(maxiter=maxiter)
iter <- 0
a02 <- a1 <- 999 # item parameter change
a4 <- 0
hwt.min <- 0
rprobs.min <- 0
AXsi.min <- 0
B.min <- 0
deviance.min <- 1E100
itemwt.min <- 0
#--- create unidim_simplify
res <- tam_mml_proc_unidim_simplify( Y=Y, A=A, G=G, beta.fixed=beta.fixed )
unidim_simplify <- res$unidim_simplify
YSD <- res$YSD
Avector <- res$Avector
#--- acceleration
res <- tam_acceleration_inits(acceleration=acceleration, G=G, xsi=xsi,
variance=variance)
xsi_acceleration <- res$xsi_acceleration
variance_acceleration <- res$variance_acceleration
#--- warning multiple group estimation
res <- tam_mml_warning_message_multiple_group_models( ndim=ndim, G=G)
#--- compute some arguments for EM algorithm
maxcat <- tam_rcpp_mml_maxcat(A=as.vector(A), dimA=dim(A) )
##**SE
se.xsi <- 0*xsi
se.B <- 0*B
se.xsi.min <- se.xsi
se.B.min <- se.B
devch <- 0
# display
disp <- "....................................................\n"
# define progress bar for M step
# cat("rest " ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
##############################################################
##############################################################
##############################################################
#Start EM loop here
while ( ( (!betaConv | !varConv) | ((a1 > conv) | (a4 > conv) | (a02 > convD)) ) &
(iter < maxiter) ) {
# a0 <- Sys.time()
iter <- iter + 1
#--- progress
res <- tam_mml_progress_em0(progress=progress, iter=iter, disp=disp)
# calculate nodes for Monte Carlo integration
if ( snodes > 0){
res <- tam_mml_update_stochastic_nodes( theta0.samp=theta0.samp, variance=variance,
snodes=snodes, beta=beta, theta=theta )
theta <- res$theta
theta2 <- res$theta2
thetasamp.density <- res$thetasamp.density
}
olddeviance <- deviance
#--- calculation of probabilities
res <- tam_mml_calc_prob( iIndex=1:nitems, A=A, AXsi=AXsi, B=B, xsi=xsi, theta=theta, nnodes=nnodes,
maxK=maxK, recalc=TRUE, maxcat=maxcat, use_rcpp=TRUE )
# cat("calc prob") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
rprobs <- res$rprobs
AXsi <- res$AXsi
#--- calculate student's prior distribution
gwt <- tam_stud_prior( theta=theta, Y=Y, beta=beta, variance=variance, nstud=nstud,
nnodes=nnodes, ndim=ndim, YSD=YSD, unidim_simplify=unidim_simplify,
snodes=snodes )
#--- calculate student's likelihood
res.hwt <- tam_calc_posterior( rprobs=rprobs, gwt=gwt, resp=gresp.noStep, nitems=nitems,
resp.ind.list=resp.ind.list, normalization=TRUE,
thetasamp.density=thetasamp.density, snodes=snodes, resp.ind=resp.ind,
avoid.zerosum=TRUE )
hwt <- res.hwt$hwt
#--- M step: estimation of beta and variance
resr <- tam_mml_mstep_regression( resp=gresp.noStep, hwt=hwt,
resp.ind=gresp.noStep.ind, pweights=pweights, pweightsM=pweightsM,
Y=Y, theta=theta, theta2=theta2, YYinv=YYinv, ndim=ndim, nstud=nstud,
beta.fixed=beta.fixed, variance=variance, Variance.fixed=variance.fixed,
group=group, G=G, snodes=snodes, nomiss=nomiss, iter=iter,
min.variance=min.variance, userfct.variance=userfct.variance,
variance_acceleration=variance_acceleration, est.variance=est.variance,
beta=beta )
beta <- resr$beta
variance <- resr$variance
itemwt <- resr$itemwt
variance_acceleration <- resr$variance_acceleration
variance_change <- resr$variance_change
beta_change <- resr$beta_change
if ( beta_change < conv){ betaConv <- TRUE }
if ( variance_change < conv){ varConv <- TRUE }
#--- M-step item intercepts
if (mstep_intercept_method=="optim"){
res <- tam_calc_counts( resp=gresp.noStep, theta=theta, resp.ind=gresp.noStep.ind, group=group,
maxK=maxK, pweights=pweights, hwt=hwt )
n.ik <- res$n.ik
}
res <- tam_mml_mstep_intercept( A=A, xsi=xsi, AXsi=AXsi, B=B, theta=theta,
nnodes=nnodes, maxK=maxK, Msteps=Msteps, rprobs=rprobs, np=np,
est.xsi.index0=est.xsi.index0, itemwt=itemwt, indexIP.no=indexIP.no,
indexIP.list2=indexIP.list2, Avector=Avector, max.increment=max.increment,
xsi.fixed=xsi.fixed, fac.oldxsi=fac.oldxsi, ItemScore=ItemScore,
convM=convM, progress=progress, nitems=nitems, iter=iter,
increment.factor=increment.factor, xsi_acceleration=xsi_acceleration,
trim_increment=trim_increment, prior_list_xsi=prior_list_xsi,
mstep_intercept_method=mstep_intercept_method, n.ik=n.ik, maxcat=maxcat )
xsi <- res$xsi
se.xsi <- res$se.xsi
max.increment <- res$max.increment
xsi_acceleration <- res$xsi_acceleration
xsi_change <- res$xsi_change
logprior_xsi <- res$logprior_xsi
#--- compute deviance
res <- tam_mml_compute_deviance( loglike_num=res.hwt$rfx, loglike_sto=res.hwt$rfx,
snodes=snodes, thetawidth=thetawidth, pweights=pweights, deviance=deviance,
deviance.history=deviance.history, iter=iter, logprior_xsi=logprior_xsi )
deviance <- res$deviance
deviance.history <- res$deviance.history
a01 <- rel_deviance_change <- res$rel_deviance_change
a02 <- deviance_change <- res$deviance_change
if (con$dev_crit=="relative" ){ a02 <- a01 }
penalty_xsi <- res$penalty_xsi
deviance_change_signed <- res$deviance_change_signed
if( deviance < deviance.min ){
xsi.min.deviance <- xsi
beta.min.deviance <- beta
variance.min.deviance <- variance
hwt.min <- hwt
AXsi.min <- AXsi
B.min <- B
deviance.min <- deviance
itemwt.min <- itemwt
se.xsi.min <- se.xsi
se.B.min <- se.B
}
a1 <- xsi_change
a2 <- beta_change
a3 <- variance_change
devch <- - ( deviance - olddeviance )
#--- print progress
res <- tam_mml_progress_em( progress=progress, deviance=deviance, deviance_change=deviance_change,
iter=iter, rel_deviance_change=rel_deviance_change, xsi_change=xsi_change,
beta_change=beta_change, variance_change=variance_change, B_change=0,
devch=devch, penalty_xsi=penalty_xsi )
} # end of EM loop
#############################################################
#############################################################
xsi.min.deviance -> xsi
beta.min.deviance -> beta
variance.min.deviance -> variance
hwt.min -> hwt
AXsi.min -> AXsi
B.min -> B
deviance.min -> deviance
itemwt.min -> itemwt
se.xsi.min -> se.xsi
se.B.min -> se.B
#******
#***
resp <- gresp0.noStep
resp.ind <- gresp.noStep.ind
#*** include NAs in AXsi
AXsi <- tam_mml_include_NA_AXsi(AXsi=AXsi, maxcat=maxcat, A=A, xsi=xsi)
#****
# look for non-estimable xsi parameters
# xsi[ xsi==99 ] <- NA
#******
# generate input for fixed parameters
xsi.fixed.estimated <- tam_generate_xsi_fixed_estimated( xsi=xsi, A=A )
B.fixed.estimated <- tam_generate_B_fixed_estimated(B=B)
#**** standard errors AXsi
se.AXsi <- tam_mml_se_AXsi( AXsi=AXsi, A=A, se.xsi=se.xsi, maxK=maxK )
##*** information criteria
ic <- tam_mml_ic( nstud=nstud, deviance=deviance, xsi=xsi, xsi.fixed=xsi.fixed,
beta=beta, beta.fixed=beta.fixed, ndim=ndim,
variance.fixed=variance.fixed, G=G, irtmodel=irtmodel, B_orig=NULL,
B.fixed=B.fixed, E=E, est.variance=TRUE, resp=resp,
est.slopegroups=NULL, variance.Npars=NULL, group=group,
penalty_xsi=penalty_xsi, pweights=pweights, resp.ind=resp.ind )
#*** calculate counts
res <- tam_calc_counts( resp=gresp.noStep, theta=theta, resp.ind=gresp.noStep.ind,
group=group, maxK=maxK, pweights=pweights, hwt=hwt )
n.ik <- res$n.ik
pi.k <- res$pi.k
#--- collect item parameters
item1 <- tam_itempartable( resp=gresp.noStep, maxK=maxK, AXsi=AXsi, B=B,
ndim=ndim, resp.ind=gresp.noStep.ind,
rprobs=rprobs, n.ik=n.ik, pi.k=pi.k, order=TRUE, pweights=pweights )
#--- collect all person statistics
res <- tam_mml_person_posterior( pid=pid, nstud=nstud, pweights=pweights,
resp=gresp.noStep, resp.ind=gresp.noStep.ind, snodes=snodes,
hwtE=hwt, hwt=hwt, ndim=ndim, theta=theta )
person <- res$person
EAP.rel <- res$EAP.rel
#******
s2 <- Sys.time()
item <- data.frame( "xsi.index"=1:np, "xsi.label"=dimnames(A)[[3]], "est"=xsi )
if (progress){
cat(disp)
cat("Item Parameters\n")
item2 <- item
item2[,"est"] <- round( item2[,"est"], 4 )
print(item2)
cat("...................................\n")
cat("Regression Coefficients\n")
print( beta, 4 )
cat("\nVariance:\n" ) #, round( varianceM, 4 ))
if (G==1 ){
varianceM <- matrix( variance, nrow=ndim, ncol=ndim )
print( varianceM, 4 )
} else {
print( variance[ var.indices], 4 )
}
if ( ndim > 1){
cat("\nCorrelation Matrix:\n" ) #, round( varianceM, 4 ))
print( stats::cov2cor(varianceM), 4 )
}
cat("\n\nEAP Reliability:\n")
print( round (EAP.rel,3) )
cat("\n-----------------------------")
devmin <- which.min( deviance.history[,2] )
if ( devmin < iter ){
cat(paste("\n\nMinimal deviance at iteration ", devmin,
" with deviance ", round(deviance.history[ devmin, 2 ],3), sep=""), "\n")
cat("The corresponding estimates are\n")
cat(" xsi.min.deviance\n beta.min.deviance \n variance.min.deviance\n\n")
}
cat( "\nStart: ", paste(s1))
cat( "\nEnd: ", paste(s2),"\n")
print(s2-s1)
cat( "\n" )
}
#--- collect xsi parameters
res <- tam_mml_mfr_collect_xsi_parameters( xsi.constr=xsi.constr, resp=resp, A=A, xsi=xsi,
se.xsi=se.xsi, delete.red.items=delete.red.items, itemnames=itemnames,
miss.items=miss.items )
resp <- res$resp
xsi <- res$xsi
xsi.facets <- res$xsi.facets
#--- recompute posterior
res.hwt <- tam_calc_posterior( rprobs=rprobs, gwt=1+0*gwt, resp=resp, nitems=nitems,
resp.ind.list=resp.ind.list, normalization=FALSE, thetasamp.density=thetasamp.density,
snodes=snodes, resp.ind=resp.ind )
res.like <- res.hwt$hwt
#***** standardized coefficients
latreg_stand <- tam_latent_regression_standardized_solution(variance=variance, beta=beta, Y=Y)
#--- OUTPUT LIST
deviance.history <- deviance.history[ 1:iter, ]
res <- list( "xsi"=xsi, "xsi.facets"=xsi.facets,
"beta"=beta, "variance"=variance,
"item"=item1,
"person"=person, pid=pid, "EAP.rel"=EAP.rel,
"post"=hwt, "rprobs"=rprobs, "itemweight"=itemwt,
"theta"=theta,
"n.ik"=n.ik, "pi.k"=pi.k,
"Y"=Y, "resp"=resp,
"resp.ind"=resp.ind, "group"=group,
"G"=if ( is.null(group)){1} else { length(unique( group ) )},
"groups"=if ( is.null(group)){1} else { groups },
"formulaY"=formulaY, "dataY"=dataY,
"pweights"=pweights,
"time"=c(s1,s2), "A"=A, "B"=B,
"se.B"=se.B,
"nitems"=nitems, "maxK"=maxK, "AXsi"=AXsi,
"AXsi_"=- AXsi,
"se.AXsi"=se.AXsi,
"nstud"=nstud, "resp.ind.list"=resp.ind.list,
"hwt"=hwt, "like"=res.like, "ndim"=ndim,
"xsi.fixed"=xsi.fixed,
"xsi.fixed.estimated"=xsi.fixed.estimated,
"B.fixed.estimated"=B.fixed.estimated,
"beta.fixed"=beta.fixed, "Q"=Q,
"formulaA"=formulaA, "facets"=facets,
"xsi.constr"=xsi.constr,
"variance.fixed"=variance.fixed,
"nnodes"=nnodes, "deviance"=ic$deviance,
"ic"=ic, thetasamp.density=thetasamp.density,
"deviance.history"=deviance.history,
"control"=con1a, "irtmodel"=irtmodel,
"iter"=iter, "resp_orig"=resp_orig,
"printxsi"=TRUE, "YSD"=YSD, "PSF"=PSF,
CALL=CALL, latreg_stand=latreg_stand,
prior_list_xsi=prior_list_xsi, penalty_xsi=penalty_xsi
)
class(res) <- "tam.mml"
return(res)
}
| /R/tam.mml.mfr.R | no_license | cran/TAM | R | false | false | 23,692 | r | ## File Name: tam.mml.mfr.R
## File Version: 9.955
tam.mml.mfr <- function( resp, Y=NULL, group=NULL, irtmodel="1PL",
formulaY=NULL, dataY=NULL, ndim=1, pid=NULL, xsi.fixed=NULL,
xsi.setnull=NULL, xsi.inits=NULL, beta.fixed=NULL, beta.inits=NULL,
variance.fixed=NULL, variance.inits=NULL, est.variance=TRUE,
formulaA=~item+item:step, constraint="cases", A=NULL, B=NULL,
B.fixed=NULL, Q=NULL, facets=NULL, est.slopegroups=NULL, E=NULL,
pweights=NULL, verbose=TRUE, control=list(), delete.red.items=TRUE )
{
CALL <- match.call()
a0 <- Sys.time()
s1 <- Sys.time()
prior_list_xsi=NULL
mstep_intercept_method <- "R"
# display
disp <- "....................................................\n"
increment.factor <- progress <- nodes <- snodes <- ridge <- xsi.start0 <- QMC <- NULL
maxiter <- conv <- convD <- min.variance <- max.increment <- Msteps <- convM <- NULL
resp_orig <- resp
B00 <- B
B <- trim_increment <- NULL
fac.oldxsi <- acceleration <- NULL
#**** handle verbose argument
args_CALL <- as.list( sys.call() )
if ( ! tam_in_names_list( list=control, variable="progress" ) ){
control$progress <- verbose
}
#--- attach control elements
e1 <- environment()
tam_fct <- "tam.mml.mfr"
res <- tam_mml_control_list_define(control=control, envir=e1, tam_fct=tam_fct,
prior_list_xsi=prior_list_xsi)
con <- res$con
con1a <- res$con1a
#- check constraint
constraint <- tam_mml_constraint_check(constraint=constraint)
# userfct.variance is not allowed in tam.mml.mfr
userfct.variance <- NULL
fac.oldxsi <- max( 0, min( c( fac.oldxsi, .95 ) ) )
if ( constraint=="items" ){ beta.fixed <- FALSE }
pid0 <- pid <- unname(c(unlist(pid)))
if (progress){
cat(disp)
cat("Processing Data ", paste(Sys.time()), "\n") ; utils::flush.console()
}
if ( ! is.null(group) ){
con1a$QMC <- QMC <- FALSE
con1a$snodes <- snodes <- 0
}
resp <- as.matrix(resp)
resp <- add.colnames.resp(resp)
itemnames <- colnames(resp)
nullY <- is.null(Y)
if ( ! is.null(facets) ){
facets <- as.data.frame(facets)
}
# cat("read data" ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
#--- compute maxKi
res <- tam_mml_mfr_proc_compute_maxKi(resp=resp, facets=facets)
maxKi <- res$maxKi
#--- handle formula and facets
resp00 <- resp
res <- tam_mml_mfr_dataprep( formulaA=formulaA, xsi.setnull=xsi.setnull, B=B,
Q=Q, resp=resp, pid=pid, facets=facets, beta.fixed=beta.fixed )
formulaA <- res$formula_update
xsi.setnull <- res$xsi.setnull
beta.fixed <- res$beta.fixed
facets <- res$facets
PSF <- res$PSF
pid <- res$pid
#cat(" mml mfr dataprep " ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
#--- create design matrices
res <- tam_mml_mfr_proc_create_design_matrices( pid=pid, maxKi=maxKi, resp=resp,
formulaA=formulaA, facets=facets, constraint=constraint, ndim=ndim, Q=Q,
A=A, B=B, progress=progress, xsi.fixed=xsi.fixed, resp00=resp00, B00=B00,
beta.fixed=beta.fixed )
pid <- res$pid
diffKi <- res$diffKi
var_ki <- res$var_ki
xsi.fixed <- res$xsi.fixed
xsi.elim <- res$xsi.elim
beta.fixed <- res$beta.fixed
A <- res$A
cA <- res$cA
B <- res$B
Q <- res$Q
X <- res$X
X.red <- res$X.red
gresp <- res$gresp
gresp.noStep <- res$gresp.noStep
xsi.constr <- res$xsi.constr
design <- res$design
# cat(" --- design matrix ready" ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
#--- processing in case of multiple person IDs in a dataset
tp <- max(table(pid))
if ( tp > 1){
res <- tam_mml_mfr_proc_multiple_person_ids( pid=pid, tp=tp, gresp=gresp,
gresp.noStep=gresp.noStep, progress=progress, group=group, Y=Y,
pweights=pweights)
pid <- res$pid
gresp <- res$gresp
gresp.noStep <- res$gresp.noStep
group <- res$group
Y <- res$Y
pweights <- res$pweights
}
# cat("process data in case of multiple persons" ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
#--- set some xsi effects to zero
res <- tam_mml_mfr_proc_xsi_setnull( xsi.setnull=xsi.setnull, A=A, xsi.fixed=xsi.fixed )
xsi.fixed <- res$xsi.fixed
xsi0 <- res$xsi0
nitems <- nrow(X.red)
nstud <- nrow(gresp) # number of students
if ( is.null(pweights) ){
pweights <- rep(1,nstud) # weights of response pattern
}
if (progress){
cat(" * Response Data:", nstud, "Persons and ",
ncol(gresp.noStep), "Generalized Items (", paste(Sys.time()),")\n" ) ;
utils::flush.console()
}
if ( is.null(pid) ){
pid <- seq(1,nstud)
}
# normalize person weights to sum up to nstud
pweights <- nstud * pweights / sum(pweights)
# a matrix version of person weights
pweightsM <- outer( pweights, rep(1,nitems) )
# calculate ndim if only B or Q are supplied
if ( ! is.null(B) ){ ndim <- dim(B)[3] }
if ( ! is.null(Q) ){ ndim <- dim(Q)[2] }
betaConv <- FALSE #flag of regression coefficient convergence
varConv <- FALSE #flag of variance convergence
nnodes <- length(nodes)^ndim
if ( snodes > 0 ){ nnodes <- snodes }
#--- print information about nodes
res <- tam_mml_progress_proc_nodes( progress=progress, snodes=snodes, nnodes=nnodes,
skillspace="normal", QMC=QMC)
#--- maximum no. of categories per item. Assuming dichotomous
maxK <- max( resp, na.rm=TRUE ) + 1
#--- number of parameters
np <- dim(A)[[3]]
#--- xsi parameter index
res <- tam_mml_proc_est_xsi_index(A, xsi.inits, xsi.fixed)
np <- res$np
xsi <- res$xsi
est.xsi.index0 <- est.xsi.index <- res$est.xsi.index
#--- inits variance
res <- tam_mml_inits_variance( variance.inits=variance.inits, ndim=ndim, variance.fixed=variance.fixed )
variance <- res$variance
#--- inits group
res <- tam_mml_inits_groups( group=group )
G <- res$G
groups <- res$groups
group <- res$group
var.indices <- res$var.indices
#--- inits beta
res <- tam_mml_mfr_inits_beta( Y=Y, formulaY=formulaY, dataY=dataY, G=G, group=group, groups=groups,
nstud=nstud, pweights=pweights, ridge=ridge, beta.fixed=beta.fixed, xsi.fixed=xsi.fixed,
constraint=constraint, ndim=ndim, beta.inits=beta.inits, tp=tp, gresp=gresp,
pid0=pid0 )
Y <- res$Y
nullY <- res$nullY
formulaY <- res$formulaY
nreg <- res$nreg
W <- res$W
YYinv <- res$YYinv
beta.fixed <- res$beta.fixed
beta <- res$beta
#--- response indicators
res <- tam_mml_mfr_proc_response_indicators(nitems, gresp, gresp.noStep)
resp.ind.list <- res$resp.ind.list
gresp.ind <- res$gresp.ind
gresp.noStep.ind <- res$gresp.noStep.ind
resp.ind <- res$resp.ind
nomiss <- res$nomiss
miss.items <- res$miss.items
gresp0.noStep <- res$gresp0.noStep
gresp <- res$gresp
gresp.noStep <- res$gresp.noStep
#-- delete items with only missing responses
res <- tam_mml_mfr_proc_delete_missing_items( miss.items=miss.items,
delete.red.items=delete.red.items, maxK=maxK,
gresp=gresp, gresp.noStep=gresp.noStep, gresp.noStep.ind=gresp.noStep.ind,
A=A, B=B, resp.ind.list=resp.ind.list, resp.ind=resp.ind, nitems=nitems,
pweightsM=pweightsM, pweights=pweights, nstud=nstud, progress=progress )
miss.itemsK <- res$miss.itemsK
miss.items <- res$miss.items
delete.red.items <- res$delete.red.items
A <- res$A
B <- res$B
gresp <- res$gresp
gresp.noStep <- res$gresp.noStep
gresp.noStep.ind <- res$gresp.noStep.ind
resp.ind.list <- res$resp.ind.list
resp.ind <- res$resp.ind
nitems <- res$nitems
pweightsM <- res$pweightsM
#-- AXsi
AXsi <- matrix(0,nrow=nitems,ncol=maxK ) #A times xsi
#--- parameter indices xsi parameters
res <- tam_mml_proc_xsi_parameter_index_A(A=A, np=np)
indexIP <- res$indexIP
indexIP.list <- res$indexIP.list
indexIP.list2 <- res$indexIP.list2
indexIP.no <- res$indexIP.no
#--- sufficient statistics for item parameters
cA <- t( matrix( aperm( A, c(2,1,3) ), nrow=dim(A)[3], byrow=TRUE ) )
res <- tam_mml_sufficient_statistics( nitems=nitems, maxK=maxK, resp=gresp.noStep,
resp.ind=gresp.noStep.ind, pweights=pweights, cA=cA, progress=progress )
ItemScore <- res$ItemScore
cResp <- res$cResp
col.index <- res$col.index
#--- inits xsi
res <- tam_mml_mfr_inits_xsi( gresp.noStep.ind=gresp.noStep.ind, col.index=col.index, cA=cA,
pweights=pweights, xsi=xsi, xsi.start0=xsi.start0, resp=resp, A=A,
xsi.inits=xsi.inits, xsi.fixed=xsi.fixed, ItemScore=ItemScore, est.xsi.index=est.xsi.index )
xsi <- res$xsi
ItemMax <- res$ItemMax
#--- prior distribution xsi
prior_list_xsi <- tam_mml_proc_prior_list_xsi( prior_list_xsi=prior_list_xsi, xsi=xsi )
xsi.min.deviance <- xsi
beta.min.deviance <- beta
variance.min.deviance <- variance
#--- create grid of nodes for numeric or stochastic integration
res <- tam_mml_create_nodes( snodes=snodes, nodes=nodes, ndim=ndim, QMC=QMC )
theta <- res$theta
theta2 <- res$theta2
thetawidth <- res$thetawidth
theta0.samp <- res$theta0.samp
thetasamp.density <- res$thetasamp.density
deviance <- 0
deviance.history <- tam_deviance_history_init(maxiter=maxiter)
iter <- 0
a02 <- a1 <- 999 # item parameter change
a4 <- 0
hwt.min <- 0
rprobs.min <- 0
AXsi.min <- 0
B.min <- 0
deviance.min <- 1E100
itemwt.min <- 0
#--- create unidim_simplify
res <- tam_mml_proc_unidim_simplify( Y=Y, A=A, G=G, beta.fixed=beta.fixed )
unidim_simplify <- res$unidim_simplify
YSD <- res$YSD
Avector <- res$Avector
#--- acceleration
res <- tam_acceleration_inits(acceleration=acceleration, G=G, xsi=xsi,
variance=variance)
xsi_acceleration <- res$xsi_acceleration
variance_acceleration <- res$variance_acceleration
#--- warning multiple group estimation
res <- tam_mml_warning_message_multiple_group_models( ndim=ndim, G=G)
#--- compute some arguments for EM algorithm
maxcat <- tam_rcpp_mml_maxcat(A=as.vector(A), dimA=dim(A) )
##**SE
se.xsi <- 0*xsi
se.B <- 0*B
se.xsi.min <- se.xsi
se.B.min <- se.B
devch <- 0
# display
disp <- "....................................................\n"
# define progress bar for M step
# cat("rest " ) ; a1 <- Sys.time() ; print(a1-a0) ; a0 <- a1
##############################################################
##############################################################
##############################################################
#Start EM loop here
while ( ( (!betaConv | !varConv) | ((a1 > conv) | (a4 > conv) | (a02 > convD)) ) &
(iter < maxiter) ) {
# a0 <- Sys.time()
iter <- iter + 1
#--- progress
res <- tam_mml_progress_em0(progress=progress, iter=iter, disp=disp)
# calculate nodes for Monte Carlo integration
if ( snodes > 0){
res <- tam_mml_update_stochastic_nodes( theta0.samp=theta0.samp, variance=variance,
snodes=snodes, beta=beta, theta=theta )
theta <- res$theta
theta2 <- res$theta2
thetasamp.density <- res$thetasamp.density
}
olddeviance <- deviance
#--- calculation of probabilities
res <- tam_mml_calc_prob( iIndex=1:nitems, A=A, AXsi=AXsi, B=B, xsi=xsi, theta=theta, nnodes=nnodes,
maxK=maxK, recalc=TRUE, maxcat=maxcat, use_rcpp=TRUE )
# cat("calc prob") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
rprobs <- res$rprobs
AXsi <- res$AXsi
#--- calculate student's prior distribution
gwt <- tam_stud_prior( theta=theta, Y=Y, beta=beta, variance=variance, nstud=nstud,
nnodes=nnodes, ndim=ndim, YSD=YSD, unidim_simplify=unidim_simplify,
snodes=snodes )
#--- calculate student's likelihood
res.hwt <- tam_calc_posterior( rprobs=rprobs, gwt=gwt, resp=gresp.noStep, nitems=nitems,
resp.ind.list=resp.ind.list, normalization=TRUE,
thetasamp.density=thetasamp.density, snodes=snodes, resp.ind=resp.ind,
avoid.zerosum=TRUE )
hwt <- res.hwt$hwt
#--- M step: estimation of beta and variance
resr <- tam_mml_mstep_regression( resp=gresp.noStep, hwt=hwt,
resp.ind=gresp.noStep.ind, pweights=pweights, pweightsM=pweightsM,
Y=Y, theta=theta, theta2=theta2, YYinv=YYinv, ndim=ndim, nstud=nstud,
beta.fixed=beta.fixed, variance=variance, Variance.fixed=variance.fixed,
group=group, G=G, snodes=snodes, nomiss=nomiss, iter=iter,
min.variance=min.variance, userfct.variance=userfct.variance,
variance_acceleration=variance_acceleration, est.variance=est.variance,
beta=beta )
beta <- resr$beta
variance <- resr$variance
itemwt <- resr$itemwt
variance_acceleration <- resr$variance_acceleration
variance_change <- resr$variance_change
beta_change <- resr$beta_change
if ( beta_change < conv){ betaConv <- TRUE }
if ( variance_change < conv){ varConv <- TRUE }
#--- M-step item intercepts
if (mstep_intercept_method=="optim"){
res <- tam_calc_counts( resp=gresp.noStep, theta=theta, resp.ind=gresp.noStep.ind, group=group,
maxK=maxK, pweights=pweights, hwt=hwt )
n.ik <- res$n.ik
}
res <- tam_mml_mstep_intercept( A=A, xsi=xsi, AXsi=AXsi, B=B, theta=theta,
nnodes=nnodes, maxK=maxK, Msteps=Msteps, rprobs=rprobs, np=np,
est.xsi.index0=est.xsi.index0, itemwt=itemwt, indexIP.no=indexIP.no,
indexIP.list2=indexIP.list2, Avector=Avector, max.increment=max.increment,
xsi.fixed=xsi.fixed, fac.oldxsi=fac.oldxsi, ItemScore=ItemScore,
convM=convM, progress=progress, nitems=nitems, iter=iter,
increment.factor=increment.factor, xsi_acceleration=xsi_acceleration,
trim_increment=trim_increment, prior_list_xsi=prior_list_xsi,
mstep_intercept_method=mstep_intercept_method, n.ik=n.ik, maxcat=maxcat )
xsi <- res$xsi
se.xsi <- res$se.xsi
max.increment <- res$max.increment
xsi_acceleration <- res$xsi_acceleration
xsi_change <- res$xsi_change
logprior_xsi <- res$logprior_xsi
#--- compute deviance
res <- tam_mml_compute_deviance( loglike_num=res.hwt$rfx, loglike_sto=res.hwt$rfx,
snodes=snodes, thetawidth=thetawidth, pweights=pweights, deviance=deviance,
deviance.history=deviance.history, iter=iter, logprior_xsi=logprior_xsi )
deviance <- res$deviance
deviance.history <- res$deviance.history
a01 <- rel_deviance_change <- res$rel_deviance_change
a02 <- deviance_change <- res$deviance_change
if (con$dev_crit=="relative" ){ a02 <- a01 }
penalty_xsi <- res$penalty_xsi
deviance_change_signed <- res$deviance_change_signed
if( deviance < deviance.min ){
xsi.min.deviance <- xsi
beta.min.deviance <- beta
variance.min.deviance <- variance
hwt.min <- hwt
AXsi.min <- AXsi
B.min <- B
deviance.min <- deviance
itemwt.min <- itemwt
se.xsi.min <- se.xsi
se.B.min <- se.B
}
a1 <- xsi_change
a2 <- beta_change
a3 <- variance_change
devch <- - ( deviance - olddeviance )
#--- print progress
res <- tam_mml_progress_em( progress=progress, deviance=deviance, deviance_change=deviance_change,
iter=iter, rel_deviance_change=rel_deviance_change, xsi_change=xsi_change,
beta_change=beta_change, variance_change=variance_change, B_change=0,
devch=devch, penalty_xsi=penalty_xsi )
} # end of EM loop
#############################################################
#############################################################
xsi.min.deviance -> xsi
beta.min.deviance -> beta
variance.min.deviance -> variance
hwt.min -> hwt
AXsi.min -> AXsi
B.min -> B
deviance.min -> deviance
itemwt.min -> itemwt
se.xsi.min -> se.xsi
se.B.min -> se.B
#******
#***
resp <- gresp0.noStep
resp.ind <- gresp.noStep.ind
#*** include NAs in AXsi
AXsi <- tam_mml_include_NA_AXsi(AXsi=AXsi, maxcat=maxcat, A=A, xsi=xsi)
#****
# look for non-estimable xsi parameters
# xsi[ xsi==99 ] <- NA
#******
# generate input for fixed parameters
xsi.fixed.estimated <- tam_generate_xsi_fixed_estimated( xsi=xsi, A=A )
B.fixed.estimated <- tam_generate_B_fixed_estimated(B=B)
#**** standard errors AXsi
se.AXsi <- tam_mml_se_AXsi( AXsi=AXsi, A=A, se.xsi=se.xsi, maxK=maxK )
##*** information criteria
ic <- tam_mml_ic( nstud=nstud, deviance=deviance, xsi=xsi, xsi.fixed=xsi.fixed,
beta=beta, beta.fixed=beta.fixed, ndim=ndim,
variance.fixed=variance.fixed, G=G, irtmodel=irtmodel, B_orig=NULL,
B.fixed=B.fixed, E=E, est.variance=TRUE, resp=resp,
est.slopegroups=NULL, variance.Npars=NULL, group=group,
penalty_xsi=penalty_xsi, pweights=pweights, resp.ind=resp.ind )
#*** calculate counts
res <- tam_calc_counts( resp=gresp.noStep, theta=theta, resp.ind=gresp.noStep.ind,
group=group, maxK=maxK, pweights=pweights, hwt=hwt )
n.ik <- res$n.ik
pi.k <- res$pi.k
#--- collect item parameters
item1 <- tam_itempartable( resp=gresp.noStep, maxK=maxK, AXsi=AXsi, B=B,
ndim=ndim, resp.ind=gresp.noStep.ind,
rprobs=rprobs, n.ik=n.ik, pi.k=pi.k, order=TRUE, pweights=pweights )
#--- collect all person statistics
res <- tam_mml_person_posterior( pid=pid, nstud=nstud, pweights=pweights,
resp=gresp.noStep, resp.ind=gresp.noStep.ind, snodes=snodes,
hwtE=hwt, hwt=hwt, ndim=ndim, theta=theta )
person <- res$person
EAP.rel <- res$EAP.rel
#******
s2 <- Sys.time()
item <- data.frame( "xsi.index"=1:np, "xsi.label"=dimnames(A)[[3]], "est"=xsi )
if (progress){
cat(disp)
cat("Item Parameters\n")
item2 <- item
item2[,"est"] <- round( item2[,"est"], 4 )
print(item2)
cat("...................................\n")
cat("Regression Coefficients\n")
print( beta, 4 )
cat("\nVariance:\n" ) #, round( varianceM, 4 ))
if (G==1 ){
varianceM <- matrix( variance, nrow=ndim, ncol=ndim )
print( varianceM, 4 )
} else {
print( variance[ var.indices], 4 )
}
if ( ndim > 1){
cat("\nCorrelation Matrix:\n" ) #, round( varianceM, 4 ))
print( stats::cov2cor(varianceM), 4 )
}
cat("\n\nEAP Reliability:\n")
print( round (EAP.rel,3) )
cat("\n-----------------------------")
devmin <- which.min( deviance.history[,2] )
if ( devmin < iter ){
cat(paste("\n\nMinimal deviance at iteration ", devmin,
" with deviance ", round(deviance.history[ devmin, 2 ],3), sep=""), "\n")
cat("The corresponding estimates are\n")
cat(" xsi.min.deviance\n beta.min.deviance \n variance.min.deviance\n\n")
}
cat( "\nStart: ", paste(s1))
cat( "\nEnd: ", paste(s2),"\n")
print(s2-s1)
cat( "\n" )
}
#--- collect xsi parameters
res <- tam_mml_mfr_collect_xsi_parameters( xsi.constr=xsi.constr, resp=resp, A=A, xsi=xsi,
se.xsi=se.xsi, delete.red.items=delete.red.items, itemnames=itemnames,
miss.items=miss.items )
resp <- res$resp
xsi <- res$xsi
xsi.facets <- res$xsi.facets
#--- recompute posterior
res.hwt <- tam_calc_posterior( rprobs=rprobs, gwt=1+0*gwt, resp=resp, nitems=nitems,
resp.ind.list=resp.ind.list, normalization=FALSE, thetasamp.density=thetasamp.density,
snodes=snodes, resp.ind=resp.ind )
res.like <- res.hwt$hwt
#***** standardized coefficients
latreg_stand <- tam_latent_regression_standardized_solution(variance=variance, beta=beta, Y=Y)
#--- OUTPUT LIST
deviance.history <- deviance.history[ 1:iter, ]
res <- list( "xsi"=xsi, "xsi.facets"=xsi.facets,
"beta"=beta, "variance"=variance,
"item"=item1,
"person"=person, pid=pid, "EAP.rel"=EAP.rel,
"post"=hwt, "rprobs"=rprobs, "itemweight"=itemwt,
"theta"=theta,
"n.ik"=n.ik, "pi.k"=pi.k,
"Y"=Y, "resp"=resp,
"resp.ind"=resp.ind, "group"=group,
"G"=if ( is.null(group)){1} else { length(unique( group ) )},
"groups"=if ( is.null(group)){1} else { groups },
"formulaY"=formulaY, "dataY"=dataY,
"pweights"=pweights,
"time"=c(s1,s2), "A"=A, "B"=B,
"se.B"=se.B,
"nitems"=nitems, "maxK"=maxK, "AXsi"=AXsi,
"AXsi_"=- AXsi,
"se.AXsi"=se.AXsi,
"nstud"=nstud, "resp.ind.list"=resp.ind.list,
"hwt"=hwt, "like"=res.like, "ndim"=ndim,
"xsi.fixed"=xsi.fixed,
"xsi.fixed.estimated"=xsi.fixed.estimated,
"B.fixed.estimated"=B.fixed.estimated,
"beta.fixed"=beta.fixed, "Q"=Q,
"formulaA"=formulaA, "facets"=facets,
"xsi.constr"=xsi.constr,
"variance.fixed"=variance.fixed,
"nnodes"=nnodes, "deviance"=ic$deviance,
"ic"=ic, thetasamp.density=thetasamp.density,
"deviance.history"=deviance.history,
"control"=con1a, "irtmodel"=irtmodel,
"iter"=iter, "resp_orig"=resp_orig,
"printxsi"=TRUE, "YSD"=YSD, "PSF"=PSF,
CALL=CALL, latreg_stand=latreg_stand,
prior_list_xsi=prior_list_xsi, penalty_xsi=penalty_xsi
)
class(res) <- "tam.mml"
return(res)
}
|
# Coursera Getting and Cleaning Data -- Course Project
# Author: Paul Kerl
# Date: 12/27/2015

# dplyr is used from step 2 onward (filter) and in step 5 (group_by/summarise),
# so it must be attached before the first filter() call; otherwise that call
# would dispatch to stats::filter() and fail.
library(dplyr)

###################################################################################
###### 1. Merges the training and the test sets to create one data set. ###########
###################################################################################
#set working directory
#setwd("C:/working/directory/") #dummy working directory

# X test and training sets (561 feature dataset)
df_test <- read.table("UCI HAR Dataset/test/X_test.txt")
df_train <- read.table("UCI HAR Dataset/train/X_train.txt")
df <- rbind(df_test, df_train)

# subject ID test and training sets
df_subtest <- read.table("UCI HAR Dataset/test/subject_test.txt")
df_subtrain <- read.table("UCI HAR Dataset/train/subject_train.txt")
df_sub <- rbind(df_subtest, df_subtrain)
colnames(df_sub) <- c("Subject")

# Y test and training sets (activities)
df_ytest <- read.table("UCI HAR Dataset/test/y_test.txt")
df_ytrain <- read.table("UCI HAR Dataset/train/y_train.txt")
df_act <- rbind(df_ytest, df_ytrain)
colnames(df_act) <- c("Activity")

# merged dataset (final dataset for Part 1):
# columns 1-561 are features, column 562 is Subject, column 563 is Activity
df_full <- cbind(df, df_sub, df_act)

###################################################################################
###### 2. Extracts only the measurements on the mean and standard deviation ######
###### for each measurement. ######
###################################################################################
# Interpreted as any variable whose name contains "std", "mean" or "Mean".
features <- read.table("UCI HAR Dataset/features.txt")
meanAndStdDevCols <- filter(features, grepl("mean|Mean|std", V2))[, 1]
meanAndStdDevCols <- c(meanAndStdDevCols, 562, 563) # keep Subject and Activity too

# subsetted dataset (final dataset for Part 2); includes Subject and Activity
meanAndStdDev <- df_full[meanAndStdDevCols]

###################################################################################
###### 3. Uses descriptive activity names to name the activities in the data set ##
###################################################################################
activities <- read.table("UCI HAR Dataset/activity_labels.txt")
# replace the numeric codes by matching activity number -> activity name
meanAndStdDev$Activity <- activities[, 2][match(meanAndStdDev$Activity, activities[, 1])]

###################################################################################
###### 4. Appropriately labels the data set with descriptive variable names. #####
###################################################################################
# variable labels come from the "features" dataframe
column_names <- c(as.vector(features[, 2]), "Subject", "Activity")
# make the names more descriptive via string replacement
column_names <- gsub("Acc", "Acceleration", column_names)
column_names <- gsub("Mag", "Magnitude", column_names)
column_names <- gsub("Freq", "Frequency", column_names)
column_names <- gsub("mean", "Mean", column_names)
column_names <- gsub("std", "StandardDeviation", column_names)
# remove "-" and "()"
column_names <- gsub("-", "", column_names)
column_names <- gsub("\\(", "", column_names)
column_names <- gsub("\\)", "", column_names)
# label full dataset with modified column names
colnames(df_full) <- column_names

# final labeled dataset: the column positions selected in step 2 are unchanged
# by the renaming, so the index vector from step 2 can simply be reused
meanAndStdDevSubAct <- df_full[meanAndStdDevCols]
# fixed: carry the descriptive activity names of step 3 into this dataset, so
# the final tidy output reports activity names instead of numeric codes
# (the original relabeled only meanAndStdDev, whose labels were then discarded)
meanAndStdDevSubAct$Activity <- activities[, 2][match(meanAndStdDevSubAct$Activity, activities[, 1])]

###################################################################################
###### 5. From the data set in step 4, creates a second, independent tidy data ####
###### set with the average of each variable for each activity and each ####
###### subject. ####
###################################################################################
# mean of every measurement column for each Subject/Activity combination
# (summarise_all replaces the deprecated summarise_each(funs(mean)))
df_final <- meanAndStdDevSubAct %>%
  group_by(Subject, Activity) %>%
  summarise_all(mean)

# write final table ("row.names" spelled out; the original relied on partial
# matching of "row.name")
write.table(df_final, file = "final.txt", row.names = FALSE)
| /run_analysis.R | no_license | pykerl/GetDataAssignment2 | R | false | false | 5,004 | r | # Coursera Getting and Cleaning Data -- Course Project
# Author: Paul Kerl
# Date: 12/27/2015

# dplyr is used from step 2 onward (filter) and in step 5 (group_by/summarise),
# so it must be attached before the first filter() call; otherwise that call
# would dispatch to stats::filter() and fail.
library(dplyr)

###################################################################################
###### 1. Merges the training and the test sets to create one data set. ###########
###################################################################################
#set working directory
#setwd("C:/working/directory/") #dummy working directory

# X test and training sets (561 feature dataset)
df_test <- read.table("UCI HAR Dataset/test/X_test.txt")
df_train <- read.table("UCI HAR Dataset/train/X_train.txt")
df <- rbind(df_test, df_train)

# subject ID test and training sets
df_subtest <- read.table("UCI HAR Dataset/test/subject_test.txt")
df_subtrain <- read.table("UCI HAR Dataset/train/subject_train.txt")
df_sub <- rbind(df_subtest, df_subtrain)
colnames(df_sub) <- c("Subject")

# Y test and training sets (activities)
df_ytest <- read.table("UCI HAR Dataset/test/y_test.txt")
df_ytrain <- read.table("UCI HAR Dataset/train/y_train.txt")
df_act <- rbind(df_ytest, df_ytrain)
colnames(df_act) <- c("Activity")

# merged dataset (final dataset for Part 1):
# columns 1-561 are features, column 562 is Subject, column 563 is Activity
df_full <- cbind(df, df_sub, df_act)

###################################################################################
###### 2. Extracts only the measurements on the mean and standard deviation ######
###### for each measurement. ######
###################################################################################
# Interpreted as any variable whose name contains "std", "mean" or "Mean".
features <- read.table("UCI HAR Dataset/features.txt")
meanAndStdDevCols <- filter(features, grepl("mean|Mean|std", V2))[, 1]
meanAndStdDevCols <- c(meanAndStdDevCols, 562, 563) # keep Subject and Activity too

# subsetted dataset (final dataset for Part 2); includes Subject and Activity
meanAndStdDev <- df_full[meanAndStdDevCols]

###################################################################################
###### 3. Uses descriptive activity names to name the activities in the data set ##
###################################################################################
activities <- read.table("UCI HAR Dataset/activity_labels.txt")
# replace the numeric codes by matching activity number -> activity name
meanAndStdDev$Activity <- activities[, 2][match(meanAndStdDev$Activity, activities[, 1])]

###################################################################################
###### 4. Appropriately labels the data set with descriptive variable names. #####
###################################################################################
# variable labels come from the "features" dataframe
column_names <- c(as.vector(features[, 2]), "Subject", "Activity")
# make the names more descriptive via string replacement
column_names <- gsub("Acc", "Acceleration", column_names)
column_names <- gsub("Mag", "Magnitude", column_names)
column_names <- gsub("Freq", "Frequency", column_names)
column_names <- gsub("mean", "Mean", column_names)
column_names <- gsub("std", "StandardDeviation", column_names)
# remove "-" and "()"
column_names <- gsub("-", "", column_names)
column_names <- gsub("\\(", "", column_names)
column_names <- gsub("\\)", "", column_names)
# label full dataset with modified column names
colnames(df_full) <- column_names

# final labeled dataset: the column positions selected in step 2 are unchanged
# by the renaming, so the index vector from step 2 can simply be reused
meanAndStdDevSubAct <- df_full[meanAndStdDevCols]
# fixed: carry the descriptive activity names of step 3 into this dataset, so
# the final tidy output reports activity names instead of numeric codes
# (the original relabeled only meanAndStdDev, whose labels were then discarded)
meanAndStdDevSubAct$Activity <- activities[, 2][match(meanAndStdDevSubAct$Activity, activities[, 1])]

###################################################################################
###### 5. From the data set in step 4, creates a second, independent tidy data ####
###### set with the average of each variable for each activity and each ####
###### subject. ####
###################################################################################
# mean of every measurement column for each Subject/Activity combination
# (summarise_all replaces the deprecated summarise_each(funs(mean)))
df_final <- meanAndStdDevSubAct %>%
  group_by(Subject, Activity) %>%
  summarise_all(mean)

# write final table ("row.names" spelled out; the original relied on partial
# matching of "row.name")
write.table(df_final, file = "final.txt", row.names = FALSE)
|
# Auto-generated fuzzing regression input (AFL/valgrind test file) for the
# unexported metacoder geometry helper intersect_line_rectangle().
# The arguments b, p1 and p2 are filled with extreme double values (denormals,
# huge magnitudes, negatives) to probe numeric edge cases; their exact meaning
# is defined by the helper itself -- not inferable from this file.
testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -1998.43889941466, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
# Invoke the helper with the fuzzed argument list; completing the call without
# crashing is the point of the test (the result is then str()-printed below).
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) | /metacoder/inst/testfiles/intersect_line_rectangle/AFL_intersect_line_rectangle/intersect_line_rectangle_valgrind_files/1615768596-test.R | permissive | akhikolla/updatedatatype-list3 | R | false | false | 723 | r | testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -1998.43889941466, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) |
test_that("basic", {
  paths <- c("yy.y/xxx/123/A_1.tem", "yy.y/xxx/124/B_1.tem")

  # Default behaviour: the shared leading path ("yy.y/xxx/") and the file
  # extension are dropped; the result is named by the original paths.
  res <- unique_names(paths)
  expect_equal(unname(res), c("123/A_1", "124/B_1"))
  expect_equal(names(res), paths)

  # breaks = "": appears to shorten each name to the minimal distinguishing
  # characters around the separator -- confirm against unique_names() docs.
  res <- unique_names(paths, breaks = "")
  expect_equal(unname(res), c("3/A", "4/B"))
  expect_equal(names(res), paths)

  # remove_ext = FALSE keeps the ".tem" extension.
  res <- unique_names(paths, remove_ext = FALSE)
  expect_equal(unname(res), c("123/A_1.tem", "124/B_1.tem"))
  expect_equal(names(res), paths)
})
| /tests/testthat/test-unique_names.R | permissive | debruine/webmorphR | R | false | false | 582 | r | test_that("basic", {
paths <- c("yy.y/xxx/123/A_1.tem", "yy.y/xxx/124/B_1.tem")

# Default behaviour: the shared leading path ("yy.y/xxx/") and the file
# extension are dropped; the result is named by the original paths.
res <- unique_names(paths)
expect_equal(unname(res), c("123/A_1", "124/B_1"))
expect_equal(names(res), paths)

# breaks = "": appears to shorten each name to the minimal distinguishing
# characters around the separator -- confirm against unique_names() docs.
res <- unique_names(paths, breaks = "")
expect_equal(unname(res), c("3/A", "4/B"))
expect_equal(names(res), paths)

# remove_ext = FALSE keeps the ".tem" extension.
res <- unique_names(paths, remove_ext = FALSE)
expect_equal(unname(res), c("123/A_1.tem", "124/B_1.tem"))
expect_equal(names(res), paths)
})
|
library(DescTools)

### Name: StrTrunc
### Title: Truncate Strings and Add Ellipses If a String is Truncated.
### Aliases: StrTrunc
### Keywords: character utilities

### ** Examples

# Reproducible sample of 10 random lower-case "words", each 1 to 20 letters
# long. vapply() replaces sapply() so the result is guaranteed to be a
# character vector; the RNG draws (and hence the values) are identical.
set.seed(1789)
x <- vapply(seq_len(10),
            function(i) paste(sample(letters, sample(20, 1)), collapse = ""),
            character(1))
x

# truncate to at most 10 characters (ellipses added on truncation, per the
# function's title above)
StrTrunc(x, maxlen = 10)

# right justification
formatC(StrTrunc(x, maxlen = 10), width = 10, flag = " ")
| /data/genthat_extracted_code/DescTools/examples/StrTrunc.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 390 | r | library(DescTools)
### Name: StrTrunc
### Title: Truncate Strings and Add Ellipses If a String is Truncated.
### Aliases: StrTrunc
### Keywords: character utilities

### ** Examples

# Reproducible sample of 10 random lower-case "words", each 1 to 20 letters
# long. vapply() replaces sapply() so the result is guaranteed to be a
# character vector; the RNG draws (and hence the values) are identical.
set.seed(1789)
x <- vapply(seq_len(10),
            function(i) paste(sample(letters, sample(20, 1)), collapse = ""),
            character(1))
x

# truncate to at most 10 characters (ellipses added on truncation, per the
# function's title above)
StrTrunc(x, maxlen = 10)

# right justification
formatC(StrTrunc(x, maxlen = 10), width = 10, flag = " ")
|
library(plotly)
library(rjson)

####################################### Provide Path and Files Name ###############
FilePath <- "" # Provide Working Directory
LableFile <- "InputFiles/Test_400_Lables(7to10).csv"
LinksFile <- "InputFiles/Test_400_Links(7to10).csv"

### json file template ################################
json_data <- fromJSON(file = paste0(FilePath, "Template-Json/jsonTemplateForSankey.json"))

###################################################################################
# labels: (name, color) column pairs; links: one module column per label pair
labels <- read.csv(paste0(FilePath, LableFile), sep = ",", header = TRUE)
links <- read.csv(paste0(FilePath, LinksFile), sep = ",", header = TRUE)

# every (name, color) pair in `labels` needs exactly one column in `links`
if (ncol(labels) / 2 != ncol(links)) {
  # fixed: the original only print()ed here and then crashed further down on
  # the still-undefined variables; failing fast keeps the same message
  stop("The Sankey Plot Columns and Names Mismatch")
}

##########################################################
# Collect node labels (odd columns) and colors (even columns) for the json file
names <- NULL
colors <- NULL
for (i in seq_len(ncol(labels))) {
  if (i %% 2 == 0) {
    colors <- c(colors, labels[, i])
  } else {
    names <- c(names, labels[, i])
  }
}
# drop empty padding entries
newNames <- names[names != ""]
newColor <- colors[colors != ""]

########################################################
# Renumber the module columns into one global, zero-based node index:
# column 1 is sorted and shifted to start at 0, and every later column is
# offset by the maximum index of the column before it.
n_link_cols <- ncol(links)
newLinks <- NULL
for (n in seq_len(n_link_cols)) {
  if (n == 1) {
    links <- links[order(as.numeric(links[, n])), ]
    newLinks <- cbind(newLinks, links[, n] - 1)
  } else {
    modules <- links[, n] + max(newLinks[, n - 1])
    newLinks <- cbind(newLinks, modules)
  }
}

######################################################
# Collect the links (source, target, value) for the json file
source <- NULL
target <- NULL
value <- NULL
for (j in seq_len(n_link_cols - 1)) { # fixed: no longer reuses the leaked loop index n
  if (j == 1) {
    # first stage: one link of weight 1 per data row
    col <- newLinks[, j]
    source <- rbind(source, col)
    target <- rbind(target, newLinks[, j + 1])
    value <- rbind(value, rep(1, times = length(col)))
  } else {
    # later stages: aggregate rows per source module and count each target
    modules <- sort(unique(newLinks[, j]))
    for (k in seq_along(modules)) {
      modulesNew <- newLinks[which(newLinks[, j] == modules[k]), ]
      if (!is.null(dim(modulesNew))) {
        tab <- table(modulesNew[, j + 1])
        tab_names <- names(tab) # renamed: the original clobbered the outer `names`
        for (l in seq_along(tab)) {
          source <- cbind(source, modules[k])
          target <- cbind(target, tab_names[l])
          value <- cbind(value, tab[[l]])
        }
      } else {
        # a single matching row collapses to a plain vector
        source <- cbind(source, modulesNew[j])
        target <- cbind(target, modulesNew[j + 1])
        value <- cbind(value, 1)
      }
    }
  }
}

######## Populate the json template ##################
json_data$data[[1]]$node$label <- newNames
json_data$data[[1]]$node$color <- newColor
json_data$data[[1]]$link$source <- c(source)
json_data$data[[1]]$link$target <- c(as.double(target))
json_data$data[[1]]$link$value <- c(value)

############ Sankey plot ########################################
if (length(json_data$data[[1]]$link$source) == length(json_data$data[[1]]$link$target) &&
    length(json_data$data[[1]]$link$target) == length(json_data$data[[1]]$link$value)) {
  if (length(json_data$data[[1]]$node$label) == length(json_data$data[[1]]$node$color)) {
    sankey <- plot_ly(
      type = "sankey",
      domain = list(x = c(0, 1), y = c(0, 1)),
      orientation = "h",
      valueformat = ".0f",
      valuesuffix = " Links",
      node = list(
        label = json_data$data[[1]]$node$label,
        color = json_data$data[[1]]$node$color,
        pad = 1,
        thickness = 10,
        line = list(color = "black", width = 1)
      ),
      link = list(
        source = json_data$data[[1]]$link$source,
        target = json_data$data[[1]]$link$target,
        value = json_data$data[[1]]$link$value,
        label = json_data$data[[1]]$link$label
      )
    ) %>%
      layout(title = "", font = list(size = 8))
    sankey
  } else {
    print("Mismatching labels and colors")
  }
} else {
  print("Mismatching links length")
}
| /Sankey_AutomatedCode.R | no_license | vrrani/SankeyPlot | R | false | false | 4,491 | r | library(plotly)
library(rjson)

####################################### Provide Path and Files Name ###############
FilePath <- "" # Provide Working Directory
LableFile <- "InputFiles/Test_400_Lables(7to10).csv"
LinksFile <- "InputFiles/Test_400_Links(7to10).csv"

### json file template ################################
json_data <- fromJSON(file = paste0(FilePath, "Template-Json/jsonTemplateForSankey.json"))

###################################################################################
# labels: (name, color) column pairs; links: one module column per label pair
labels <- read.csv(paste0(FilePath, LableFile), sep = ",", header = TRUE)
links <- read.csv(paste0(FilePath, LinksFile), sep = ",", header = TRUE)

# every (name, color) pair in `labels` needs exactly one column in `links`
if (ncol(labels) / 2 != ncol(links)) {
  # fixed: the original only print()ed here and then crashed further down on
  # the still-undefined variables; failing fast keeps the same message
  stop("The Sankey Plot Columns and Names Mismatch")
}

##########################################################
# Collect node labels (odd columns) and colors (even columns) for the json file
names <- NULL
colors <- NULL
for (i in seq_len(ncol(labels))) {
  if (i %% 2 == 0) {
    colors <- c(colors, labels[, i])
  } else {
    names <- c(names, labels[, i])
  }
}
# drop empty padding entries
newNames <- names[names != ""]
newColor <- colors[colors != ""]

########################################################
# Renumber the module columns into one global, zero-based node index:
# column 1 is sorted and shifted to start at 0, and every later column is
# offset by the maximum index of the column before it.
n_link_cols <- ncol(links)
newLinks <- NULL
for (n in seq_len(n_link_cols)) {
  if (n == 1) {
    links <- links[order(as.numeric(links[, n])), ]
    newLinks <- cbind(newLinks, links[, n] - 1)
  } else {
    modules <- links[, n] + max(newLinks[, n - 1])
    newLinks <- cbind(newLinks, modules)
  }
}

######################################################
# Collect the links (source, target, value) for the json file
source <- NULL
target <- NULL
value <- NULL
for (j in seq_len(n_link_cols - 1)) { # fixed: no longer reuses the leaked loop index n
  if (j == 1) {
    # first stage: one link of weight 1 per data row
    col <- newLinks[, j]
    source <- rbind(source, col)
    target <- rbind(target, newLinks[, j + 1])
    value <- rbind(value, rep(1, times = length(col)))
  } else {
    # later stages: aggregate rows per source module and count each target
    modules <- sort(unique(newLinks[, j]))
    for (k in seq_along(modules)) {
      modulesNew <- newLinks[which(newLinks[, j] == modules[k]), ]
      if (!is.null(dim(modulesNew))) {
        tab <- table(modulesNew[, j + 1])
        tab_names <- names(tab) # renamed: the original clobbered the outer `names`
        for (l in seq_along(tab)) {
          source <- cbind(source, modules[k])
          target <- cbind(target, tab_names[l])
          value <- cbind(value, tab[[l]])
        }
      } else {
        # a single matching row collapses to a plain vector
        source <- cbind(source, modulesNew[j])
        target <- cbind(target, modulesNew[j + 1])
        value <- cbind(value, 1)
      }
    }
  }
}

######## Populate the json template ##################
json_data$data[[1]]$node$label <- newNames
json_data$data[[1]]$node$color <- newColor
json_data$data[[1]]$link$source <- c(source)
json_data$data[[1]]$link$target <- c(as.double(target))
json_data$data[[1]]$link$value <- c(value)

############ Sankey plot ########################################
if (length(json_data$data[[1]]$link$source) == length(json_data$data[[1]]$link$target) &&
    length(json_data$data[[1]]$link$target) == length(json_data$data[[1]]$link$value)) {
  if (length(json_data$data[[1]]$node$label) == length(json_data$data[[1]]$node$color)) {
    sankey <- plot_ly(
      type = "sankey",
      domain = list(x = c(0, 1), y = c(0, 1)),
      orientation = "h",
      valueformat = ".0f",
      valuesuffix = " Links",
      node = list(
        label = json_data$data[[1]]$node$label,
        color = json_data$data[[1]]$node$color,
        pad = 1,
        thickness = 10,
        line = list(color = "black", width = 1)
      ),
      link = list(
        source = json_data$data[[1]]$link$source,
        target = json_data$data[[1]]$link$target,
        value = json_data$data[[1]]$link$value,
        label = json_data$data[[1]]$link$label
      )
    ) %>%
      layout(title = "", font = list(size = 8))
    sankey
  } else {
    print("Mismatching labels and colors")
  }
} else {
  print("Mismatching links length")
}
|
#Task2(a)
data=read.csv("spotify.csv");
head(data)
density((data$Popularity))
plot(density(data$Popularity))
plot(density(data$Popularity, bw= 3))
density((data$Beats.Per.Minute))
plot(density(data$Beats.Per.Minute))
plot(density(data$Beats.Per.Minute, bw= 24))
plot(density(data$Beats.Per.Minute, bw= 0.05))
density((data$Energy))
plot(density(data$Energy))
plot(density(data$Energy, bw= 10))
plot(density(data$Energy, bw= 0.05))
density((data$Danceability))
plot(density(data$Danceability))
plot(density(data$Danceability, bw= 8))
plot(density(data$Danceability, bw= 0.05))
density((data$Loudness..dB..))
plot(density(data$Loudness..dB..))
plot(density(data$Loudness..dB.., bw= 0.05))
plot(density(data$Loudness..dB.., bw= 1.6))
density((data$Liveness))
plot(density(data$Liveness))
plot(density(data$Liveness, bw= 0.05))
plot(density(data$Liveness, bw= 4.7))
density((data$Valence.))
plot(density(data$Valence))
plot(density(data$Valence., bw= 0.05))
plot(density(data$Valence., bw= 18))
density((data$Length.))
plot(density(data$Length.))
plot(density(data$Length., bw= 0.05))
plot(density(data$Length., bw= 24))
density((data$Acousticness..))
plot(density(data$Acousticness..))
plot(density(data$Acousticness.., bw= 16))
plot(density(data$Acousticness.., bw= 0.05))
density((data$Speechiness.))
plot(density(data$Speechiness.))
plot(density(data$Speechiness., bw= 0.05))
plot(density(data$Speechiness., bw= 3))
#Task2(b)
data=read.csv("spotify.csv");
head(data)
plot(density(data$Popularity), log='x')
plot(density(data$Danceability), log='x')
plot(density(data$Energy), log='x')
plot(density(data$Length.), log='x')
plot(density(data$Beats.Per.Minute), log='x')
plot(density(data$Liveness), log='x')
plot(density(data$Acousticness..), log='x')
plot(density(data$Speechiness.), log='x')
plot(density(data$Loudness..dB..), log='x')
plot(density(data$Valence.), log='x')
| /top50spotifysong(density plot).R | no_license | Nuramaninaa/top50spotifysong | R | false | false | 2,056 | r |
# Task 2(a): kernel density estimates for the Spotify top-50 variables.
# Each variable gets: the printed density() summary, a default-bandwidth
# density plot, and extra plots at chosen bandwidths showing over- and
# under-smoothing.
songs <- read.csv("spotify.csv")
head(songs)

# Print the density summary for `values`, draw the default-bandwidth plot,
# then one plot per requested bandwidth. print() is required for the summary
# because auto-printing does not apply inside a function.
show_density_plots <- function(values, bandwidths = numeric(0)) {
  print(density(values))
  plot(density(values))
  for (bw in bandwidths) {
    plot(density(values, bw = bw))
  }
  invisible(NULL)
}

# Bandwidths to try per column, in the original plotting order.
# NOTE: the original plotted `data$Valence` (no trailing dot), which only
# worked via `$` partial matching; the real column name is "Valence.".
bandwidth_choices <- list(
  "Popularity"       = 3,
  "Beats.Per.Minute" = c(24, 0.05),
  "Energy"           = c(10, 0.05),
  "Danceability"     = c(8, 0.05),
  "Loudness..dB.."   = c(0.05, 1.6),
  "Liveness"         = c(0.05, 4.7),
  "Valence."         = c(0.05, 18),
  "Length."          = c(0.05, 24),
  "Acousticness.."   = c(16, 0.05),
  "Speechiness."     = c(0.05, 3)
)
for (column in names(bandwidth_choices)) {
  show_density_plots(songs[[column]], bandwidth_choices[[column]])
}

# Task 2(b): the same densities on a logarithmic x axis.
# (The duplicate read.csv() the original performed here was redundant.)
for (column in c("Popularity", "Danceability", "Energy", "Length.",
                 "Beats.Per.Minute", "Liveness", "Acousticness..",
                 "Speechiness.", "Loudness..dB..", "Valence.")) {
  plot(density(songs[[column]]), log = "x")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pprof.R
\name{serve_rprof}
\alias{serve_rprof}
\title{Visualize Rprof() output with pprof.}
\usage{
serve_rprof(
rprof,
host = "localhost",
port = proffer::random_port(),
browse = interactive(),
verbose = TRUE
)
}
\arguments{
\item{rprof}{Path to profiling samples generated
by \code{Rprof()} or \code{\link[=record_rprof]{record_rprof()}}.}
\item{host}{Host name. Set to \code{"localhost"} to view locally
or \code{"0.0.0.0"} to view from another machine. If you view
from another machine, the printed out URL will not be valid.
For example, if \code{pprof()} or \code{serve_pprof()} prints
"http://0.0.0.0:8080", then you need to replace 0.0.0.0
with your computer's name or IP address, e.g.
"http://my_computer.com:8080".}
\item{port}{Port number for hosting the local pprof server.
Chosen randomly by default.}
\item{browse}{Logical, whether to open a browser to view
the pprof server.}
\item{verbose}{Logical, whether to print console messages
such as the URL of the local \code{pprof} server.}
}
\value{
A \code{processx::process$new()} handle. Use this handle
to take down the server with \verb{$kill()}.
}
\description{
Use pprof to visualize profiling data
produced by \code{Rprof()} or \code{\link[=record_rprof]{record_rprof()}}.
}
\details{
Uses a local interactive server.
Navigate a browser to a URL in the message.
The server starts in a background process.
}
\examples{
if (identical(Sys.getenv("PROFFER_EXAMPLES"), "true")) {
rprof <- record_rprof(replicate(1e2, sample.int(1e4)))
# Start a pprof virtual server in the background.
px <- serve_rprof(rprof)
# Terminate the server.
px$kill()
}
}
| /man/serve_rprof.Rd | no_license | cran/proffer | R | false | true | 1,702 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pprof.R
\name{serve_rprof}
\alias{serve_rprof}
\title{Visualize Rprof() output with pprof.}
\usage{
serve_rprof(
rprof,
host = "localhost",
port = proffer::random_port(),
browse = interactive(),
verbose = TRUE
)
}
\arguments{
\item{rprof}{Path to profiling samples generated
by \code{Rprof()} or \code{\link[=record_rprof]{record_rprof()}}.}
\item{host}{Host name. Set to \code{"localhost"} to view locally
or \code{"0.0.0.0"} to view from another machine. If you view
from another machine, the printed out URL will not be valid.
For example, if \code{pprof()} or \code{serve_pprof()} prints
"http://0.0.0.0:8080", then you need to replace 0.0.0.0
with your computer's name or IP address, e.g.
"http://my_computer.com:8080".}
\item{port}{Port number for hosting the local pprof server.
Chosen randomly by default.}
\item{browse}{Logical, whether to open a browser to view
the pprof server.}
\item{verbose}{Logical, whether to print console messages
such as the URL of the local \code{pprof} server.}
}
\value{
A \code{processx::process$new()} handle. Use this handle
to take down the server with \verb{$kill()}.
}
\description{
Use pprof to visualize profiling data
produced by \code{Rprof()} or \code{\link[=record_rprof]{record_rprof()}}.
}
\details{
Uses a local interactive server.
Navigate a browser to a URL in the message.
The server starts in a background process.
}
\examples{
if (identical(Sys.getenv("PROFFER_EXAMPLES"), "true")) {
rprof <- record_rprof(replicate(1e2, sample.int(1e4)))
# Start a pprof virtual server in the background.
px <- serve_rprof(rprof)
# Terminate the server.
px$kill()
}
}
|
# Example 4.27 (Lock5): compare leniency ratings between experimental groups.
# Assumes the Smiles data set and the formula-interface helpers are available
# (presumably from the mosaic package -- confirm it is attached).
head(Smiles)
# Mean of Leniency within each level of Group (formula interface).
mean(Leniency ~ Group, data = Smiles)
# Difference between the two group means (diffmean, presumably mosaic).
diffmean(Leniency ~ Group, data = Smiles)
| /inst/snippets/Example4.27.R | no_license | klaassenj/Lock5withR | R | false | false | 94 | r | head(Smiles)
# Mean of Leniency within each level of Group (formula interface; presumably
# the mosaic package's method -- confirm mosaic is attached).
mean(Leniency ~ Group, data = Smiles)
# Difference between the two group means (diffmean, presumably mosaic).
diffmean(Leniency ~ Group, data = Smiles)
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ProbeLevelModel.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{getChipEffectSet.ProbeLevelModel}
\alias{getChipEffectSet.ProbeLevelModel}
\alias{ProbeLevelModel.getChipEffectSet}
\alias{getChipEffectSet,ProbeLevelModel-method}
\alias{ProbeLevelModel.getChipEffects}
\alias{getChipEffects.ProbeLevelModel}
\alias{getChipEffects,ProbeLevelModel-method}
\title{Gets the set of chip effects for this model}
\description{
Gets the set of chip effects for this model.
There is one chip-effect file per array.
}
\usage{
\method{getChipEffectSet}{ProbeLevelModel}(this, ..., verbose=FALSE)
}
\arguments{
\item{...}{Arguments passed to \code{getMonocellCdf()} of
\code{\link{AffymetrixCdfFile}}.}
\item{verbose}{A \code{\link[base]{logical}} or a \code{\link[R.utils]{Verbose}}.}
}
\value{
Returns a \code{\link{ChipEffectSet}} object.
}
\seealso{
For more information see \code{\link{ProbeLevelModel}}.
}
\keyword{internal}
\keyword{methods}
| /man/getChipEffectSet.ProbeLevelModel.Rd | no_license | HenrikBengtsson/aroma.affymetrix | R | false | false | 1,238 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ProbeLevelModel.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{getChipEffectSet.ProbeLevelModel}
\alias{getChipEffectSet.ProbeLevelModel}
\alias{ProbeLevelModel.getChipEffectSet}
\alias{getChipEffectSet,ProbeLevelModel-method}
\alias{ProbeLevelModel.getChipEffects}
\alias{getChipEffects.ProbeLevelModel}
\alias{getChipEffects,ProbeLevelModel-method}
\title{Gets the set of chip effects for this model}
\description{
Gets the set of chip effects for this model.
There is one chip-effect file per array.
}
\usage{
\method{getChipEffectSet}{ProbeLevelModel}(this, ..., verbose=FALSE)
}
\arguments{
\item{...}{Arguments passed to \code{getMonocellCdf()} of
\code{\link{AffymetrixCdfFile}}.}
\item{verbose}{A \code{\link[base]{logical}} or a \code{\link[R.utils]{Verbose}}.}
}
\value{
Returns a \code{\link{ChipEffectSet}} object.
}
\seealso{
For more information see \code{\link{ProbeLevelModel}}.
}
\keyword{internal}
\keyword{methods}
|
### Oneway analysis of variance with optional extras.
###
### y:            dependent variable (must be numeric / interval level).
### x:            grouping variable (coerced to factor with a warning).
### posthoc:      NULL, "tukey", "games-howell", or a p.adjust method name
###               for pairwise.t.test().
### means:        whether to compute per-group descriptives.
### fullDescribe: keep all describeBy() columns (else n/mean/sd/se/median).
### levene:       whether to run Levene's test.
### plot:         whether to build a dlvPlot of the data.
### corrections:  whether to compute Welch and Brown-Forsythe corrections.
### t:            transpose the means tables in the output.
###
### Returns an object of class 'oneway' (printed by print.oneway /
### pander.oneway) holding $input, $intermediate and $output.
oneway <- function(y, x, posthoc=NULL, means=FALSE, fullDescribe=FALSE,
                   levene=FALSE, plot=FALSE, digits=2,
                   omegasq = TRUE,
                   etasq = TRUE,
                   corrections = FALSE,
                   pvalueDigits=3, t=FALSE, conf.level=.95,
                   silent=FALSE) {
  ### Capture every argument so the print/pander methods can see the call.
  res <- list(input = as.list(environment()));
  res$input$x.name <- extractVarName(deparse(substitute(x)));
  res$input$y.name <- extractVarName(deparse(substitute(y)));
  if (!is.numeric(y)) {
    stop("The y variable (", res$input$y.name, ") is not a numeric ",
         "vector! Note that in analysis of variance, the 'y' variable ",
         "must have at least the interval level of measurement!");
  }
  ### Coerce x to factor if necessary, keeping the raw version around.
  if (!is.factor(x)) {
    if (!silent) {
      warning("### Warning: the x variable (", res$input$x.name, ") is not a ",
              "factor! Converting it myself - but note that variables in R have ",
              "data types, and it's advisable to set these adequately (use for ",
              "example 'as.factor'; see '?as.factor' for help)!");
    }
    res$input$x.raw <- x;
    x <- as.factor(x);
    res$input$x <- x;
  }
  ### Bind the data to their original names so the formula below resolves.
  assign(res$input$x.name, x);
  assign(res$input$y.name, y);
  res$intermediate <- list();
  ### Fit the ANOVA model; Anova(type=3) is presumably car::Anova -- confirm.
  res$intermediate$aov <- aov(formula(paste0(res$input$y.name, " ~ ",
                                             res$input$x.name)));
  res$intermediate$Anova <- Anova(res$intermediate$aov, type=3);
  ### Post-hoc tests: Tukey HSD, Games-Howell (via posthocTGH), or any
  ### p.adjust.method accepted by pairwise.t.test().
  if (!is.null(posthoc)) {
    if (tolower(posthoc)=="tukey") {
      res$intermediate$posthoc <- TukeyHSD(res$intermediate$aov);
    }
    else if (tolower(posthoc)=="games-howell") {
      res$intermediate$posthocTGH <- posthocTGH(y=y, x=x, method="Games-Howell");
      res$intermediate$posthoc <- res$intermediate$posthocTGH$output$games.howell;
    }
    else {
      res$intermediate$posthoc <-
        pairwise.t.test(x=y, g=x, p.adjust.method=posthoc);
    }
  }
  ### Per-group descriptives (describeBy, presumably from psych -- confirm);
  ### attributes are saved and restored because the lapply()s strip them.
  if (means) {
    res$intermediate$means <- describeBy(y, x);
    tmpAttributes <- attributes(res$intermediate$means);
    res$intermediate$means <- lapply(res$intermediate$means, function(x) {
      class(x) <- 'data.frame';
      rownames(x)[1] <- ' ';
      return(x[, colnames(x) != 'vars']);
    });
    if (!fullDescribe) {
      res$intermediate$means <- lapply(res$intermediate$means, function(x) {
        return(x[, colnames(x) %in% c('n', 'mean', 'sd', 'se', 'median')]);
      });
    }
    ### NOTE(review): `t` is TRUE here, yet lapply(..., t) still finds
    ### base::t because match.fun() looks names up with mode="function".
    ### Fragile shadowing -- confirm before touching.
    if (t) {
      res$intermediate$means <- lapply(res$intermediate$means, t);
    }
    attributes(res$intermediate$means) <- tmpAttributes;
  }
  if (levene) {
    res$intermediate$leveneTest <- leveneTest(y, group=x, center=mean);
  }
  ### Effect sizes with confidence intervals (project helpers).
  res$intermediate$etasq <- computeEffectSize_etasq(var1=x, var2=y,
                                                    conf.level=conf.level);
  res$intermediate$confIntOmegaSq <- confIntOmegaSq(var1=x, var2=y,
                                                    conf.level=conf.level);
  res$output <- list(etasq = res$intermediate$Anova$`Sum Sq`[2] /
                       sum(res$intermediate$Anova$`Sum Sq`[2:3]),
                     etasq.ci = res$intermediate$etasq$ci,
                     omegasq = res$intermediate$confIntOmegaSq$output$es,
                     omegasq.ci = res$intermediate$confIntOmegaSq$output$ci);
  ### Build the printable Anova table (rows 2:3 = effect and residuals).
  res$output$dat <- data.frame(SS = res$intermediate$Anova$`Sum Sq`[2:3],
                               Df = res$intermediate$Anova$Df[2:3]);
  res$output$dat$MS <- res$output$dat$SS / res$output$dat$Df;
  ### NOTE(review): `$F` reaches the "F value" column via `$` partial
  ### matching -- works only while no other column starts with "F"; confirm.
  res$output$dat[1, 'F'] <- res$intermediate$Anova$F[2];
  res$output$dat[1, 'p'] <- res$intermediate$Anova$`Pr(>F)`[2];
  row.names(res$output$dat) <- c('Between groups (error + effect)',
                                 'Within groups (error only)');
  if (corrections) {
    ### Welch's correction via stats::oneway.test (default var.equal=FALSE).
    res$intermediate$welch <- oneway.test(formula(paste0(res$input$y.name,
                                                         " ~ ",
                                                         res$input$x.name)));
    ### Based on Brown & Forsythe (1974), found through Field (2014)
    SSm <- res$output$dat['Between groups (error + effect)', 'SS'];
    tmpDat <- na.omit(data.frame(x=x, y=y));
    groupVariances <- as.numeric(by(tmpDat$y, tmpDat$x, var));
    groupSizes <- as.numeric(by(tmpDat$y, tmpDat$x, length));
    denominator <- sum(groupVariances * (1 - ( groupSizes / sum(groupSizes))));
    res$intermediate$brown.forsythe <- list();
    res$intermediate$brown.forsythe$F <- SSm / denominator;
    res$intermediate$brown.forsythe$Df1 <- length(groupSizes) - 1;
    ### Degrees of freedom for the denominator follow the c-value formula.
    cValues <- ((1 - ( groupSizes / sum(groupSizes))) * groupVariances) /
      (sum( (1 - ( groupSizes / sum(groupSizes))) * groupVariances ));
    inverseDf2 <- sum(cValues^2 / (groupSizes - 1));
    res$intermediate$brown.forsythe$Df2 <- 1 / inverseDf2;
    res$intermediate$brown.forsythe$p <- pf(res$intermediate$brown.forsythe$F,
                                            res$intermediate$brown.forsythe$Df1,
                                            res$intermediate$brown.forsythe$Df2,
                                            lower.tail=FALSE);
  }
  ### Optional plot built with the project's dlvPlot + ggplot2 title.
  if (plot) {
    res$intermediate$dat <- data.frame(x, y);
    names(res$intermediate$dat) <- c(res$input$x.name, res$input$y.name);
    res$output$plot <- dlvPlot(res$intermediate$dat,
                               x=res$input$x.name,
                               y=res$input$y.name)$plot +
      ggtitle(paste0(res$input$x.name, " and ",
                     res$input$y.name));
  }
  class(res) <- 'oneway';
  return(res);
}
### print() method for objects of class 'oneway' (see oneway()).
###
### x:            an object of class 'oneway'.
### digits:       rounding digits (default: as requested in the oneway() call).
### pvalueDigits: digits for formatted p-values.
### na.print:     string used for missing cells of the printed Anova table.
###
### Prints the Anova table plus any requested extras: effect sizes, per-group
### means, Levene's test, post-hoc tests, and variance-heterogeneity
### corrections. Output goes to the console via cat()/print().
print.oneway <- function(x, digits=x$input$digits,
                         pvalueDigits=x$input$pvalueDigits,
                         na.print="", ...) {
  ### Show the plot first, if one was built.
  if (x$input$plot) {
    print(x$output$plot);
  }
  cat(paste0("### Oneway Anova for y=", x$input$y.name,
             " and x=", x$input$x.name, " (groups: ",
             paste0(levels(x$input$x), collapse=", "),
             ")\n\n"));
  ### Effect sizes: omega squared and/or eta squared with CIs.
  if (x$input$omegasq) {
    print(x$intermediate$confIntOmegaSq, digits=digits);
    cat('\n');
  }
  if (x$input$etasq) {
    cat(paste0("Eta Squared: ", round(x$input$conf.level * 100),
               "% CI = [", formatR(x$output$etasq.ci[1], digits=digits),
               "; ", formatR(x$output$etasq.ci[2], digits=digits),
               "], point estimate = ", formatR(x$output$etasq, digits=digits), "\n"));
  }
  ### Scalar condition: short-circuit `||` instead of elementwise `|`.
  if (x$input$omegasq || x$input$etasq) {
    cat('\n');
  }
  x$output$dat[, 1:4] <- round(x$output$dat[, 1:4], digits);
  ### Format p-values nicely
  x$output$dat$p <- formatPvalue(x$output$dat$p,
                                 digits=pvalueDigits,
                                 includeP=FALSE);
  ### Temporarily store row names and transform everything to character
  tmpRowNames <- row.names(x$output$dat);
  x$output$dat <- data.frame(lapply(x$output$dat, as.character));
  row.names(x$output$dat) <- tmpRowNames;
  if (x$input$t) {
    print(t(x$output$dat), na.print=na.print, quote=FALSE);
  } else {
    print(x$output$dat, na.print=na.print, quote=FALSE);
  }
  cat("\n");
  ### Per-group descriptives, if they were requested in the oneway() call.
  if (x$input$means) {
    cat(paste0("### Means for y (", x$input$y.name, ") separate for each level of x (", x$input$x.name, "):\n"));
    lapply(1:length(x$intermediate$means), function(index) {
      cat0("\n", x$input$x.name, " = ", names(x$intermediate$means[index]), ":\n");
      print(x$intermediate$means[[index]], digits=digits);
    });
  }
  if (x$input$levene) {
    cat0("\n### Levene's test for homogeneity of variance:\n\n",
         "F[", x$intermediate$leveneTest[1, 1],
         ", ", x$intermediate$leveneTest[2, 1],
         "] = ", round(x$intermediate$leveneTest[1, 2], digits),
         ", ", formatPvalue(x$intermediate$leveneTest[1, 3], digits=digits+1),
         ".\n");
  }
  ### Post-hoc output: Games-Howell and Tukey objects need per-column
  ### rounding; anything else is a pairwise.t.test result.
  if (!is.null(x$input$posthoc)) {
    cat0("\n### Post hoc test: ", x$input$posthoc,"\n\n");
    if (x$input$posthoc %IN% c('games-howell')) {
      x$intermediate$posthoc <- as.data.frame(x$intermediate$posthoc);
      x$intermediate$posthoc[, 1:(ncol(x$intermediate$posthoc)-1)] <-
        round(x$intermediate$posthoc[, 1:(ncol(x$intermediate$posthoc)-1)], digits);
      x$intermediate$posthoc[, ncol(x$intermediate$posthoc)] <-
        formatPvalue(x$intermediate$posthoc[, ncol(x$intermediate$posthoc)], digits=digits+1, includeP=FALSE);
      print(x$intermediate$posthoc, quote=FALSE);
    }
    else if (x$input$posthoc %IN% c('tukey')) {
      x$intermediate$posthoc <- lapply(x$intermediate$posthoc, function(x) {
        x[, 1:(ncol(x)-1)] <- round(x[, 1:(ncol(x)-1)], digits);
        x[, ncol(x)] <- formatPvalue(x[,ncol(x)], digits=digits+1, includeP=FALSE);
        return(x);
      });
      print(x$intermediate$posthoc[[1]], quote=FALSE);
    }
    else {
      x$intermediate$posthoc$p.value <- formatPvalue(x$intermediate$posthoc$p.value, digits=pvalueDigits, includeP=FALSE);
      print(x$intermediate$posthoc$p.value, quote=FALSE, na.print="");
    }
  }
  ### Welch and Brown-Forsythe corrections computed by oneway().
  if (x$input$corrections) {
    cat0("\n### Welch correction for nonhomogeneous variances:\n\n",
         "F[", x$intermediate$welch$parameter[1],
         ", ", round(x$intermediate$welch$parameter[2], digits),
         "] = ", round(x$intermediate$welch$statistic, digits),
         ", ", formatPvalue(x$intermediate$welch$p.value, digits=digits+1),
         ".\n");
    cat0("\n### Brown-Forsythe correction for nonhomogeneous variances:\n\n",
         "F[", x$intermediate$brown.forsythe$Df1,
         ", ", round(x$intermediate$brown.forsythe$Df2, digits),
         "] = ", round(x$intermediate$brown.forsythe$F, digits),
         ", ", formatPvalue(x$intermediate$brown.forsythe$p, digits=digits+1),
         ".\n");
  }
}
### pander (markdown) method for objects of class 'oneway' (see oneway()).
###
### x:            an object of class 'oneway'.
### digits:       rounding digits (default: as requested in the oneway() call).
### pvalueDigits: digits for formatted p-values.
### headerStyle:  markdown wrapper for section headers (default bold "**").
### na.print:     string used for missing cells of the Anova table.
###
### Emits the same report as print.oneway, but as pander/markdown output
### (tables via pander(), subscripts via <sub> tags).
pander.oneway <- function(x, digits=x$input$digits,
                          pvalueDigits=x$input$pvalueDigits,
                          headerStyle = "**",
                          na.print="", ...) {
  cat0("\n\n", headerStyle, "Oneway Anova for y=", x$input$y.name,
       " and x=", x$input$x.name, " (groups: ",
       vecTxt(levels(x$input$x)),
       ")", headerStyle, "\n\n");
  ### Effect sizes: omega squared and/or eta squared with CIs.
  if (x$input$omegasq) {
    print(x$intermediate$confIntOmegaSq, digits=digits);
    cat(' \n');
  }
  if (x$input$etasq) {
    cat0("Eta Squared: ", round(x$input$conf.level * 100),
         "% CI = [", formatR(x$output$etasq.ci[1], digits=digits),
         "; ", formatR(x$output$etasq.ci[2], digits=digits),
         "], point estimate = ", formatR(x$output$etasq, digits=digits));
  }
  ### Scalar condition: short-circuit `||` instead of elementwise `|`.
  if (x$input$omegasq || x$input$etasq) {
    cat('\n\n');
  }
  x$output$dat[, 1:4] <- round(x$output$dat[, 1:4], digits);
  ### Format p-values nicely
  x$output$dat$p <- formatPvalue(x$output$dat$p,
                                 digits=pvalueDigits,
                                 includeP=FALSE);
  ### Temporarily store row names and transform everything to character
  tmpRowNames <- row.names(x$output$dat);
  x$output$dat <- data.frame(lapply(x$output$dat, as.character));
  row.names(x$output$dat) <- tmpRowNames;
  if (x$input$t) {
    pander(t(x$output$dat), missing="");
  } else {
    pander(x$output$dat, missing="");
  }
  ### The plot is drawn via grid, since pander output is usually knitted.
  if (x$input$plot) {
    grid.draw(x$output$plot);
  }
  ### Per-group descriptives, if requested in the oneway() call.
  if (x$input$means) {
    cat0("\n\n", headerStyle, "Means for y (", x$input$y.name,
         ") separate for each level of x (", x$input$x.name, "):",
         headerStyle);
    lapply(1:length(x$intermediate$means), function(index) {
      cat0("\n\n", x$input$x.name, " = ",
           names(x$intermediate$means[index]), ": \n\n");
      pander(x$intermediate$means[[index]], digits=digits);
    });
    cat("\n");
  }
  if (x$input$levene) {
    cat0("\n\n", headerStyle, "Levene's test for homogeneity of variance:",
         headerStyle, "\n\n",
         "F<sub>", x$intermediate$leveneTest[1, 1],
         ", ", x$intermediate$leveneTest[2, 1],
         "</sub> = ", round(x$intermediate$leveneTest[1, 2], digits),
         ", ", formatPvalue(x$intermediate$leveneTest[1, 3], digits=digits+1),
         ".\n");
  }
  ### Post-hoc output: Games-Howell and Tukey objects need per-column
  ### rounding; anything else is a pairwise.t.test result.
  if (!is.null(x$input$posthoc)) {
    cat(paste0("\n\n", headerStyle, "Post hoc test: ", x$input$posthoc,
               headerStyle, "\n\n"));
    if (x$input$posthoc %IN% c('games-howell')) {
      x$intermediate$posthoc <- as.data.frame(x$intermediate$posthoc);
      x$intermediate$posthoc[, 1:(ncol(x$intermediate$posthoc)-1)] <-
        round(x$intermediate$posthoc[, 1:(ncol(x$intermediate$posthoc)-1)], digits);
      x$intermediate$posthoc[, ncol(x$intermediate$posthoc)] <-
        formatPvalue(x$intermediate$posthoc[, ncol(x$intermediate$posthoc)], digits=digits+1, includeP=FALSE);
      pander(x$intermediate$posthoc, missing="");
    }
    else if (x$input$posthoc %IN% c('tukey')) {
      x$intermediate$posthoc <- lapply(x$intermediate$posthoc, function(x) {
        x[, 1:(ncol(x)-1)] <- round(x[, 1:(ncol(x)-1)], digits);
        x[, ncol(x)] <- formatPvalue(x[,ncol(x)], digits=digits+1, includeP=FALSE);
        return(x);
      });
      pander(x$intermediate$posthoc[[1]], missing="");
    }
    else {
      x$intermediate$posthoc$p.value <- formatPvalue(x$intermediate$posthoc$p.value, digits=pvalueDigits, includeP=FALSE);
      pander(x$intermediate$posthoc$p.value, missing="");
    }
  }
  ### Welch and Brown-Forsythe corrections computed by oneway().
  if (x$input$corrections) {
    cat0("\n\n", headerStyle, "Welch correction for nonhomogeneous variances:",
         headerStyle, "\n\n",
         "F<sub>", x$intermediate$welch$parameter[1],
         ", ", round(x$intermediate$welch$parameter[2], digits),
         "</sub> = ", round(x$intermediate$welch$statistic, digits),
         ", ", formatPvalue(x$intermediate$welch$p.value, digits=digits+1),
         ".");
    cat0("\n\n", headerStyle, "Brown-Forsythe correction for nonhomogeneous variances:",
         headerStyle, "\n\n",
         "F<sub>", x$intermediate$brown.forsythe$Df1,
         ", ", round(x$intermediate$brown.forsythe$Df2, digits),
         "</sub> = ", round(x$intermediate$brown.forsythe$F, digits),
         ", ", formatPvalue(x$intermediate$brown.forsythe$p, digits=digits+1),
         ".\n");
  }
  cat("\n\n");
}
| /R/oneway.R | no_license | limegreen/userfriendlyscience | R | false | false | 14,193 | r | oneway <- function(y, x, posthoc=NULL, means=FALSE, fullDescribe=FALSE,
                   levene=FALSE, plot=FALSE, digits=2,
                   omegasq = TRUE,
                   etasq = TRUE,
                   corrections = FALSE,
                   pvalueDigits=3, t=FALSE, conf.level=.95,
                   silent=FALSE) {
  ### Oneway ANOVA: validates inputs, fits the model, and optionally adds
  ### post-hoc tests, per-group means, Levene's test, effect sizes and
  ### variance-heterogeneity corrections. Returns a 'oneway' object.
  ### Capture every argument so the print/pander methods can see the call.
  res <- list(input = as.list(environment()));
  res$input$x.name <- extractVarName(deparse(substitute(x)));
  res$input$y.name <- extractVarName(deparse(substitute(y)));
  if (!is.numeric(y)) {
    stop("The y variable (", res$input$y.name, ") is not a numeric ",
         "vector! Note that in analysis of variance, the 'y' variable ",
         "must have at least the interval level of measurement!");
  }
  ### Coerce x to factor if necessary, keeping the raw version around.
  if (!is.factor(x)) {
    if (!silent) {
      warning("### Warning: the x variable (", res$input$x.name, ") is not a ",
              "factor! Converting it myself - but note that variables in R have ",
              "data types, and it's advisable to set these adequately (use for ",
              "example 'as.factor'; see '?as.factor' for help)!");
    }
    res$input$x.raw <- x;
    x <- as.factor(x);
    res$input$x <- x;
  }
  ### Bind the data to their original names so the formula below resolves.
  assign(res$input$x.name, x);
  assign(res$input$y.name, y);
  res$intermediate <- list();
  ### Fit the ANOVA model; Anova(type=3) is presumably car::Anova -- confirm.
  res$intermediate$aov <- aov(formula(paste0(res$input$y.name, " ~ ",
                                             res$input$x.name)));
  res$intermediate$Anova <- Anova(res$intermediate$aov, type=3);
  ### Post-hoc tests: Tukey HSD, Games-Howell (via posthocTGH), or any
  ### p.adjust.method accepted by pairwise.t.test().
  if (!is.null(posthoc)) {
    if (tolower(posthoc)=="tukey") {
      res$intermediate$posthoc <- TukeyHSD(res$intermediate$aov);
    }
    else if (tolower(posthoc)=="games-howell") {
      res$intermediate$posthocTGH <- posthocTGH(y=y, x=x, method="Games-Howell");
      res$intermediate$posthoc <- res$intermediate$posthocTGH$output$games.howell;
    }
    else {
      res$intermediate$posthoc <-
        pairwise.t.test(x=y, g=x, p.adjust.method=posthoc);
    }
  }
  ### Per-group descriptives (describeBy, presumably from psych -- confirm);
  ### attributes are saved and restored because the lapply()s strip them.
  if (means) {
    res$intermediate$means <- describeBy(y, x);
    tmpAttributes <- attributes(res$intermediate$means);
    res$intermediate$means <- lapply(res$intermediate$means, function(x) {
      class(x) <- 'data.frame';
      rownames(x)[1] <- ' ';
      return(x[, colnames(x) != 'vars']);
    });
    if (!fullDescribe) {
      res$intermediate$means <- lapply(res$intermediate$means, function(x) {
        return(x[, colnames(x) %in% c('n', 'mean', 'sd', 'se', 'median')]);
      });
    }
    ### NOTE(review): `t` is TRUE here, yet lapply(..., t) still finds
    ### base::t because match.fun() looks names up with mode="function".
    ### Fragile shadowing -- confirm before touching.
    if (t) {
      res$intermediate$means <- lapply(res$intermediate$means, t);
    }
    attributes(res$intermediate$means) <- tmpAttributes;
  }
  if (levene) {
    res$intermediate$leveneTest <- leveneTest(y, group=x, center=mean);
  }
  ### Effect sizes with confidence intervals (project helpers).
  res$intermediate$etasq <- computeEffectSize_etasq(var1=x, var2=y,
                                                    conf.level=conf.level);
  res$intermediate$confIntOmegaSq <- confIntOmegaSq(var1=x, var2=y,
                                                    conf.level=conf.level);
  res$output <- list(etasq = res$intermediate$Anova$`Sum Sq`[2] /
                       sum(res$intermediate$Anova$`Sum Sq`[2:3]),
                     etasq.ci = res$intermediate$etasq$ci,
                     omegasq = res$intermediate$confIntOmegaSq$output$es,
                     omegasq.ci = res$intermediate$confIntOmegaSq$output$ci);
  ### Build the printable Anova table (rows 2:3 = effect and residuals).
  res$output$dat <- data.frame(SS = res$intermediate$Anova$`Sum Sq`[2:3],
                               Df = res$intermediate$Anova$Df[2:3]);
  res$output$dat$MS <- res$output$dat$SS / res$output$dat$Df;
  ### NOTE(review): `$F` reaches the "F value" column via `$` partial
  ### matching -- works only while no other column starts with "F"; confirm.
  res$output$dat[1, 'F'] <- res$intermediate$Anova$F[2];
  res$output$dat[1, 'p'] <- res$intermediate$Anova$`Pr(>F)`[2];
  row.names(res$output$dat) <- c('Between groups (error + effect)',
                                 'Within groups (error only)');
  if (corrections) {
    ### Welch's correction via stats::oneway.test (default var.equal=FALSE).
    res$intermediate$welch <- oneway.test(formula(paste0(res$input$y.name,
                                                         " ~ ",
                                                         res$input$x.name)));
    ### Based on Brown & Forsythe (1974), found through Field (2014)
    SSm <- res$output$dat['Between groups (error + effect)', 'SS'];
    tmpDat <- na.omit(data.frame(x=x, y=y));
    groupVariances <- as.numeric(by(tmpDat$y, tmpDat$x, var));
    groupSizes <- as.numeric(by(tmpDat$y, tmpDat$x, length));
    denominator <- sum(groupVariances * (1 - ( groupSizes / sum(groupSizes))));
    res$intermediate$brown.forsythe <- list();
    res$intermediate$brown.forsythe$F <- SSm / denominator;
    res$intermediate$brown.forsythe$Df1 <- length(groupSizes) - 1;
    ### Degrees of freedom for the denominator follow the c-value formula.
    cValues <- ((1 - ( groupSizes / sum(groupSizes))) * groupVariances) /
      (sum( (1 - ( groupSizes / sum(groupSizes))) * groupVariances ));
    inverseDf2 <- sum(cValues^2 / (groupSizes - 1));
    res$intermediate$brown.forsythe$Df2 <- 1 / inverseDf2;
    res$intermediate$brown.forsythe$p <- pf(res$intermediate$brown.forsythe$F,
                                            res$intermediate$brown.forsythe$Df1,
                                            res$intermediate$brown.forsythe$Df2,
                                            lower.tail=FALSE);
  }
  ### Optional plot built with the project's dlvPlot + ggplot2 title.
  if (plot) {
    res$intermediate$dat <- data.frame(x, y);
    names(res$intermediate$dat) <- c(res$input$x.name, res$input$y.name);
    res$output$plot <- dlvPlot(res$intermediate$dat,
                               x=res$input$x.name,
                               y=res$input$y.name)$plot +
      ggtitle(paste0(res$input$x.name, " and ",
                     res$input$y.name));
  }
  class(res) <- 'oneway';
  return(res);
}
### print() method for objects of class 'oneway' (see oneway()).
###
### x:            an object of class 'oneway'.
### digits:       rounding digits (default: as requested in the oneway() call).
### pvalueDigits: digits for formatted p-values.
### na.print:     string used for missing cells of the printed Anova table.
###
### Prints the Anova table plus any requested extras: effect sizes, per-group
### means, Levene's test, post-hoc tests, and variance-heterogeneity
### corrections. Output goes to the console via cat()/print().
print.oneway <- function(x, digits=x$input$digits,
                         pvalueDigits=x$input$pvalueDigits,
                         na.print="", ...) {
  ### Show the plot first, if one was built.
  if (x$input$plot) {
    print(x$output$plot);
  }
  cat(paste0("### Oneway Anova for y=", x$input$y.name,
             " and x=", x$input$x.name, " (groups: ",
             paste0(levels(x$input$x), collapse=", "),
             ")\n\n"));
  ### Effect sizes: omega squared and/or eta squared with CIs.
  if (x$input$omegasq) {
    print(x$intermediate$confIntOmegaSq, digits=digits);
    cat('\n');
  }
  if (x$input$etasq) {
    cat(paste0("Eta Squared: ", round(x$input$conf.level * 100),
               "% CI = [", formatR(x$output$etasq.ci[1], digits=digits),
               "; ", formatR(x$output$etasq.ci[2], digits=digits),
               "], point estimate = ", formatR(x$output$etasq, digits=digits), "\n"));
  }
  ### Scalar condition: short-circuit `||` instead of elementwise `|`.
  if (x$input$omegasq || x$input$etasq) {
    cat('\n');
  }
  x$output$dat[, 1:4] <- round(x$output$dat[, 1:4], digits);
  ### Format p-values nicely
  x$output$dat$p <- formatPvalue(x$output$dat$p,
                                 digits=pvalueDigits,
                                 includeP=FALSE);
  ### Temporarily store row names and transform everything to character
  tmpRowNames <- row.names(x$output$dat);
  x$output$dat <- data.frame(lapply(x$output$dat, as.character));
  row.names(x$output$dat) <- tmpRowNames;
  if (x$input$t) {
    print(t(x$output$dat), na.print=na.print, quote=FALSE);
  } else {
    print(x$output$dat, na.print=na.print, quote=FALSE);
  }
  cat("\n");
  ### Per-group descriptives, if they were requested in the oneway() call.
  if (x$input$means) {
    cat(paste0("### Means for y (", x$input$y.name, ") separate for each level of x (", x$input$x.name, "):\n"));
    lapply(1:length(x$intermediate$means), function(index) {
      cat0("\n", x$input$x.name, " = ", names(x$intermediate$means[index]), ":\n");
      print(x$intermediate$means[[index]], digits=digits);
    });
  }
  if (x$input$levene) {
    cat0("\n### Levene's test for homogeneity of variance:\n\n",
         "F[", x$intermediate$leveneTest[1, 1],
         ", ", x$intermediate$leveneTest[2, 1],
         "] = ", round(x$intermediate$leveneTest[1, 2], digits),
         ", ", formatPvalue(x$intermediate$leveneTest[1, 3], digits=digits+1),
         ".\n");
  }
  ### Post-hoc output: Games-Howell and Tukey objects need per-column
  ### rounding; anything else is a pairwise.t.test result.
  if (!is.null(x$input$posthoc)) {
    cat0("\n### Post hoc test: ", x$input$posthoc,"\n\n");
    if (x$input$posthoc %IN% c('games-howell')) {
      x$intermediate$posthoc <- as.data.frame(x$intermediate$posthoc);
      x$intermediate$posthoc[, 1:(ncol(x$intermediate$posthoc)-1)] <-
        round(x$intermediate$posthoc[, 1:(ncol(x$intermediate$posthoc)-1)], digits);
      x$intermediate$posthoc[, ncol(x$intermediate$posthoc)] <-
        formatPvalue(x$intermediate$posthoc[, ncol(x$intermediate$posthoc)], digits=digits+1, includeP=FALSE);
      print(x$intermediate$posthoc, quote=FALSE);
    }
    else if (x$input$posthoc %IN% c('tukey')) {
      x$intermediate$posthoc <- lapply(x$intermediate$posthoc, function(x) {
        x[, 1:(ncol(x)-1)] <- round(x[, 1:(ncol(x)-1)], digits);
        x[, ncol(x)] <- formatPvalue(x[,ncol(x)], digits=digits+1, includeP=FALSE);
        return(x);
      });
      print(x$intermediate$posthoc[[1]], quote=FALSE);
    }
    else {
      x$intermediate$posthoc$p.value <- formatPvalue(x$intermediate$posthoc$p.value, digits=pvalueDigits, includeP=FALSE);
      print(x$intermediate$posthoc$p.value, quote=FALSE, na.print="");
    }
  }
  ### Welch and Brown-Forsythe corrections computed by oneway().
  if (x$input$corrections) {
    cat0("\n### Welch correction for nonhomogeneous variances:\n\n",
         "F[", x$intermediate$welch$parameter[1],
         ", ", round(x$intermediate$welch$parameter[2], digits),
         "] = ", round(x$intermediate$welch$statistic, digits),
         ", ", formatPvalue(x$intermediate$welch$p.value, digits=digits+1),
         ".\n");
    cat0("\n### Brown-Forsythe correction for nonhomogeneous variances:\n\n",
         "F[", x$intermediate$brown.forsythe$Df1,
         ", ", round(x$intermediate$brown.forsythe$Df2, digits),
         "] = ", round(x$intermediate$brown.forsythe$F, digits),
         ", ", formatPvalue(x$intermediate$brown.forsythe$p, digits=digits+1),
         ".\n");
  }
}
### pander (markdown) method for objects of class 'oneway' (see oneway()).
###
### x:            an object of class 'oneway'.
### digits:       rounding digits (default: as requested in the oneway() call).
### pvalueDigits: digits for formatted p-values.
### headerStyle:  markdown wrapper for section headers (default bold "**").
### na.print:     string used for missing cells of the Anova table.
###
### Emits the same report as print.oneway, but as pander/markdown output
### (tables via pander(), subscripts via <sub> tags).
pander.oneway <- function(x, digits=x$input$digits,
                          pvalueDigits=x$input$pvalueDigits,
                          headerStyle = "**",
                          na.print="", ...) {
  cat0("\n\n", headerStyle, "Oneway Anova for y=", x$input$y.name,
       " and x=", x$input$x.name, " (groups: ",
       vecTxt(levels(x$input$x)),
       ")", headerStyle, "\n\n");
  ### Effect sizes: omega squared and/or eta squared with CIs.
  if (x$input$omegasq) {
    print(x$intermediate$confIntOmegaSq, digits=digits);
    cat(' \n');
  }
  if (x$input$etasq) {
    cat0("Eta Squared: ", round(x$input$conf.level * 100),
         "% CI = [", formatR(x$output$etasq.ci[1], digits=digits),
         "; ", formatR(x$output$etasq.ci[2], digits=digits),
         "], point estimate = ", formatR(x$output$etasq, digits=digits));
  }
  ### Scalar condition: short-circuit `||` instead of elementwise `|`.
  if (x$input$omegasq || x$input$etasq) {
    cat('\n\n');
  }
  x$output$dat[, 1:4] <- round(x$output$dat[, 1:4], digits);
  ### Format p-values nicely
  x$output$dat$p <- formatPvalue(x$output$dat$p,
                                 digits=pvalueDigits,
                                 includeP=FALSE);
  ### Temporarily store row names and transform everything to character
  tmpRowNames <- row.names(x$output$dat);
  x$output$dat <- data.frame(lapply(x$output$dat, as.character));
  row.names(x$output$dat) <- tmpRowNames;
  if (x$input$t) {
    pander(t(x$output$dat), missing="");
  } else {
    pander(x$output$dat, missing="");
  }
  ### The plot is drawn via grid, since pander output is usually knitted.
  if (x$input$plot) {
    grid.draw(x$output$plot);
  }
  ### Per-group descriptives, if requested in the oneway() call.
  if (x$input$means) {
    cat0("\n\n", headerStyle, "Means for y (", x$input$y.name,
         ") separate for each level of x (", x$input$x.name, "):",
         headerStyle);
    lapply(1:length(x$intermediate$means), function(index) {
      cat0("\n\n", x$input$x.name, " = ",
           names(x$intermediate$means[index]), ": \n\n");
      pander(x$intermediate$means[[index]], digits=digits);
    });
    cat("\n");
  }
  if (x$input$levene) {
    cat0("\n\n", headerStyle, "Levene's test for homogeneity of variance:",
         headerStyle, "\n\n",
         "F<sub>", x$intermediate$leveneTest[1, 1],
         ", ", x$intermediate$leveneTest[2, 1],
         "</sub> = ", round(x$intermediate$leveneTest[1, 2], digits),
         ", ", formatPvalue(x$intermediate$leveneTest[1, 3], digits=digits+1),
         ".\n");
  }
  ### Post-hoc output: Games-Howell and Tukey objects need per-column
  ### rounding; anything else is a pairwise.t.test result.
  if (!is.null(x$input$posthoc)) {
    cat(paste0("\n\n", headerStyle, "Post hoc test: ", x$input$posthoc,
               headerStyle, "\n\n"));
    if (x$input$posthoc %IN% c('games-howell')) {
      x$intermediate$posthoc <- as.data.frame(x$intermediate$posthoc);
      x$intermediate$posthoc[, 1:(ncol(x$intermediate$posthoc)-1)] <-
        round(x$intermediate$posthoc[, 1:(ncol(x$intermediate$posthoc)-1)], digits);
      x$intermediate$posthoc[, ncol(x$intermediate$posthoc)] <-
        formatPvalue(x$intermediate$posthoc[, ncol(x$intermediate$posthoc)], digits=digits+1, includeP=FALSE);
      pander(x$intermediate$posthoc, missing="");
    }
    else if (x$input$posthoc %IN% c('tukey')) {
      x$intermediate$posthoc <- lapply(x$intermediate$posthoc, function(x) {
        x[, 1:(ncol(x)-1)] <- round(x[, 1:(ncol(x)-1)], digits);
        x[, ncol(x)] <- formatPvalue(x[,ncol(x)], digits=digits+1, includeP=FALSE);
        return(x);
      });
      pander(x$intermediate$posthoc[[1]], missing="");
    }
    else {
      x$intermediate$posthoc$p.value <- formatPvalue(x$intermediate$posthoc$p.value, digits=pvalueDigits, includeP=FALSE);
      pander(x$intermediate$posthoc$p.value, missing="");
    }
  }
  ### Welch and Brown-Forsythe corrections computed by oneway().
  if (x$input$corrections) {
    cat0("\n\n", headerStyle, "Welch correction for nonhomogeneous variances:",
         headerStyle, "\n\n",
         "F<sub>", x$intermediate$welch$parameter[1],
         ", ", round(x$intermediate$welch$parameter[2], digits),
         "</sub> = ", round(x$intermediate$welch$statistic, digits),
         ", ", formatPvalue(x$intermediate$welch$p.value, digits=digits+1),
         ".");
    cat0("\n\n", headerStyle, "Brown-Forsythe correction for nonhomogeneous variances:",
         headerStyle, "\n\n",
         "F<sub>", x$intermediate$brown.forsythe$Df1,
         ", ", round(x$intermediate$brown.forsythe$Df2, digits),
         "</sub> = ", round(x$intermediate$brown.forsythe$F, digits),
         ", ", formatPvalue(x$intermediate$brown.forsythe$p, digits=digits+1),
         ".\n");
  }
  cat("\n\n");
}
|
# Inner function to make a sample-statistics object.
#
# Builds a psychonetrics samplestats object either from raw data ('data') or
# from summary statistics ('covs' + 'means' + 'nobs'). Per group it stores the
# ML (denominator n) covariance matrix, the correlation matrix, means,
# thresholds (for ordinal variables), sums-of-squares matrices, sample sizes,
# and optionally FIML missing-data patterns, the raw data, and WLS weights.
samplestats_norawts <- function(
  data,                   # Dataset (data frame or matrix); may be missing if 'covs' is used
  vars,                   # Character vector of variable names; extracted from 'data' if missing
  ordered = character(0), # Ordinal variables (TRUE = all); only supported with raw data
  groups,                 # Group variable name (raw data) or vector of group names (covs input); ignored if missing
  covs,                   # Alternative input: covariances (array nvar * nvar * ngroup, list, or single matrix)
  means,                  # Alternative input: means (matrix nvar * ngroup or list)
  nobs,                   # Sample size per group (length ngroup); required when 'data' is missing
  missing = c("listwise","pairwise"),  # Missing-data handling for covariance estimation
  fimldata = FALSE,       # Store FIML summary statistics?
  verbose = TRUE,
  storedata = FALSE,      # Store the raw data in the returned object?
  weightsmatrix = "none", # "none", "identity", "full", "diag", or a user-supplied matrix/list
  meanstructure = TRUE,
  covtype = c("choose","ML","UB"),     # Denominator used in the *input* covariances (n vs n-1)
  corinput,               # TRUE if input is a correlation matrix; auto-detected if missing
  standardize = c("none","z","quantile"),  # Optional standardization of raw data
  fullFIML = FALSE        # If estimator is FIML, do full (row-wise) FIML. Usually not needed.
){
  missing <- match.arg(missing)
  covtype <- match.arg(covtype)
  standardize <- match.arg(standardize)

  # Correlation input implies an ML-type (denominator n) matrix:
  if (!missing(corinput)){
    if (isTRUE(corinput)){
      if (covtype == "UB"){
        warning("Setting covtype = 'ML' because corinput = TRUE")
      }
      covtype <- "ML"
    }
  }

  # Input checks (scalar conditions, so use && throughout):
  if (missing(data) && length(ordered) > 0){
    stop("Ordinal data only supported with raw data as input.")
  }
  if (missing(data) && missing(covs)){
    stop("'data' and 'covs' may not both be missing")
  }
  if (!missing(data) && !missing(covs)){
    stop("'data' and 'covs' may not both *not* be missing")
  }
  if (missing(data) && storedata){
    stop("'data' may not be missing if 'storedata = TRUE'")
  }
  if (missing(data) && is.character(weightsmatrix) && weightsmatrix %in% c("full","diag")){
    # BUG FIX: corrected "esitmator" typo in the error message.
    stop("'data' may not be missing if estimator = 'WLS' or estimator = 'DWLS'")
  }

  # If raw data is supplied:
  if (!missing(data) && !is.null(data)){
    if (!is.data.frame(data) && !is.matrix(data)){
      stop("'data' must be a data frame or matrix")
    }
    if (is.matrix(data)){
      data <- as.data.frame(data)
    }
    # If the group variable is missing, add a dummy single group:
    if (missing(groups) || is.null(groups)){
      groups <- "group"
      data[[groups]] <- "fullsample"
    }
    # Extract group names and recode the group column to integer codes 1..nGroup:
    groupNames <- unique(data[[groups]])
    nGroup <- length(groupNames)
    data[[groups]] <- match(data[[groups]], groupNames)
    # If vars is missing, use every column except the group variable:
    if (missing(vars)){
      vars <- names(data[, names(data) != groups])
    }
    # Number of variables:
    nVars <- length(vars)
    # Remove all rows that are missing on every variable:
    data <- data[rowSums(is.na(data[, vars])) < nVars, ]
    # Standardize the data if requested:
    if (standardize == "z"){
      for (v in seq_along(vars)){
        data[, vars[v]] <- (data[, vars[v]] - mean(data[, vars[v]], na.rm = TRUE)) / sd(data[, vars[v]], na.rm = TRUE)
      }
    } else if (standardize == "quantile"){
      for (v in seq_along(vars)){
        data[, vars[v]] <- quantiletransform(data[, vars[v]])
      }
    }
    # If missing is listwise (and no FIML), drop every row with any NA:
    if (missing == "listwise" && !fimldata){
      data <- data[rowSums(is.na(data[, c(vars)])) == 0, ]
    }
    # If ordered is TRUE, treat all variables as ordered:
    if (isTRUE(ordered)){
      ordered <- vars
    }
    # Either none or all variables may be ordered:
    # FIXME: ADD POLYSERIALS LATER!!!
    if (!sum(vars %in% ordered) == 0 && !sum(vars %in% ordered) == length(vars)){
      stop("Either all variables or no variables may be ordered...")
    }
    # Do we need to compute a WLS weights matrix from the data?
    if (length(ordered) > 0 && is.character(weightsmatrix)){
      needWLSV <- TRUE
      weightsmatrix <- list()
    } else {
      needWLSV <- FALSE
    }
    # Create covs and means arguments:
    if (nGroup == 1){
      if (length(ordered) > 0){
        # Ordinal case: run the Cpp function:
        prepRes <- covPrepare_cpp(
          as.matrix(data[, vars]),
          vars %in% ordered,
          WLSweights = needWLSV,
          verbose = verbose
        )
        if (any(eigen(prepRes$covmat)$values < 0)){
          stop("Correlation matrix is not positive semi-definite.")
        }
        # Obtain results:
        covs <- list(as(prepRes$covmat, "matrix"))
        cors <- list(as(cov2cor(prepRes$covmat), "matrix"))
        thresholds <- list(prepRes$means_thresholds)
        means <- list(rep(NA, length(vars)))
        # Continuous variables get a mean; ordinal variables keep thresholds:
        for (i in seq_along(vars)){
          if (!vars[i] %in% ordered){
            means[[1]][i] <- prepRes$means_thresholds[[i]]
            thresholds[[1]][[i]] <- numeric(0)
          }
        }
        if (needWLSV){
          weightsmatrix[[1]] <- prepRes$WLS_V
        }
        # Sums of squares are undefined for ordinal input:
        squares <- list(as(matrix(NA, nrow(prepRes$covmat), ncol(prepRes$covmat)), "matrix"))
      } else {
        # Continuous case: ML (denominator n) covariance matrix, then symmetrize
        # to guard against floating-point asymmetry:
        cov <- (nrow(data[, c(vars)]) - 1) / (nrow(data[, c(vars)])) * cov(data[, c(vars)], use = switch(
          missing, "listwise" = "complete.obs", "pairwise" = "pairwise.complete.obs"
        ))
        cov <- 0.5 * (cov + t(cov))
        covs <- list(as(cov, "matrix"))
        if (!any(is.na(cov))){
          cors <- list(as(new("corMatrix", cov2cor(cov), sd = diag(cov)), "matrix"))
        } else {
          cors <- list()
        }
        dataMat <- as.matrix(data[, c(vars)])
        squares <- list(as(t(dataMat) %*% dataMat, "matrix"))
        rm(dataMat)
        means <- list(colMeans(data[, c(vars)], na.rm = TRUE))
        thresholds <- list(list())
      }
    } else {
      # Multiple groups:
      covs <- list()
      cors <- list()
      means <- list()
      squares <- list()
      thresholds <- lapply(1:nGroup, function(x) list())
      for (g in 1:nGroup){
        if (length(ordered) > 0){
          # Ordinal case: run the Cpp function on this group's data.
          # NOTE(review): the original passed the *full* data here for every
          # group (unlike the continuous branch below, which subsets); fixed to
          # subset per group and to forward 'verbose' like the 1-group branch.
          prepRes <- covPrepare_cpp(
            as.matrix(data[data[[groups]] == g, vars]),
            vars %in% ordered,
            WLSweights = needWLSV,
            verbose = verbose
          )
          if (any(eigen(prepRes$covmat)$values < 0)){
            stop("Correlation matrix is not positive semi-definite.")
          }
          # Obtain results:
          covs[[g]] <- as(prepRes$covmat, "matrix")
          cors[[g]] <- as(cov2cor(prepRes$covmat), "matrix")
          thresholds[[g]] <- prepRes$means_thresholds
          means[[g]] <- rep(NA, length(vars))
          for (i in seq_along(vars)){
            if (!vars[i] %in% ordered){
              means[[g]][i] <- prepRes$means_thresholds[[i]]
              thresholds[[g]][[i]] <- numeric(0)
            }
          }
          if (needWLSV){
            weightsmatrix[[g]] <- prepRes$WLS_V
          }
        } else {
          # Continuous case, per group:
          subData <- data[data[[groups]] == g, c(vars)]
          cov <- (nrow(subData) - 1) / (nrow(subData)) *
            cov(subData, use = switch(
              missing, "listwise" = "complete.obs", "pairwise" = "pairwise.complete.obs"
            ))
          cov <- 0.5 * (cov + t(cov))
          covs[[g]] <- as(cov, "matrix")
          if (!any(is.na(cov))){
            cors[[g]] <- as(new("corMatrix", cov2cor(cov), sd = diag(cov)), "matrix")
          } else {
            cors[[g]] <- NA
          }
          subData <- as.matrix(subData)
          squares[[g]] <- as(t(subData) %*% subData, "matrix")
          means[[g]] <- colMeans(subData, na.rm = TRUE)
        }
      }
    }
    # With raw data, sample sizes always come from the data:
    if (!missing(nobs)){
      warning("'nobs' argument ignored and obtained from data")
    }
    nobs <- as.vector(tapply(data[[groups]], data[[groups]], length))
  } else {
    ### Input via matrices ###
    thresholds <- list()
    if (standardize != "none") warning("'standardize' ignored when raw data is not used.")
    # Determine group names:
    if (missing(groups) || is.null(groups)){
      if (is.array(covs) && length(dim(covs)) > 2){
        # BUG FIX: the original branches were inverted — generated names were
        # used when dimnames existed and the (NULL) dimnames when they did not.
        if (is.null(dimnames(covs)[[3]])){
          groups <- groupNames <- paste0("group_", seq_len(dim(covs)[[3]]))
        } else {
          groups <- groupNames <- dimnames(covs)[[3]]
        }
      } else if (is.list(covs)){
        if (!is.null(names(covs))){
          groups <- groupNames <- names(covs)
        } else {
          groups <- groupNames <- paste0("group_", seq_len(length(covs)))
        }
      } else {
        groups <- groupNames <- "fullsample"
      }
    } else {
      groupNames <- groups
    }
    nGroup <- length(groups)
    # nobs is required for summary-statistics input:
    if (missing(nobs)){
      stop("'nobs' may not be missing")
    }
    if (length(nobs) != nGroup){
      stop("'nobs' must be a vector with sample size per group")
    }
    # Coerce covs to a list with one matrix per group (and compute cors):
    if (!is.array(covs)){
      if (!is.list(covs)){
        # Single covariance input that is neither an array nor a list.
        # BUG FIX: the original called cov2cor()/diag() on the wrapping *list*
        # itself, which errors; operate on the matrix before wrapping.
        covmat <- as(covs, "matrix")
        covs <- list(covmat)
        if (!any(is.na(covmat))){
          cors <- list(as(new("corMatrix", cov2cor(covmat), sd = diag(covmat)), "matrix"))
        } else {
          cors <- list(NA)
        }
      } else {
        # List of covariance matrices (one per group):
        cors <- lapply(covs, function(x){
          if (!any(is.na(x))){
            return(as(new("corMatrix", cov2cor(x), sd = diag(x)), "matrix"))
          } else {
            return(NA)
          }
        })
      }
      nVars <- ncol(covs[[1]])
    } else {
      # Array input. First promote a single 2d matrix to a 3d array with one
      # identical slice per group:
      if (length(dim(covs)) == 2){
        if (!is.null(colnames(covs))){
          vars <- colnames(covs)
        } else {
          vars <- paste0("V", seq_len(ncol(covs)))
        }
        covs <- array(covs, c(dim(covs), nGroup))
        dimnames(covs) <- list(vars, vars, NULL)
      }
      # Now create a list with one matrix per group:
      covsArray <- covs
      covs <- list()
      cors <- list()
      for (g in 1:nGroup){
        covs[[g]] <- as(covsArray[,,g], "matrix")
        cors[[g]] <- as(new("corMatrix", cov2cor(covsArray[,,g]), sd = diag(covsArray[,,g])), "matrix")
      }
    }
    # Number of vars:
    nVars <- nrow(covs[[1]])
    # Variable names:
    if (missing(vars)){
      if (!is.null(colnames(covs[[1]]))){
        vars <- colnames(covs[[1]])
      } else {
        vars <- paste0("V", seq_len(nVars))
      }
    }
    # Default means to zero vectors; recycle a single means input per group:
    if (missing(means)){
      means <- lapply(1:nGroup, function(x) matrix(0, nVars, 1))
    }
    if (!is.list(means)){
      means <- lapply(1:nGroup, function(x) means)
    }
    # Determine sums-of-squares matrices under both covariance denominators:
    # Maximum likelihood (denominator n):
    if (covtype %in% c("ML","choose")){
      MLsquares <- list()
      for (i in seq_along(covs)){
        MLsquares[[i]] <- nobs[i] * (covs[[i]] + means[[i]] %*% t(means[[i]]))
      }
    }
    # Unbiased (denominator n-1):
    if (covtype %in% c("UB","choose")){
      UBsquares <- list()
      for (i in seq_along(covs)){
        UBsquares[[i]] <- nobs[i] * (covUBtoML(covs[[i]], nobs[i]) + means[[i]] %*% t(means[[i]]))
      }
    }
    # If we need to choose, pick the denominator whose implied sums of squares
    # look most like integers (raw data is typically integer-valued):
    if (covtype == "choose"){
      MLrest <- mean(round(unlist(lapply(MLsquares, as.matrix)), 10) %% 1)
      UBrest <- mean(round(unlist(lapply(UBsquares, as.matrix)), 10) %% 1)
      if (MLrest < UBrest){
        message("Assuming denominator n was used in covariance computation (covtype = 'ML').")
        squares <- MLsquares
      } else {
        message("Assuming denominator n-1 was used in covariance computation (covtype = 'UB').")
        squares <- UBsquares
      }
      # NOTE(review): when 'UB' is detected here, covs are *not* rescaled below
      # (covtype is still "choose"); kept as in the original — confirm intent.
    } else if (covtype == "ML"){
      squares <- MLsquares
    } else {
      squares <- UBsquares
    }
    # Internally covariances are stored with denominator n; transform if needed:
    if (covtype == "UB"){
      for (i in seq_along(covs)){
        covs[[i]] <- covUBtoML(as.matrix(covs[[i]]), nobs[i])
      }
    }
  }

  # Set names:
  names(covs) <- groupNames
  names(means) <- groupNames
  # Determine corinput (also detects standardized data used as input): all
  # diagonal entries equal to 1 (within numerical tolerance) in every group.
  if (missing(corinput)){
    corinput <- all(
      sapply(covs, function(x){
        all(abs(diag(x) - 1) < sqrt(.Machine$double.eps))
      })
    )
  }
  # Generate samplestats object:
  object <- generate_psychonetrics_samplestats(covs = covs, cors = cors, means = means, corinput = corinput, thresholds = thresholds, squares = squares, fullFIML = fullFIML, groupvar = groups)
  # Fill groups:
  object@groups <- data.frame(
    label = groupNames,
    id = seq_along(groupNames),
    nobs = nobs, stringsAsFactors = FALSE
  )
  # Fill variables:
  object@variables <- data.frame(
    label = vars,
    id = seq_along(vars),
    ordered = vars %in% ordered,
    stringsAsFactors = FALSE
  )
  # Add FIML data (still summary statistics):
  if (fimldata){
    if (!missing(data)){
      object@fimldata <- lapply(seq_along(groupNames), function(x){
        if (fullFIML){
          fullfimldata(data[data[[groups]] == x, vars], verbose = verbose)
        } else {
          missingpatterns(data[data[[groups]] == x, vars], verbose = verbose)
        }
      })
    } else {
      object@fimldata <- lapply(seq_along(groupNames), function(x){
        missingpatterns_covs(means[[x]], covs[[x]], nobs[x], verbose = verbose)
      })
    }
  }
  # Add full data:
  if (storedata){
    # Overwrite integer group codes with the group names:
    data[[groups]] <- groupNames[data[[groups]]]
    object@rawdata <- data[, c(vars, groups)]
    attr(object@rawdata, "vars") <- vars
    attr(object@rawdata, "groups") <- groups
    attr(object@rawdata, "missing") <- missing
    attr(object@rawdata, "fimldata") <- fimldata
  }
  # Add WLS weights matrix:
  if (is.list(weightsmatrix) || is.matrix(weightsmatrix)){
    # User-supplied (or computed above) weights: one matrix per group.
    if (is.list(weightsmatrix)){
      object@WLS.W <- lapply(weightsmatrix, function(x) x)
    } else {
      object@WLS.W <- lapply(1:nGroup, function(x) weightsmatrix)
    }
    # FIXME (from original): a dimension check on WLS.W (means part vs
    # variance/covariance part) existed here but was disabled.
  } else if (is.character(weightsmatrix) && weightsmatrix != "none"){
    # BUG FIX: the original nested two identical loops over g (the outer loop
    # merely repeated the same work nGroup times); a single loop suffices.
    for (g in 1:nGroup){
      if (weightsmatrix == "identity"){
        object@WLS.W[[g]] <- diag(nVars + nVars * (nVars + 1) / 2)
      } else if (weightsmatrix == "full"){
        subData <- data[data[[groups]] == g, c(vars)]
        object@WLS.W[[g]] <- as.matrix(LS_weightsmat(subData, meanstructure = meanstructure, corinput = corinput))
      } else if (weightsmatrix == "diag"){
        subData <- data[data[[groups]] == g, c(vars)]
        object@WLS.W[[g]] <- as.matrix(LS_weightsmat(subData, type = "diagonal", meanstructure = meanstructure, corinput = corinput))
      }
    }
  }
  # Return object:
  return(object)
}
# Inner function to make a sample-statistics object.
#
# Builds a psychonetrics samplestats object either from raw data ('data') or
# from summary statistics ('covs' + 'means' + 'nobs'). Per group it stores the
# ML (denominator n) covariance matrix, the correlation matrix, means,
# thresholds (for ordinal variables), sums-of-squares matrices, sample sizes,
# and optionally FIML missing-data patterns, the raw data, and WLS weights.
samplestats_norawts <- function(
  data,                   # Dataset (data frame or matrix); may be missing if 'covs' is used
  vars,                   # Character vector of variable names; extracted from 'data' if missing
  ordered = character(0), # Ordinal variables (TRUE = all); only supported with raw data
  groups,                 # Group variable name (raw data) or vector of group names (covs input); ignored if missing
  covs,                   # Alternative input: covariances (array nvar * nvar * ngroup, list, or single matrix)
  means,                  # Alternative input: means (matrix nvar * ngroup or list)
  nobs,                   # Sample size per group (length ngroup); required when 'data' is missing
  missing = c("listwise","pairwise"),  # Missing-data handling for covariance estimation
  fimldata = FALSE,       # Store FIML summary statistics?
  verbose = TRUE,
  storedata = FALSE,      # Store the raw data in the returned object?
  weightsmatrix = "none", # "none", "identity", "full", "diag", or a user-supplied matrix/list
  meanstructure = TRUE,
  covtype = c("choose","ML","UB"),     # Denominator used in the *input* covariances (n vs n-1)
  corinput,               # TRUE if input is a correlation matrix; auto-detected if missing
  standardize = c("none","z","quantile"),  # Optional standardization of raw data
  fullFIML = FALSE        # If estimator is FIML, do full (row-wise) FIML. Usually not needed.
){
  missing <- match.arg(missing)
  covtype <- match.arg(covtype)
  standardize <- match.arg(standardize)

  # Correlation input implies an ML-type (denominator n) matrix:
  if (!missing(corinput)){
    if (isTRUE(corinput)){
      if (covtype == "UB"){
        warning("Setting covtype = 'ML' because corinput = TRUE")
      }
      covtype <- "ML"
    }
  }

  # Input checks (scalar conditions, so use && throughout):
  if (missing(data) && length(ordered) > 0){
    stop("Ordinal data only supported with raw data as input.")
  }
  if (missing(data) && missing(covs)){
    stop("'data' and 'covs' may not both be missing")
  }
  if (!missing(data) && !missing(covs)){
    stop("'data' and 'covs' may not both *not* be missing")
  }
  if (missing(data) && storedata){
    stop("'data' may not be missing if 'storedata = TRUE'")
  }
  if (missing(data) && is.character(weightsmatrix) && weightsmatrix %in% c("full","diag")){
    # BUG FIX: corrected "esitmator" typo in the error message.
    stop("'data' may not be missing if estimator = 'WLS' or estimator = 'DWLS'")
  }

  # If raw data is supplied:
  if (!missing(data) && !is.null(data)){
    if (!is.data.frame(data) && !is.matrix(data)){
      stop("'data' must be a data frame or matrix")
    }
    if (is.matrix(data)){
      data <- as.data.frame(data)
    }
    # If the group variable is missing, add a dummy single group:
    if (missing(groups) || is.null(groups)){
      groups <- "group"
      data[[groups]] <- "fullsample"
    }
    # Extract group names and recode the group column to integer codes 1..nGroup:
    groupNames <- unique(data[[groups]])
    nGroup <- length(groupNames)
    data[[groups]] <- match(data[[groups]], groupNames)
    # If vars is missing, use every column except the group variable:
    if (missing(vars)){
      vars <- names(data[, names(data) != groups])
    }
    # Number of variables:
    nVars <- length(vars)
    # Remove all rows that are missing on every variable:
    data <- data[rowSums(is.na(data[, vars])) < nVars, ]
    # Standardize the data if requested:
    if (standardize == "z"){
      for (v in seq_along(vars)){
        data[, vars[v]] <- (data[, vars[v]] - mean(data[, vars[v]], na.rm = TRUE)) / sd(data[, vars[v]], na.rm = TRUE)
      }
    } else if (standardize == "quantile"){
      for (v in seq_along(vars)){
        data[, vars[v]] <- quantiletransform(data[, vars[v]])
      }
    }
    # If missing is listwise (and no FIML), drop every row with any NA:
    if (missing == "listwise" && !fimldata){
      data <- data[rowSums(is.na(data[, c(vars)])) == 0, ]
    }
    # If ordered is TRUE, treat all variables as ordered:
    if (isTRUE(ordered)){
      ordered <- vars
    }
    # Either none or all variables may be ordered:
    # FIXME: ADD POLYSERIALS LATER!!!
    if (!sum(vars %in% ordered) == 0 && !sum(vars %in% ordered) == length(vars)){
      stop("Either all variables or no variables may be ordered...")
    }
    # Do we need to compute a WLS weights matrix from the data?
    if (length(ordered) > 0 && is.character(weightsmatrix)){
      needWLSV <- TRUE
      weightsmatrix <- list()
    } else {
      needWLSV <- FALSE
    }
    # Create covs and means arguments:
    if (nGroup == 1){
      if (length(ordered) > 0){
        # Ordinal case: run the Cpp function:
        prepRes <- covPrepare_cpp(
          as.matrix(data[, vars]),
          vars %in% ordered,
          WLSweights = needWLSV,
          verbose = verbose
        )
        if (any(eigen(prepRes$covmat)$values < 0)){
          stop("Correlation matrix is not positive semi-definite.")
        }
        # Obtain results:
        covs <- list(as(prepRes$covmat, "matrix"))
        cors <- list(as(cov2cor(prepRes$covmat), "matrix"))
        thresholds <- list(prepRes$means_thresholds)
        means <- list(rep(NA, length(vars)))
        # Continuous variables get a mean; ordinal variables keep thresholds:
        for (i in seq_along(vars)){
          if (!vars[i] %in% ordered){
            means[[1]][i] <- prepRes$means_thresholds[[i]]
            thresholds[[1]][[i]] <- numeric(0)
          }
        }
        if (needWLSV){
          weightsmatrix[[1]] <- prepRes$WLS_V
        }
        # Sums of squares are undefined for ordinal input:
        squares <- list(as(matrix(NA, nrow(prepRes$covmat), ncol(prepRes$covmat)), "matrix"))
      } else {
        # Continuous case: ML (denominator n) covariance matrix, then symmetrize
        # to guard against floating-point asymmetry:
        cov <- (nrow(data[, c(vars)]) - 1) / (nrow(data[, c(vars)])) * cov(data[, c(vars)], use = switch(
          missing, "listwise" = "complete.obs", "pairwise" = "pairwise.complete.obs"
        ))
        cov <- 0.5 * (cov + t(cov))
        covs <- list(as(cov, "matrix"))
        if (!any(is.na(cov))){
          cors <- list(as(new("corMatrix", cov2cor(cov), sd = diag(cov)), "matrix"))
        } else {
          cors <- list()
        }
        dataMat <- as.matrix(data[, c(vars)])
        squares <- list(as(t(dataMat) %*% dataMat, "matrix"))
        rm(dataMat)
        means <- list(colMeans(data[, c(vars)], na.rm = TRUE))
        thresholds <- list(list())
      }
    } else {
      # Multiple groups:
      covs <- list()
      cors <- list()
      means <- list()
      squares <- list()
      thresholds <- lapply(1:nGroup, function(x) list())
      for (g in 1:nGroup){
        if (length(ordered) > 0){
          # Ordinal case: run the Cpp function on this group's data.
          # NOTE(review): the original passed the *full* data here for every
          # group (unlike the continuous branch below, which subsets); fixed to
          # subset per group and to forward 'verbose' like the 1-group branch.
          prepRes <- covPrepare_cpp(
            as.matrix(data[data[[groups]] == g, vars]),
            vars %in% ordered,
            WLSweights = needWLSV,
            verbose = verbose
          )
          if (any(eigen(prepRes$covmat)$values < 0)){
            stop("Correlation matrix is not positive semi-definite.")
          }
          # Obtain results:
          covs[[g]] <- as(prepRes$covmat, "matrix")
          cors[[g]] <- as(cov2cor(prepRes$covmat), "matrix")
          thresholds[[g]] <- prepRes$means_thresholds
          means[[g]] <- rep(NA, length(vars))
          for (i in seq_along(vars)){
            if (!vars[i] %in% ordered){
              means[[g]][i] <- prepRes$means_thresholds[[i]]
              thresholds[[g]][[i]] <- numeric(0)
            }
          }
          if (needWLSV){
            weightsmatrix[[g]] <- prepRes$WLS_V
          }
        } else {
          # Continuous case, per group:
          subData <- data[data[[groups]] == g, c(vars)]
          cov <- (nrow(subData) - 1) / (nrow(subData)) *
            cov(subData, use = switch(
              missing, "listwise" = "complete.obs", "pairwise" = "pairwise.complete.obs"
            ))
          cov <- 0.5 * (cov + t(cov))
          covs[[g]] <- as(cov, "matrix")
          if (!any(is.na(cov))){
            cors[[g]] <- as(new("corMatrix", cov2cor(cov), sd = diag(cov)), "matrix")
          } else {
            cors[[g]] <- NA
          }
          subData <- as.matrix(subData)
          squares[[g]] <- as(t(subData) %*% subData, "matrix")
          means[[g]] <- colMeans(subData, na.rm = TRUE)
        }
      }
    }
    # With raw data, sample sizes always come from the data:
    if (!missing(nobs)){
      warning("'nobs' argument ignored and obtained from data")
    }
    nobs <- as.vector(tapply(data[[groups]], data[[groups]], length))
  } else {
    ### Input via matrices ###
    thresholds <- list()
    if (standardize != "none") warning("'standardize' ignored when raw data is not used.")
    # Determine group names:
    if (missing(groups) || is.null(groups)){
      if (is.array(covs) && length(dim(covs)) > 2){
        # BUG FIX: the original branches were inverted — generated names were
        # used when dimnames existed and the (NULL) dimnames when they did not.
        if (is.null(dimnames(covs)[[3]])){
          groups <- groupNames <- paste0("group_", seq_len(dim(covs)[[3]]))
        } else {
          groups <- groupNames <- dimnames(covs)[[3]]
        }
      } else if (is.list(covs)){
        if (!is.null(names(covs))){
          groups <- groupNames <- names(covs)
        } else {
          groups <- groupNames <- paste0("group_", seq_len(length(covs)))
        }
      } else {
        groups <- groupNames <- "fullsample"
      }
    } else {
      groupNames <- groups
    }
    nGroup <- length(groups)
    # nobs is required for summary-statistics input:
    if (missing(nobs)){
      stop("'nobs' may not be missing")
    }
    if (length(nobs) != nGroup){
      stop("'nobs' must be a vector with sample size per group")
    }
    # Coerce covs to a list with one matrix per group (and compute cors):
    if (!is.array(covs)){
      if (!is.list(covs)){
        # Single covariance input that is neither an array nor a list.
        # BUG FIX: the original called cov2cor()/diag() on the wrapping *list*
        # itself, which errors; operate on the matrix before wrapping.
        covmat <- as(covs, "matrix")
        covs <- list(covmat)
        if (!any(is.na(covmat))){
          cors <- list(as(new("corMatrix", cov2cor(covmat), sd = diag(covmat)), "matrix"))
        } else {
          cors <- list(NA)
        }
      } else {
        # List of covariance matrices (one per group):
        cors <- lapply(covs, function(x){
          if (!any(is.na(x))){
            return(as(new("corMatrix", cov2cor(x), sd = diag(x)), "matrix"))
          } else {
            return(NA)
          }
        })
      }
      nVars <- ncol(covs[[1]])
    } else {
      # Array input. First promote a single 2d matrix to a 3d array with one
      # identical slice per group:
      if (length(dim(covs)) == 2){
        if (!is.null(colnames(covs))){
          vars <- colnames(covs)
        } else {
          vars <- paste0("V", seq_len(ncol(covs)))
        }
        covs <- array(covs, c(dim(covs), nGroup))
        dimnames(covs) <- list(vars, vars, NULL)
      }
      # Now create a list with one matrix per group:
      covsArray <- covs
      covs <- list()
      cors <- list()
      for (g in 1:nGroup){
        covs[[g]] <- as(covsArray[,,g], "matrix")
        cors[[g]] <- as(new("corMatrix", cov2cor(covsArray[,,g]), sd = diag(covsArray[,,g])), "matrix")
      }
    }
    # Number of vars:
    nVars <- nrow(covs[[1]])
    # Variable names:
    if (missing(vars)){
      if (!is.null(colnames(covs[[1]]))){
        vars <- colnames(covs[[1]])
      } else {
        vars <- paste0("V", seq_len(nVars))
      }
    }
    # Default means to zero vectors; recycle a single means input per group:
    if (missing(means)){
      means <- lapply(1:nGroup, function(x) matrix(0, nVars, 1))
    }
    if (!is.list(means)){
      means <- lapply(1:nGroup, function(x) means)
    }
    # Determine sums-of-squares matrices under both covariance denominators:
    # Maximum likelihood (denominator n):
    if (covtype %in% c("ML","choose")){
      MLsquares <- list()
      for (i in seq_along(covs)){
        MLsquares[[i]] <- nobs[i] * (covs[[i]] + means[[i]] %*% t(means[[i]]))
      }
    }
    # Unbiased (denominator n-1):
    if (covtype %in% c("UB","choose")){
      UBsquares <- list()
      for (i in seq_along(covs)){
        UBsquares[[i]] <- nobs[i] * (covUBtoML(covs[[i]], nobs[i]) + means[[i]] %*% t(means[[i]]))
      }
    }
    # If we need to choose, pick the denominator whose implied sums of squares
    # look most like integers (raw data is typically integer-valued):
    if (covtype == "choose"){
      MLrest <- mean(round(unlist(lapply(MLsquares, as.matrix)), 10) %% 1)
      UBrest <- mean(round(unlist(lapply(UBsquares, as.matrix)), 10) %% 1)
      if (MLrest < UBrest){
        message("Assuming denominator n was used in covariance computation (covtype = 'ML').")
        squares <- MLsquares
      } else {
        message("Assuming denominator n-1 was used in covariance computation (covtype = 'UB').")
        squares <- UBsquares
      }
      # NOTE(review): when 'UB' is detected here, covs are *not* rescaled below
      # (covtype is still "choose"); kept as in the original — confirm intent.
    } else if (covtype == "ML"){
      squares <- MLsquares
    } else {
      squares <- UBsquares
    }
    # Internally covariances are stored with denominator n; transform if needed:
    if (covtype == "UB"){
      for (i in seq_along(covs)){
        covs[[i]] <- covUBtoML(as.matrix(covs[[i]]), nobs[i])
      }
    }
  }

  # Set names:
  names(covs) <- groupNames
  names(means) <- groupNames
  # Determine corinput (also detects standardized data used as input): all
  # diagonal entries equal to 1 (within numerical tolerance) in every group.
  if (missing(corinput)){
    corinput <- all(
      sapply(covs, function(x){
        all(abs(diag(x) - 1) < sqrt(.Machine$double.eps))
      })
    )
  }
  # Generate samplestats object:
  object <- generate_psychonetrics_samplestats(covs = covs, cors = cors, means = means, corinput = corinput, thresholds = thresholds, squares = squares, fullFIML = fullFIML, groupvar = groups)
  # Fill groups:
  object@groups <- data.frame(
    label = groupNames,
    id = seq_along(groupNames),
    nobs = nobs, stringsAsFactors = FALSE
  )
  # Fill variables:
  object@variables <- data.frame(
    label = vars,
    id = seq_along(vars),
    ordered = vars %in% ordered,
    stringsAsFactors = FALSE
  )
  # Add FIML data (still summary statistics):
  if (fimldata){
    if (!missing(data)){
      object@fimldata <- lapply(seq_along(groupNames), function(x){
        if (fullFIML){
          fullfimldata(data[data[[groups]] == x, vars], verbose = verbose)
        } else {
          missingpatterns(data[data[[groups]] == x, vars], verbose = verbose)
        }
      })
    } else {
      object@fimldata <- lapply(seq_along(groupNames), function(x){
        missingpatterns_covs(means[[x]], covs[[x]], nobs[x], verbose = verbose)
      })
    }
  }
  # Add full data:
  if (storedata){
    # Overwrite integer group codes with the group names:
    data[[groups]] <- groupNames[data[[groups]]]
    object@rawdata <- data[, c(vars, groups)]
    attr(object@rawdata, "vars") <- vars
    attr(object@rawdata, "groups") <- groups
    attr(object@rawdata, "missing") <- missing
    attr(object@rawdata, "fimldata") <- fimldata
  }
  # Add WLS weights matrix:
  if (is.list(weightsmatrix) || is.matrix(weightsmatrix)){
    # User-supplied (or computed above) weights: one matrix per group.
    if (is.list(weightsmatrix)){
      object@WLS.W <- lapply(weightsmatrix, function(x) x)
    } else {
      object@WLS.W <- lapply(1:nGroup, function(x) weightsmatrix)
    }
    # FIXME (from original): a dimension check on WLS.W (means part vs
    # variance/covariance part) existed here but was disabled.
  } else if (is.character(weightsmatrix) && weightsmatrix != "none"){
    # BUG FIX: the original nested two identical loops over g (the outer loop
    # merely repeated the same work nGroup times); a single loop suffices.
    for (g in 1:nGroup){
      if (weightsmatrix == "identity"){
        object@WLS.W[[g]] <- diag(nVars + nVars * (nVars + 1) / 2)
      } else if (weightsmatrix == "full"){
        subData <- data[data[[groups]] == g, c(vars)]
        object@WLS.W[[g]] <- as.matrix(LS_weightsmat(subData, meanstructure = meanstructure, corinput = corinput))
      } else if (weightsmatrix == "diag"){
        subData <- data[data[[groups]] == g, c(vars)]
        object@WLS.W[[g]] <- as.matrix(LS_weightsmat(subData, type = "diagonal", meanstructure = meanstructure, corinput = corinput))
      }
    }
  }
  # Return object:
  return(object)
}
# Collect per-iteration error curves for every (seed, noise, method)
# combination found on disk, then average over seeds per (noise, method)
# combination for plotting. Relies on 'out_data_path' defined in setpath.R.
source("../setpath.R")

random_seeds <- c(1, 2, 3, 4)
noises <- c(5, 13)
methods <- c("EN", "MI")

# One column per (seed, noise, method) combination that has results on disk.
# BUG FIX: the original preallocated 15 columns for 16 possible combinations.
n_combos <- length(random_seeds) * length(noises) * length(methods)
res <- data.frame(matrix(nrow = 20, ncol = n_combos))
file_name <- "every_4_20_iter.csv"

count <- 1
for (random_seed in random_seeds) {
  for (noise in noises) {
    for (method in methods) {
      folder_name <- paste0("noise_robust/", "errors_", random_seed, "realnoise_", noise,
                            "anti_pool_falsemeasure_relative/", method)
      file_path <- file.path(out_data_path, folder_name, file_name)
      # (Removed a stray list.files() call whose result was discarded.)
      if (file.exists(file_path)) {
        data <- read.csv(file_path, header = TRUE)
        # Second column holds the error curve (20 iterations):
        res[, count] <- data[, 2]
        colnames(res)[count] <- paste(random_seed, noise, method, sep = "_")
        count <- count + 1
      }
    }
  }
}

# Average over seeds: one column per (noise, method) combination.
mean_plot <- data.frame(matrix(nrow = 20, ncol = length(noises) * length(methods)))
std_plot <- data.frame(matrix(nrow = 20, ncol = length(noises) * length(methods)))  # NOTE(review): never filled
count <- 1
for (noise in noises) {
  for (method in methods) {
    tag <- paste0("_", noise, "_", method)
    index <- grep(tag, colnames(res))
    # drop = FALSE keeps a data frame even when only one seed matched:
    mean_plot[, count] <- rowMeans(res[, index, drop = FALSE])
    colnames(mean_plot)[count] <- paste(noise, method, sep = "_")
    count <- count + 1
  }
}
| /OED_multiple_surface/src/Plot/Noise_effect.R | no_license | wmmxk/OptimalExperimentalDesign | R | false | false | 1,172 | r | source("../setpath.R")
# Collect per-iteration error curves for every (seed, noise, method)
# combination found on disk, then average over seeds per (noise, method)
# combination for plotting. Relies on 'out_data_path' from setpath.R.
random_seeds <- c(1, 2, 3, 4)
noises <- c(5, 13)
methods <- c("EN", "MI")

# One column per (seed, noise, method) combination that has results on disk.
# BUG FIX: the original preallocated 15 columns for 16 possible combinations.
n_combos <- length(random_seeds) * length(noises) * length(methods)
res <- data.frame(matrix(nrow = 20, ncol = n_combos))
file_name <- "every_4_20_iter.csv"

count <- 1
for (random_seed in random_seeds) {
  for (noise in noises) {
    for (method in methods) {
      folder_name <- paste0("noise_robust/", "errors_", random_seed, "realnoise_", noise,
                            "anti_pool_falsemeasure_relative/", method)
      file_path <- file.path(out_data_path, folder_name, file_name)
      # (Removed a stray list.files() call whose result was discarded.)
      if (file.exists(file_path)) {
        data <- read.csv(file_path, header = TRUE)
        # Second column holds the error curve (20 iterations):
        res[, count] <- data[, 2]
        colnames(res)[count] <- paste(random_seed, noise, method, sep = "_")
        count <- count + 1
      }
    }
  }
}

# Average over seeds: one column per (noise, method) combination.
mean_plot <- data.frame(matrix(nrow = 20, ncol = length(noises) * length(methods)))
std_plot <- data.frame(matrix(nrow = 20, ncol = length(noises) * length(methods)))  # NOTE(review): never filled
count <- 1
for (noise in noises) {
  for (method in methods) {
    tag <- paste0("_", noise, "_", method)
    index <- grep(tag, colnames(res))
    # drop = FALSE keeps a data frame even when only one seed matched:
    mean_plot[, count] <- rowMeans(res[, index, drop = FALSE])
    colnames(mean_plot)[count] <- paste(noise, method, sep = "_")
    count <- count + 1
  }
}
# Exercises - Tidymodels

# packages ----------------------------------------------------------------
library(tidymodels)
library(rpart.plot)
library(ISLR)

# data --------------------------------------------------------------------
View(Hitters)
help(Hitters)

# exercise 1 ---------------------------------------------------------------
# Define a model specification that characterizes a regression tree
# (mode 'regression') with tree_depth = 3, using "rpart" as the 'engine'.
# Curiosity (covered later): tree_depth is the maximum depth of the tree.
# Hints: decision_tree(), set_engine(), set_mode().

# exercise 2 ---------------------------------------------------------------
# a) Using the Hitters data, fit the tree model for 'HmRun' (Home Runs in 1986)
# of each player, using 'CHmRun' (career Home Runs) as the explanatory variable.
# Hints: fit(), a formula and a data.frame.

# b) use rpart.plot(ajuste$fit) to visualize your tree. What does the intensity
# of the blue color convey?
# NOTE: replace the object name 'ajuste' with the name of the object you
# created in (a). For example: rpart.plot(hitters_ajuste$fit)

# exercise 3 ----------------------------------------------------------------
# Add an extra column to the data set holding the predictions.
# Hints: predict()

# exercise 4 ---------------------------------------------------------------
# Compute the RMSE, MAE and R-squared of the model you fitted.
# Hints: use the yardstick functions rmse(), mae(), rsq().

# exercise 5 [challenge] -----------------------------------------------------
# a) Make a scatter plot of HmRun vs CHmRun and overlay the fitted curve.
# Hints: use the table made in (a). ggplot2() + geom_point() + geom_line()

# b) Make a scatter plot comparing your predictions with what actually happened.
# Hints: use the table made in (a). ggplot2() + geom_point()
| /exercicios/01-tidymodels.R | no_license | brunocp76/CursoIntroML | R | false | false | 1,968 | r | # Exercícios - Tidymodels
# packages ----------------------------------------------------------------
library(tidymodels)
library(rpart.plot)
library(ISLR)

# data --------------------------------------------------------------------
View(Hitters)
help(Hitters)

# exercise 1 ---------------------------------------------------------------
# Define a model specification that characterizes a regression tree
# (mode 'regression') with tree_depth = 3, using "rpart" as the 'engine'.
# Curiosity (covered later): tree_depth is the maximum depth of the tree.
# Hints: decision_tree(), set_engine(), set_mode().

# exercise 2 ---------------------------------------------------------------
# a) Using the Hitters data, fit the tree model for 'HmRun' (Home Runs in 1986)
# of each player, using 'CHmRun' (career Home Runs) as the explanatory variable.
# Hints: fit(), a formula and a data.frame.

# b) use rpart.plot(ajuste$fit) to visualize your tree. What does the intensity
# of the blue color convey?
# NOTE: replace the object name 'ajuste' with the name of the object you
# created in (a). For example: rpart.plot(hitters_ajuste$fit)

# exercise 3 ----------------------------------------------------------------
# Add an extra column to the data set holding the predictions.
# Hints: predict()

# exercise 4 ---------------------------------------------------------------
# Compute the RMSE, MAE and R-squared of the model you fitted.
# Hints: use the yardstick functions rmse(), mae(), rsq().

# exercise 5 [challenge] -----------------------------------------------------
# a) Make a scatter plot of HmRun vs CHmRun and overlay the fitted curve.
# Hints: use the table made in (a). ggplot2() + geom_point() + geom_line()

# b) Make a scatter plot comparing your predictions with what actually happened.
# Hints: use the table made in (a). ggplot2() + geom_point()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hazrate.like.R
\name{hazrate.like}
\alias{hazrate.like}
\title{Hazard rate likelihood function for distance analyses}
\usage{
hazrate.like(
a,
dist,
covars = NULL,
w.lo = 0,
w.hi = max(dist),
series = "cosine",
expansions = 0,
scale = TRUE,
pointSurvey = FALSE
)
}
\arguments{
\item{a}{A vector of likelihood parameter values. Length and meaning
depend on \code{series} and \code{expansions}. If no expansion terms
were called for
(i.e., \code{expansions = 0}), the distance likelihoods contain
one or two canonical parameters (see Details). If one or more
expansions are called for,
coefficients for the expansion terms follow coefficients for the
canonical parameters. If \code{p} is the number of canonical
parameters, coefficients
for the expansion terms are \code{a[(p+1):length(a)]}.}
\item{dist}{A numeric vector containing the observed distances.}
\item{covars}{Data frame containing values of covariates at
each observation in \code{dist}.}
\item{w.lo}{Scalar value of the lowest observable distance.
This is the \emph{left truncation} of sighting distances in
\code{dist}. Same units as \code{dist}.
Values less than \code{w.lo} are allowed in \code{dist},
but are ignored and their contribution to the likelihood is
set to \code{NA} in the output.}
\item{w.hi}{Scalar value of the largest observable distance.
This is the \emph{right truncation} of sighting distances in
\code{dist}. Same units as \code{dist}.
Values greater than \code{w.hi} are allowed in \code{dist},
but are ignored and their contribution to the likelihood is
set to \code{NA} in the output.}
\item{series}{A string specifying the type of expansion to use.
Currently, valid values are 'simple', 'hermite', and 'cosine'; but, see
\code{\link{dfuncEstim}} about defining other series.}
\item{expansions}{A scalar specifying the number of terms in
\code{series}. Depending on the series, this could be 0 through 5.
The default of 0 equates to no expansion terms of any type.}
\item{scale}{Logical scalar indicating whether or not to scale
the likelihood so it integrates to 1. This parameter is used to
stop recursion in other functions.
If \code{scale} equals TRUE, a numerical integration
routine (\code{\link{integration.constant}}) is called, which
in turn calls this likelihood function again
with \code{scale} = FALSE. Thus, this routine knows when its
values are being used to compute the likelihood and when its
value is being used to compute the
constant of integration. All user defined likelihoods must have
and use this parameter.}
\item{pointSurvey}{Boolean. TRUE if \code{dist} is point
transect data, FALSE if line transect data.}
}
\value{
A numeric vector the same length and order as
\code{dist} containing the likelihood contribution for
corresponding distances in \code{dist}.
Assuming \code{L} is the returned vector from one of these
functions, the negative log likelihood of all the data is
\code{-sum(log(L), na.rm=T)}. Note that the
returned likelihood value for distances less than
\code{w.lo} or greater than \code{w.hi} is \code{NA},
and thus it is prudent to use \code{na.rm=TRUE} in the
sum. If \code{scale} = TRUE, the integral of the likelihood
from \code{w.lo} to \code{w.hi} is 1.0. If \code{scale} =
FALSE, the integral of the likelihood is
arbitrary.
}
\description{
This function computes likelihood contributions for
off-transect sighting distances, scaled appropriately, for use as
a distance likelihood.
}
\details{
The hazard rate likelihood is
\deqn{f(x|a,b) = 1 - \exp(-(x/\sigma)^{-\beta})}{%
f(x|a,b) = 1 - exp(-(x/Sigma)^(-Beta))}
where \eqn{\sigma}{Sigma} is a scale parameter,
and \eqn{\beta}{Beta}
is a slope parameter to be estimated.
\bold{Expansion Terms}: If \code{expansions} = k
(k > 0), the expansion function specified by
\code{series} is called (see for example
\code{\link{cosine.expansion}}). Assuming
\eqn{h_{ij}(x)}{h_ij(x)} is the \eqn{j^{th}}{j-th}
expansion term for the \eqn{i^{th}}{i-th} distance and that
\eqn{c_1, c_2, \dots, c_k}{c(1), c(2), ..., c(k)} are
(estimated) coefficients for the expansion terms, the
likelihood contribution for the \eqn{i^{th}}{i-th}
distance is, \deqn{f(x|a,b,c_1,c_2,\dots,c_k) = f(x|a,b)(1 +
\sum_{j=1}^{k} c_j h_{ij}(x)).}{%
f(x|a,b,c_1,c_2,...,c_k) = f(x|a,b)(1 + c(1) h_i1(x) +
c(2) h_i2(x) + ... + c(k) h_ik(x)). }
}
\examples{
\dontrun{
x <- seq(0, 100, length=100)
# Plots showing effects of changes in sigma
plot(x, hazrate.like(c(20, 5), x), type="l", col="red")
plot(x, hazrate.like(c(40, 5), x), type="l", col="blue")
# Plots showing effects of changes in beta
plot(x, hazrate.like(c(50, 20), x), type="l", col="red")
plot(x, hazrate.like(c(50, 2), x), type="l", col="blue")
}
}
\seealso{
\code{\link{dfuncEstim}},
\code{\link{halfnorm.like}},
\code{\link{uniform.like}},
\code{\link{negexp.like}},
\code{\link{Gamma.like}}
}
\author{
Trent McDonald, WEST, Inc. \email{tmcdonald@west-inc.com}\cr
Aidan McDonald, WEST, Inc. \email{aidan@mcdcentral.org}
}
\keyword{models}
| /man/hazrate.like.Rd | no_license | wmcdonald1/Rdistance | R | false | true | 5,235 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hazrate.like.R
\name{hazrate.like}
\alias{hazrate.like}
\title{Hazard rate likelihood function for distance analyses}
\usage{
hazrate.like(
a,
dist,
covars = NULL,
w.lo = 0,
w.hi = max(dist),
series = "cosine",
expansions = 0,
scale = TRUE,
pointSurvey = FALSE
)
}
\arguments{
\item{a}{A vector of likelihood parameter values. Length and meaning
depend on \code{series} and \code{expansions}. If no expansion terms
were called for
(i.e., \code{expansions = 0}), the distance likelihoods contain
one or two canonical parameters (see Details). If one or more
expansions are called for,
coefficients for the expansion terms follow coefficients for the
canonical parameters. If \code{p} is the number of canonical
parameters, coefficients
for the expansion terms are \code{a[(p+1):length(a)]}.}
\item{dist}{A numeric vector containing the observed distances.}
\item{covars}{Data frame containing values of covariates at
each observation in \code{dist}.}
\item{w.lo}{Scalar value of the lowest observable distance.
This is the \emph{left truncation} of sighting distances in
\code{dist}. Same units as \code{dist}.
Values less than \code{w.lo} are allowed in \code{dist},
but are ignored and their contribution to the likelihood is
set to \code{NA} in the output.}
\item{w.hi}{Scalar value of the largest observable distance.
This is the \emph{right truncation} of sighting distances in
\code{dist}. Same units as \code{dist}.
Values greater than \code{w.hi} are allowed in \code{dist},
but are ignored and their contribution to the likelihood is
set to \code{NA} in the output.}
\item{series}{A string specifying the type of expansion to use.
Currently, valid values are 'simple', 'hermite', and 'cosine'; but, see
\code{\link{dfuncEstim}} about defining other series.}
\item{expansions}{A scalar specifying the number of terms in
\code{series}. Depending on the series, this could be 0 through 5.
The default of 0 equates to no expansion terms of any type.}
\item{scale}{Logical scalar indicating whether or not to scale
the likelihood so it integrates to 1. This parameter is used to
stop recursion in other functions.
If \code{scale} equals TRUE, a numerical integration
routine (\code{\link{integration.constant}}) is called, which
in turn calls this likelihood function again
with \code{scale} = FALSE. Thus, this routine knows when its
values are being used to compute the likelihood and when its
value is being used to compute the
constant of integration. All user defined likelihoods must have
and use this parameter.}
\item{pointSurvey}{Boolean. TRUE if \code{dist} is point
transect data, FALSE if line transect data.}
}
\value{
A numeric vector the same length and order as
\code{dist} containing the likelihood contribution for
corresponding distances in \code{dist}.
Assuming \code{L} is the returned vector from one of these
functions, the negative log likelihood of all the data is
\code{-sum(log(L), na.rm=T)}. Note that the
returned likelihood value for distances less than
\code{w.lo} or greater than \code{w.hi} is \code{NA},
and thus it is prudent to use \code{na.rm=TRUE} in the
sum. If \code{scale} = TRUE, the integral of the likelihood
from \code{w.lo} to \code{w.hi} is 1.0. If \code{scale} =
FALSE, the integral of the likelihood is
arbitrary.
}
\description{
This function computes likelihood contributions for
off-transect sighting distances, scaled appropriately, for use as
a distance likelihood.
}
\details{
The hazard rate likelihood is
\deqn{f(x|a,b) = 1 - \exp(-(x/\sigma)^{-\beta})}{%
f(x|a,b) = 1 - exp(-(x/Sigma)^(-Beta))}
where \eqn{\sigma}{Sigma} is a scale parameter,
and \eqn{\beta}{Beta}
is a slope parameter to be estimated.
\bold{Expansion Terms}: If \code{expansions} = k
(k > 0), the expansion function specified by
\code{series} is called (see for example
\code{\link{cosine.expansion}}). Assuming
\eqn{h_{ij}(x)}{h_ij(x)} is the \eqn{j^{th}}{j-th}
expansion term for the \eqn{i^{th}}{i-th} distance and that
\eqn{c_1, c_2, \dots, c_k}{c(1), c(2), ..., c(k)} are
(estimated) coefficients for the expansion terms, the
likelihood contribution for the \eqn{i^{th}}{i-th}
distance is, \deqn{f(x|a,b,c_1,c_2,\dots,c_k) = f(x|a,b)(1 +
\sum_{j=1}^{k} c_j h_{ij}(x)).}{%
f(x|a,b,c_1,c_2,...,c_k) = f(x|a,b)(1 + c(1) h_i1(x) +
c(2) h_i2(x) + ... + c(k) h_ik(x)). }
}
\examples{
\dontrun{
x <- seq(0, 100, length=100)
# Plots showing effects of changes in sigma
plot(x, hazrate.like(c(20, 5), x), type="l", col="red")
plot(x, hazrate.like(c(40, 5), x), type="l", col="blue")
# Plots showing effects of changes in beta
plot(x, hazrate.like(c(50, 20), x), type="l", col="red")
plot(x, hazrate.like(c(50, 2), x), type="l", col="blue")
}
}
\seealso{
\code{\link{dfuncEstim}},
\code{\link{halfnorm.like}},
\code{\link{uniform.like}},
\code{\link{negexp.like}},
\code{\link{Gamma.like}}
}
\author{
Trent McDonald, WEST, Inc. \email{tmcdonald@west-inc.com}\cr
Aidan McDonald, WEST, Inc. \email{aidan@mcdcentral.org}
}
\keyword{models}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudformation_operations.R
\name{cloudformation_activate_organizations_access}
\alias{cloudformation_activate_organizations_access}
\title{Activate trusted access with Organizations}
\usage{
cloudformation_activate_organizations_access()
}
\description{
Activate trusted access with Organizations. With trusted access between StackSets and Organizations activated, the management account has permissions to create and manage StackSets for your organization.
See \url{https://www.paws-r-sdk.com/docs/cloudformation_activate_organizations_access/} for full documentation.
}
\keyword{internal}
| /cran/paws.management/man/cloudformation_activate_organizations_access.Rd | permissive | paws-r/paws | R | false | true | 671 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudformation_operations.R
\name{cloudformation_activate_organizations_access}
\alias{cloudformation_activate_organizations_access}
\title{Activate trusted access with Organizations}
\usage{
cloudformation_activate_organizations_access()
}
\description{
Activate trusted access with Organizations. With trusted access between StackSets and Organizations activated, the management account has permissions to create and manage StackSets for your organization.
See \url{https://www.paws-r-sdk.com/docs/cloudformation_activate_organizations_access/} for full documentation.
}
\keyword{internal}
|
#######################################################
# load training data
#######################################################
# Dependencies: readr for read_csv(), dplyr for the %>% pipelines
# (rename, left_join, select, mutate), and MLmetrics for AUC().
# Loaded here so the script runs standalone.
library(readr)
library(dplyr)
library(MLmetrics)

# Out-of-fold scores from three CNN variants; each file's `target` column is
# renamed V1/V2/V3 so the tables can be joined side by side, then the true
# label is attached -- one row per image.
val <- read_csv('/home/rstudio/CNN_model_result/my_train.csv')
M1 <- read_csv('/home/rstudio/CNN_model_result/my_train_EffNB0_stu_freeze_V3.csv') %>% rename(V1 = target)
M2 <- read_csv('/home/rstudio/CNN_model_result/my_train_EffNB0_stu_V2.csv') %>% rename(V2 = target)
M3 <- read_csv("/home/rstudio/CNN_model_result/my_train_EffNB3_stu_freeze_V5.csv") %>% rename(V3 = target)
lm_data <-
  left_join(M1, M2, by = c("image_name")) %>%
  left_join(., M3, by = c('image_name')) %>%
  left_join(., val %>% select(image_name, target))

# Stacked ensembles over the three model scores:
#   res  - logistic regression with all pairwise interactions
#   res2 - logistic regression with main effects only
res <- glm(target ~ V1+V2+V3+V1*V2+V1*V3+V2*V3, data = lm_data %>% select(-image_name), family = "binomial")
summary(res)
res2 <- glm(target ~ ., data = lm_data %>% select(-image_name), family = "binomial")

#######################################################
# load validation data
#######################################################
val <- read_csv('/home/rstudio/CNN_model_result/my_val.csv')
M1 <- read_csv('/home/rstudio/CNN_model_result/my_val_TTA_EffNB0_stu_freeze_V3.csv') %>% rename(V1 = target)
M2 <- read_csv('/home/rstudio/CNN_model_result/my_val_TTA_EffNB0_stu_V2.csv') %>% rename(V2 = target)
M3 <- read_csv("/home/rstudio/CNN_model_result/my_val_TTA_EffNB3_stu_freeze_V5.csv") %>% rename(V3 = target)
lm_data_val <-
  left_join(M1, M2, by = c("image_name")) %>%
  left_join(., M3, by = c('image_name')) %>%
  left_join(., val %>% select(image_name, target))

# performance on the held-out validation set
# BUG FIX: these metrics previously scored `lm_data` -- at this point still
# the TRAINING table built above -- so every reported AUC was in-sample.
# They now score `lm_data_val`, which is what this section loads.
AUC(y_pred = lm_data_val$V1, y_true = as.vector(lm_data_val$target))
AUC(y_pred = lm_data_val$V2, y_true = as.vector(lm_data_val$target))
AUC(y_pred = lm_data_val$V3, y_true = as.vector(lm_data_val$target))
# simple average of the three model scores
AUC(y_pred = lm_data_val %>% select(-image_name, -target) %>% apply(., 1, mean), y_true = as.vector(lm_data_val$target))
# stacked ensembles
pre <- predict(res, newdata = lm_data_val %>% select(-image_name, -target), type = "response")
AUC(y_pred = pre, y_true = as.vector(lm_data_val$target))
pre <- predict(res2, newdata = lm_data_val %>% select(-image_name, -target), type = "response")
AUC(y_pred = pre, y_true = as.vector(lm_data_val$target))

#######################################################
# load testing data
#######################################################
# The test set carries no `target` column; predictions become the submission.
val <- read_csv('/home/rstudio/CNN_model_result/my_test.csv')
M1 <- read_csv('/home/rstudio/CNN_model_result/my_test_TTA_EffNB0_stu_freeze_V3.csv') %>% rename(V1 = target)
M2 <- read_csv('/home/rstudio/CNN_model_result/my_test_TTA_EffNB0_stu_V2.csv') %>% rename(V2 = target)
M3 <- read_csv("/home/rstudio/CNN_model_result/my_test_TTA_EffNB3_stu_freeze_V5.csv") %>% rename(V3 = target)
lm_data <-
  left_join(M1, M2, by = c("image_name")) %>%
  left_join(., M3, by = c('image_name')) %>%
  left_join(., val %>% select(image_name))

# submission from the interaction ensemble
pre <- predict(res, newdata = lm_data %>% select(-image_name), type = "response")
sub_res <- lm_data %>% select(image_name) %>% mutate(target = pre)
getwd()
write.csv(sub_res, "lm_res.csv", row.names = F)

# submission from the plain mean of the three model scores
res <- lm_data %>% mutate(target = (V1+V2+V3)/3) %>% select(image_name, target)
write.csv(res, "mean.csv", row.names = F)
| /lm_for_model_res.R | no_license | borrisphc/kaggle_SIIM_ISIC | R | false | false | 4,033 | r | #######################################################
# load training data
#######################################################
# NOTE(review): this script assumes readr (read_csv), dplyr (%>%, rename,
# left_join, select, mutate) and MLmetrics (AUC) are already attached; no
# library() calls appear in this file -- confirm how it is sourced.
val <- read_csv('/home/rstudio/CNN_model_result/my_train.csv')
# Out-of-fold scores from three CNN variants; each file's `target` column is
# renamed V1/V2/V3 so the tables can be joined side by side.
M1 <- read_csv('/home/rstudio/CNN_model_result/my_train_EffNB0_stu_freeze_V3.csv') %>% rename(V1 = target)
M2 <- read_csv('/home/rstudio/CNN_model_result/my_train_EffNB0_stu_V2.csv') %>% rename(V2 = target)
M3 <- read_csv("/home/rstudio/CNN_model_result/my_train_EffNB3_stu_freeze_V5.csv") %>% rename(V3 = target)
# One row per image: the three model scores plus the true label.
lm_data <-
  left_join(M1, M2, by = c("image_name")) %>%
  left_join(.,M3, by = c('image_name')) %>%
  left_join(.,val %>% select(image_name, target))
# set.seed(8989)
# # split in train (train_val)
# train_id <- sample(1:nrow(lm_data),nrow(lm_data)*0.99999)
# lm_data_train <- lm_data[train_id,]
# lm_data_val <- lm_data[-train_id,]
# train model
# res: logistic-regression stacker with all pairwise interactions;
# res2: main-effects-only stacker.
res <- glm(target ~ V1+V2+V3+V1*V2+V1*V3+V2*V3, data = lm_data %>% select(-image_name), family = "binomial")
summary(res)
res2 <- glm(target ~ ., data = lm_data %>% select(-image_name), family = "binomial")
# AUC(y_pred = lm_data_val$V1, y_true = as.vector(lm_data_val$target))
# AUC(y_pred = lm_data_val$V2, y_true = as.vector(lm_data_val$target))
# AUC(y_pred = lm_data_val$V3, y_true = as.vector(lm_data_val$target))
#
# AUC(y_pred = lm_data_val %>% select(-image_name, -target) %>% apply(.,1,mean), y_true = as.vector(lm_data_val$target))
# pre <- predict(res, newdata = lm_data_val %>% select(-image_name, -target), type = "response")
# AUC(y_pred = pre, y_true = as.vector(lm_data_val$target))
# pre <- predict(res2, newdata = lm_data_val %>% select(-image_name, -target), type = "response")
# AUC(y_pred = pre, y_true = as.vector(lm_data_val$target))
#######################################################
# load validation data
#######################################################
val <- read_csv('/home/rstudio/CNN_model_result/my_val.csv')
M1 <- read_csv('/home/rstudio/CNN_model_result/my_val_TTA_EffNB0_stu_freeze_V3.csv') %>% rename(V1 = target)
M2 <- read_csv('/home/rstudio/CNN_model_result/my_val_TTA_EffNB0_stu_V2.csv') %>% rename(V2 = target)
M3 <- read_csv("/home/rstudio/CNN_model_result/my_val_TTA_EffNB3_stu_freeze_V5.csv") %>% rename(V3 = target)
lm_data_val <-
  left_join(M1, M2, by = c("image_name")) %>%
  left_join(.,M3, by = c('image_name')) %>%
  left_join(.,val %>% select(image_name, target))
# performance
# NOTE(review): the six metric calls below score `lm_data` -- at this point
# still the TRAINING table -- even though this section just built
# `lm_data_val`. The reported AUCs are therefore in-sample; they almost
# certainly should use `lm_data_val` instead. TODO confirm and fix.
AUC(y_pred = lm_data$V1, y_true = as.vector(lm_data$target))
AUC(y_pred = lm_data$V2, y_true = as.vector(lm_data$target))
AUC(y_pred = lm_data$V3, y_true = as.vector(lm_data$target))
AUC(y_pred = lm_data %>% select(-image_name, -target) %>% apply(.,1,mean), y_true = as.vector(lm_data$target))
pre <- predict(res, newdata = lm_data %>% select(-image_name, -target), type = "response")
AUC(y_pred = pre, y_true = as.vector(lm_data$target))
pre <- predict(res2, newdata = lm_data %>% select(-image_name, -target), type = "response")
AUC(y_pred = pre, y_true = as.vector(lm_data$target))
#######################################################
# load testing data
#######################################################
# Test set has no labels; the joined table keeps image_name + V1..V3 only.
val <- read_csv('/home/rstudio/CNN_model_result/my_test.csv')
M1 <- read_csv('/home/rstudio/CNN_model_result/my_test_TTA_EffNB0_stu_freeze_V3.csv') %>% rename(V1 = target)
M2 <- read_csv('/home/rstudio/CNN_model_result/my_test_TTA_EffNB0_stu_V2.csv') %>% rename(V2 = target)
M3 <- read_csv("/home/rstudio/CNN_model_result/my_test_TTA_EffNB3_stu_freeze_V5.csv") %>% rename(V3 = target)
lm_data <-
  left_join(M1, M2, by = c("image_name")) %>%
  left_join(.,M3, by = c('image_name')) %>%
  left_join(.,val %>% select(image_name))
# Submission from the interaction stacker.
pre <- predict(res, newdata = lm_data %>% select(-image_name), type = "response")
sub_res <- lm_data %>% select(image_name) %>% mutate(target = pre)
getwd()
write.csv(sub_res, "lm_res.csv", row.names = F)
# Submission from the plain mean of the three model scores.
res <- lm_data %>% mutate( target = (V1+V2+V3)/3) %>% select(image_name, target)
write.csv(res, "mean.csv", row.names = F)
|
# Load Zillow county-level median listing and sold prices -------------------
ListingPrice <- read.csv("County_MedianListingPrice_AllHomes.csv")
SoldPrice <- read.csv("County_MedianSoldPrice_AllHomes.csv")
# Compare state codes as character strings (the columns may be read as factors).
States <- as.character(ListingPrice$State)
StatesSold <- as.character(SoldPrice$State)
# Keep only California rows.
# FIX: the original filled empty data frames row-by-row inside
# `for (n in 1:3078)`, which hard-coded the row count, grew the objects one
# row at a time, and left every non-CA row as an NA placeholder. A vectorized
# logical subset does the same job for any number of rows.
ListingCA <- ListingPrice[grepl("CA", States), ]
SoldCA <- SoldPrice[grepl("CA", StatesSold), ]
# Drop counties with missing observations (in the original, na.omit() also
# swept away the NA placeholder rows created by the loop).
ListingCA <- na.omit(ListingCA)
SoldCA <- na.omit(SoldCA)
# ListingCA has repeated rows, so keep each county once.
ListingCA <- unique(ListingCA)
# Drop the extra region-description columns for easier plotting:
# column 1 is the region name, the remaining columns are the monthly series.
PListingCA <- ListingCA[c(1, 6:82)]
PSoldCA <- SoldCA[c(1, 6:234)]
# Pull the Santa Clara row (6th CA county) and drop the name column, keeping
# the 229 monthly sold-price columns.
SCSold <- PSoldCA[6, ]
SCSold <- SCSold[c(2:230)]
# Reshape the single row into a (Date, Price) table for plotting.
# FIX: the original grew Date/Price element-by-element in a loop and parsed
# the column names only to overwrite them afterwards; build both columns
# directly. Dates are fractional years: the series starts at month 23956/12.
n_months <- length(SCSold)
SCSoldGraph <- data.frame(
  Date = 23956/12 + (1/12) * (seq_len(n_months) - 1),
  Price = unlist(SCSold[1, ], use.names = FALSE)
)
# Plot the raw series.
# FIX: don't reinstall ggplot2 on every run; install only when it is missing.
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)
graph1 <- ggplot(data = SCSoldGraph, aes(x = Date, y = Price)) + geom_point(colour = "red")
graph1 + labs(title = "Price Changes in Santa Clara Homes", y = "Median Price in Dollars")
# Santa Clara listing prices (6th CA county row), dropping the name column.
SCListing <- PListingCA[6, ]
SCListing <- SCListing[c(2:78)]
# Align the sold series with the 77 months covered by the listing series.
SCSoldCut <- SCSold[c(151:227)]
# Build the comparison table directly instead of growing three vectors in a
# loop; dates are fractional years starting at month 24106/12.
n_cmp <- length(SCSoldCut)
SCComparisonGraph <- data.frame(
  Date = 24106/12 + (1/12) * (seq_len(n_cmp) - 1),
  Listing = unlist(SCListing[1, ], use.names = FALSE),
  Sold = unlist(SCSoldCut[1, ], use.names = FALSE)
)
# Listing (blue) vs sold (red) prices on one plot.
graph2 <- ggplot() +
  # blue plot
  geom_point(data = SCComparisonGraph, aes(x = Date, y = Listing), colour = "blue") +
  # red plot
  geom_point(data = SCComparisonGraph, aes(x = Date, y = Sold), colour = "red")
graph2 + labs(title = "Listing and Sold Prices in Santa Clara Homes", y = "Listing(Blue) vs. Sold(Red) Price in Dollars")
# Median rent data -----------------------------------------------------------
RentalPrice <- read.csv("County_MedianRentalPrice_AllHomes.csv")
# Santa Clara rentals (row 17) over the 63 months in columns 6:68.
SCRent <- RentalPrice[17, ]
SCRent <- SCRent[c(6:68)]
# Sold prices for the same 63 months.
SCSoldRentComp <- SCSold[c(167:229)]
# Monthly cost of owning each sold home: 30-year (360-payment) mortgage at 4%
# APR with a $50,000 down payment, plus 1.25% yearly property tax paid monthly:
#   payment = P * i(1+i)^360 / ((1+i)^360 - 1) + price * 0.0125 / 12
# FIX: computed with vectorized arithmetic instead of growing Mortgage in a loop.
i = 0.04/12
sold_price <- unlist(SCSoldRentComp[1, ], use.names = FALSE)
Mortgage <- (sold_price - 50000) * (i * (1 + i)^360) / (((1 + i)^360) - 1) +
  (sold_price * 0.0125) / 12
# Date / Rent / Mortgage table; dates are fractional years starting at 24122/12.
SCRentGraph <- data.frame(
  Date = 24122/12 + (1/12) * (seq_along(Mortgage) - 1),
  Rent = unlist(SCRent[1, ], use.names = FALSE),
  Mortgage = Mortgage
)
# Rent (green) vs mortgage + tax (violet).
graph3 <- ggplot() +
  geom_point(data = SCRentGraph, aes(x = Date, y = Rent), colour = "green") +
  geom_point(data = SCRentGraph, aes(x = Date, y = Mortgage), colour = "violet")
graph3 + labs(title = "Renting vs Mortgage & Property Tax Monthly Prices in Santa Clara Homes", y = "Rent(Green) vs. Mortgage&Tax(Violet) Price in Dollars")
# Fit a linear trend (Price ~ Date) to the sold-price series and extrapolate
# the median Santa Clara home price 30 years out.
lm(formula = Price~Date, data = SCSoldGraph)
# check accuracy of the fit
summary(lm(formula = Price~Date, data = SCSoldGraph))
# Printed fit (both terms significant at p < 2e-16):
#   (Intercept) -4.479e+07, Date 2.259e+04
# NOTE(review): the prediction below hard-codes the rounded printed
# coefficients; coef(lm(...)) would avoid the copy/paste rounding.
# 24544/12 is the fractional-year representation of the target month in 2045.
PriceThirty = (-44792323 + 22588*(24544/12))
# ~1407666
# Linear-regression plot showing the expected rise in value.
# FIX: the original rebuilt the 229-row Date/Price table with growing
# vectors, duplicating the work already done for SCSoldGraph; reuse that
# table and append the 30-year prediction as the final row.
SCPredictGraph <- rbind(SCSoldGraph, data.frame(Date = 24544/12, Price = PriceThirty))
# History plus predicted point, with the fitted line extended full range.
graph4 <- ggplot(data=SCPredictGraph, aes(x = Date, y = Price)) + geom_point(colour = "red") + geom_smooth(method=lm, se=FALSE, fullrange=T, colour = "red")
graph4 + labs(title = "Predicted Rise in Santa Clara Home Prices", y = "Line of Predicted Median Price in Dollars")
# Total paid over a 30-year mortgage (last observed monthly payment x 360
# months) and how that compares to the home's expected value.
TotalPayment = (SCRentGraph$Mortgage[63]*360)
# ~1552382
# Net cost of owning = total outlay minus the equity held at the end
# (the predicted median home value).
Total = TotalPayment - PriceThirty
# ~144715
# Fit a linear trend to rents and project it over the next 30 years.
lm(formula = Rent~Date, data = SCRentGraph)
# NOTE(review): the projection below uses the rounded printed coefficients
# (-279116.0 and 139.8) from the summary shown further down; coef() on the
# fitted model would be slightly more precise.
# FIX: vectorized projection instead of growing MonthlyRent in a 360-step loop.
month_idx <- 1:360
MonthlyRent <- -279116.0 + ((24183 + month_idx)/12)*139.8
TotalRent <- sum(MonthlyRent)
# check for significance
summary(lm(formula = Rent ~ Date, data = SCRentGraph))
# Both coefficients are highly significant (p < 2e-6):
#   (Intercept) -279116.01, Date 139.79
# graphical representation of the rent trend, extended out to 2046
graph5 <- ggplot(data=SCRentGraph, aes(x = Date, y = Rent), colour = "green") + geom_point(colour = "green") + geom_smooth(method=lm, se=FALSE, fullrange=T, colour = "green")
graph5 + labs(title = "Predicted Rise in Santa Clara Rent Prices", y = "Line of Predicted Median Rent Price in Dollars") + xlim(2010, 2046)
TotalRent
# ~1698759
# Optimistic scenario: rent frozen at its latest observed value for 30 years.
OptimisticRent = (SCRentGraph$Rent[63])*360
OptimisticRent
# ~1017000
# Bar chart of total outlays.
# FIX: factor levels sort alphabetically ("Mortgage", "Predicted Median Home
# Value", "Rent"), so the original unnamed colour vectors painted Rent red
# and Mortgage green -- the opposite of every earlier plot. Naming the
# values ties each colour to its bar explicitly.
Overall <- data.frame(type = factor(c("Rent", "Mortgage", "Predicted Median Home Value")),
                      total = c(TotalRent, TotalPayment, PriceThirty))
graph6 <- ggplot(data = Overall, aes(x=type, y=total, fill=type)) +
  geom_bar(stat="identity")
graph6 + labs(title = "What You Paid After 30 Years", x = "", y = "Price in Dollars") +
  scale_fill_manual(values=c("Rent" = "green", "Mortgage" = "violet", "Predicted Median Home Value" = "red")) + guides(fill=FALSE)
DifferenceOverall <- data.frame(type = factor(c("Rent", "Mortgage")),
                                total = c(TotalRent, Total))
# Money actually "lost" to housing after 30 years: all rent paid vs the net
# cost of owning.
graph7 <- ggplot(data = DifferenceOverall, aes(x=type, y=total, fill=type)) +
  geom_bar(stat="identity")
graph7 + labs(title = "How Much is Actually Spent After 30 Years (2015 to 2045 Prediction)", x = "", y = "Price in Dollars") +
  scale_fill_manual(values=c("Rent" = "green", "Mortgage" = "violet")) + guides(fill=FALSE)
| /Blog.R | no_license | alejandrayllon/BlogProject | R | false | false | 7,517 | r | #load in data
ListingPrice <- read.csv("County_MedianListingPrice_AllHomes.csv")
SoldPrice <- read.csv("County_MedianSoldPrice_AllHomes.csv")
#get rid of layers in the states
States <- as.character(ListingPrice$State)
StatesSold <- as.character(SoldPrice$State)
#declare listing and sold prices variables for CA
ListingCA <- data.frame(ListingPrice[0,])
SoldCA <- data.frame(SoldPrice[0,])
#loop to only keep listings and sold houses in California
for(n in 1:3078)
{
if(grepl("CA", States[n])){
ListingCA[n,] <- ListingPrice[n,]
}
if(grepl("CA", StatesSold[n])){
SoldCA[n,] <- SoldPrice[n,]
}
}
#omit NAs
ListingCA <- na.omit(ListingCA)
SoldCA <- na.omit(SoldCA)
#ListingCA has repeats, so we need to get rid of those
ListingCA <- unique(ListingCA)
#get rid of columns that give extra information about region for easier plotting
PListingCA <- ListingCA[c(1, 6:82)]
PSoldCA <- SoldCA[c(1, 6:234)]
#separate Santa Clara data
SCSold <- PSoldCA[6,]
SCSold <- SCSold[c(2:230)]
#put Santa Clara in graphing format
Date = 0
Price = 0
for(n in 1:229)
{
Date[n] <- names(SCSold)[n]
Price[n] <- SCSold[1,n]
}
SCSoldGraph <- data.frame(Date, Price)
SCSoldGraph$Date <- as.numeric(SCSoldGraph$Date)
#put date in correct format
a = 23956/12
b = 1/12
for(n in 1:229)
{
SCSoldGraph$Date[n] <- a + (b*(n-1))
}
#make graph with ggplot
install.packages("ggplot2")
library(ggplot2)
graph1 <- ggplot(data=SCSoldGraph, aes(x = Date, y = Price)) + geom_point(colour = "red")
graph1 + labs(title = "Price Changes in Santa Clara Homes", y = "Median Price in Dollars")
#extract Santa Clara data
SCListing <- PListingCA[6,]
SCListing <- SCListing[c(2:78)]
#take out the columns in SCSold that don't have corresponding values in SCListing
SCSoldCut <- SCSold[c(151:227)]
#put Santa Clara comparison in graphing format
Date = 0
Listing = 0
Sold = 0
for(n in 1:77)
{
Date[n] <- names(SCSoldCut)[n]
Listing[n] <- SCListing[1,n]
Sold[n] <- SCSoldCut[1,n]
}
SCComparisonGraph <- data.frame(Date, Listing, Sold)
SCComparisonGraph$Date <- as.numeric(SCComparisonGraph$Date)
#change date to correct format
a = 24106/12
b = 1/12
for(n in 1:77)
{
SCComparisonGraph$Date[n] <- a + (b*(n-1))
}
#make graph with ggplot to contrast listing and sold prices in Santa Clara
graph2 <- ggplot() +
# blue plot
geom_point(data=SCComparisonGraph, aes(x=Date, y=Listing), colour = "blue") +
#geom_smooth(data=SCComparisonGraph, aes(x=Date, y=Listing), fill="blue",
# colour="darkblue", size=1) +
# red plot
geom_point(data=SCComparisonGraph, aes(x=Date, y=Sold), colour = "red")# +
#geom_smooth(data=SCComparisonGraph, aes(x=Date, y=Sold), fill="red",
# colour="red", size=1)
graph2 + labs(title = "Listing and Sold Prices in Santa Clara Homes", y = "Listing(Blue) vs. Sold(Red) Price in Dollars")
#bring in rental data
RentalPrice <- read.csv("County_MedianRentalPrice_AllHomes.csv")
#separate Santa Clara rental data
SCRent <- RentalPrice[17,]
SCRent <- SCRent[c(6:68)]
#bring in sold data for comparison
SCSoldRentComp <- SCSold[c(167:229)]
#find mortgage prices for sold homes with 4% interest rate
Mortgage = 0
i = 0.04/12
for(n in 1:63)
{
Mortgage[n] <- ((SCSoldRentComp[1,n]-50000)*(i*(1 + i)^360)/(((1 + i)^360)-1) + ((SCSoldRentComp[1,n]*0.0125)/12))
}
#put data in correct format
Date = 0
Rent = 0
for(n in 1:63)
{
Date[n] <- names(SCRent)[n]
Rent[n] <- SCRent[1,n]
}
SCRentGraph <- data.frame(Date, Rent, Mortgage)
SCRentGraph$Date <- as.numeric(SCRentGraph$Date)
#change date to correct format
a = 24122/12
b = 1/12
for(n in 1:63)
{
SCRentGraph$Date[n] <- a + (b*(n-1))
}
#make graph with ggplot to contrast listing and sold prices in Santa Clara
graph3 <- ggplot() +
# blue plot
geom_point(data=SCRentGraph, aes(x=Date, y=Rent), colour = "green") +
# red plot
geom_point(data=SCRentGraph, aes(x=Date, y=Mortgage), colour = "violet")
graph3 + labs(title = "Renting vs Mortgage & Property Tax Monthly Prices in Santa Clara Homes", y = "Rent(Green) vs. Mortgage&Tax(Violet) Price in Dollars")
# Fit a linear trend of sold price on date to project the median home
# price 30 years out.
lm(formula = Price ~ Date, data = SCSoldGraph)
# Check fit quality; both coefficients are highly significant (p < 2e-16):
#   (Intercept) -4.479e+07,  Date 2.259e+04
summary(lm(formula = Price ~ Date, data = SCSoldGraph))
# Projected median price at fractional year 24544/12 (~ $1,407,666),
# using the fitted coefficients transcribed from the summary above.
# NOTE(review): these constants are hand-copied; coef(fit) would avoid
# transcription drift, but would change rounding of downstream values.
PriceThirty <- -44792323 + 22588 * (24544 / 12)
# Build the 229 observed prices plus the single projected point.
# Dates are fractional years starting at 23956/12, one month (1/12)
# apart; row 230 is the 30-year projection. The original filled these
# in element-by-element loops and then overwrote the Date column —
# constructing the final values directly is equivalent and simpler.
Price <- c(as.numeric(SCSold[1, seq_len(229)]), PriceThirty)
Date <- c(23956 / 12 + (0:228) / 12, 24544 / 12)
SCPredictGraph <- data.frame(Date, Price)
# Scatter of observed prices with an extended least-squares trend line.
graph4 <- ggplot(data = SCPredictGraph, aes(x = Date, y = Price)) +
  geom_point(colour = "red") +
  geom_smooth(method = lm, se = FALSE, fullrange = TRUE, colour = "red")
graph4 + labs(title = "Predicted Rise in Santa Clara Home Prices", y = "Line of Predicted Median Price in Dollars")
# Total paid over a 30-year mortgage at the final month's payment
# level, and how that compares to the projected home value.
TotalPayment <- SCRentGraph$Mortgage[63] * 360   # ~1,552,382
Total <- TotalPayment - PriceThirty              # ~144,715 net cost
# Linear trend of rent on date, used to project rent 30 years forward.
lm(formula = Rent ~ Date, data = SCRentGraph)
# Projected monthly rent for each of the next 360 months, using the
# fitted coefficients (Intercept -279116.0, slope 139.8). Vectorized;
# the total is just the sum, replacing the original accumulation loop.
MonthlyRent <- -279116.0 + ((24183 + seq_len(360)) / 12) * 139.8
TotalRent <- sum(MonthlyRent)                    # ~1,698,759
# Check for significance: both rent-model coefficients are highly
# significant (p ~ 1.4e-7 to 1.7e-7).
summary(lm(formula = Rent ~ Date, data = SCRentGraph))
# Observed rent with extended trend line, out to 2046.
graph5 <- ggplot(data = SCRentGraph, aes(x = Date, y = Rent), colour = "green") +
  geom_point(colour = "green") +
  geom_smooth(method = lm, se = FALSE, fullrange = TRUE, colour = "green")
graph5 + labs(title = "Predicted Rise in Santa Clara Rent Prices", y = "Line of Predicted Median Rent Price in Dollars") + xlim(2010, 2046)
TotalRent
# Optimistic scenario: rent frozen at today's level for 30 years.
OptimisticRent <- SCRentGraph$Rent[63] * 360     # ~1,017,000
OptimisticRent
# Bar chart comparing total rent, total mortgage payments, and the
# projected median home value after 30 years.
Overall <- data.frame(
  type  = factor(c("Rent", "Mortgage", "Predicted Median Home Value")),
  total = c(TotalRent, TotalPayment, PriceThirty)
)
graph6 <- ggplot(data = Overall, aes(x = type, y = total, fill = type)) +
  geom_bar(stat = "identity")
graph6 +
  labs(title = "What You Paid After 30 Years", x = "", y = "Price in Dollars") +
  scale_fill_manual(values = c("green", "violet", "red")) +
  guides(fill = FALSE)
# Amounts actually "lost" to housing after 30 years: all rent paid
# versus mortgage outlay net of the home's projected value.
# NOTE(review): guides(fill = FALSE) is deprecated in newer ggplot2
# (use "none"); kept as-is to preserve behavior on the original version.
DifferenceOverall <- data.frame(
  type  = factor(c("Rent", "Mortgage")),
  total = c(TotalRent, Total)
)
graph7 <- ggplot(data = DifferenceOverall, aes(x = type, y = total, fill = type)) +
  geom_bar(stat = "identity")
graph7 +
  labs(title = "How Much is Actually Spent After 30 Years (2015 to 2045 Prediction)", x = "", y = "Price in Dollars") +
  scale_fill_manual(values = c("green", "violet")) +
  guides(fill = FALSE)
|
\name{zoeppritz}
\alias{zoeppritz}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Zoeppritz Equations}
\description{
Calculate the P and S-wave scattering amplitudes for a plane
wave at an interface.
}
\usage{
zoeppritz(icoef, vp1, vp2, vs1, vs2, rho1, rho2, incw)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{icoef}{type of output: Amplitude=1, Potential=2, Energy=3 }
\item{vp1}{P-wave Velocity of Upper Layer, km/s}
\item{vp2}{P-wave Velocity of Lower Layer, km/s}
\item{vs1}{S-wave Velocity of Upper Layer, km/s}
\item{vs2}{S-wave Velocity of Lower Layer, km/s}
\item{rho1}{Density of Upper Layer, kg/m3}
\item{rho2}{Density of Lower Layer, kg/m3}
\item{incw}{integer,Incident Wave: P=1, S=2 }
}
\details{
Coefficients are calculated at angles from 0-90 degrees. Zero is
returned where coefficients are imaginary.
}
\value{
List:
\item{angle}{Incident angles (degrees)}
\item{rmat}{Matrix of 4 by n reflection coefficients for each angle}
\item{rra}{Matrix of 4 by n real part of scattering matrix}
\item{rra}{Matrix of 4 by n imaginary part of scattering matrix}
\item{ang}{Matrix of 4 by n phase angle}
\item{incw}{integer, from input parameter }
\item{icoef}{integer, from input parameter }
}
\references{
Young, G.B., Braile, L. W. 1976. A computer program for the application
of Zoeppritz's amplitude equations and Knott's energy equations,
\emph{ Bulletin of the Seismological Society of America}, vol.66,
no.6,1881-1885.
K. Aki and P.G. Richards.\emph{Quantitative seismology}.
University Science Books, Sausalito, Calif., 2nd edition, 2002.
}
\author{Jonathan M. Lees<jonathan.lees@unc.edu>}
\note{
Based on the fortran algorithm in Young and Braile.
Uses a linear approximation by Aki and Richards.
}
\seealso{pzoeppritz, plotzoeppritz}
\examples{
######### set up 2-layer model
alpha1 = 4.98
beta1 = 2.9
rho1 = 2.667
alpha2 = 8.0
beta2 = 4.6
rho2 = 3.38
################### P-wave incident = 1
incw=1;
icoef=1
A = zoeppritz(icoef, alpha1, alpha2, beta1, beta2, rho1,rho2, incw)
plot(A$angle, A$rmat[,1], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="P-wave incident/P-wave Reflected" )
plot(A$angle, A$rmat[,2], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="P-wave incident/S-wave Reflected" )
plot(A$angle, A$rmat[,3], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="P-wave incident/P-wave Refracted" )
plot(A$angle, A$rmat[,4], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="P-wave incident/S-wave Refracted" )
################### S-wave incident = 2
incw=2
icoef=1
A = zoeppritz(icoef, alpha1, alpha2, beta1, beta2, rho1,rho2, incw)
plot(A$angle, A$rmat[,1], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="S-wave incident/P-wave Reflected" )
plot(A$angle, A$rmat[,2], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="S-wave incident/S-wave Reflected" )
plot(A$angle, A$rmat[,3], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="S-wave incident/P-wave Refracted" )
plot(A$angle, A$rmat[,4], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="S-wave incident/S-wave Refracted" )
}
\keyword{misc}
| /man/zoeppritz.Rd | no_license | cran/zoeppritz | R | false | false | 3,262 | rd | \name{zoeppritz}
\alias{zoeppritz}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Zoeppritz Equations}
\description{
Calculate the P and S-wave scattering amplitudes for a plane
wave at an interface.
}
\usage{
zoeppritz(icoef, vp1, vp2, vs1, vs2, rho1, rho2, incw)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{icoef}{type of output: Amplitude=1, Potential=2, Energy=3 }
\item{vp1}{P-wave Velocity of Upper Layer, km/s}
\item{vp2}{P-wave Velocity of Lower Layer, km/s}
\item{vs1}{S-wave Velocity of Upper Layer, km/s}
\item{vs2}{S-wave Velocity of Lower Layer, km/s}
\item{rho1}{Density of Upper Layer, kg/m3}
\item{rho2}{Density of Lower Layer, kg/m3}
\item{incw}{integer,Incident Wave: P=1, S=2 }
}
\details{
Coefficients are calculated at angles from 0-90 degrees. Zero is
returned where coefficients are imaginary.
}
\value{
List:
\item{angle}{Incident angles (degrees)}
\item{rmat}{Matrix of 4 by n reflection coefficients for each angle}
\item{rra}{Matrix of 4 by n real part of scattering matrix}
\item{rra}{Matrix of 4 by n imaginary part of scattering matrix}
\item{ang}{Matrix of 4 by n phase angle}
\item{incw}{integer, from input parameter }
\item{icoef}{integer, from input parameter }
}
\references{
Young, G.B., Braile, L. W. 1976. A computer program for the application
of Zoeppritz's amplitude equations and Knott's energy equations,
\emph{ Bulletin of the Seismological Society of America}, vol.66,
no.6,1881-1885.
K. Aki and P.G. Richards.\emph{Quantitative seismology}.
University Science Books, Sausalito, Calif., 2nd edition, 2002.
}
\author{Jonathan M. Lees<jonathan.lees@unc.edu>}
\note{
Based on the fortran algorithm in Young and Braile.
Uses a linear approximation by Aki and Richards.
}
\seealso{pzoeppritz, plotzoeppritz}
\examples{
######### set up 2-layer model
alpha1 = 4.98
beta1 = 2.9
rho1 = 2.667
alpha2 = 8.0
beta2 = 4.6
rho2 = 3.38
################### P-wave incident = 1
incw=1;
icoef=1
A = zoeppritz(icoef, alpha1, alpha2, beta1, beta2, rho1,rho2, incw)
plot(A$angle, A$rmat[,1], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="P-wave incident/P-wave Reflected" )
plot(A$angle, A$rmat[,2], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="P-wave incident/S-wave Reflected" )
plot(A$angle, A$rmat[,3], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="P-wave incident/P-wave Refracted" )
plot(A$angle, A$rmat[,4], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="P-wave incident/S-wave Refracted" )
################### S-wave incident = 2
incw=2
icoef=1
A = zoeppritz(icoef, alpha1, alpha2, beta1, beta2, rho1,rho2, incw)
plot(A$angle, A$rmat[,1], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="S-wave incident/P-wave Reflected" )
plot(A$angle, A$rmat[,2], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="S-wave incident/S-wave Reflected" )
plot(A$angle, A$rmat[,3], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="S-wave incident/P-wave Refracted" )
plot(A$angle, A$rmat[,4], xlab="Incident Angle", ylab="Ratio of Amplitudes",
main="S-wave incident/S-wave Refracted" )
}
\keyword{misc}
|
#Importing the dataset
# Market-basket (association-rule) mining of grocery transactions using
# the ECLAT algorithm from the 'arules' package.
library(arules)
# Plain data.frame read of the raw CSV (no header row).
# NOTE(review): this value is immediately overwritten by the
# read.transactions() call below; it only serves as a raw-file preview
# and could be removed.
dataset = read.csv('Market_Basket_Optimisation.csv', header = FALSE)
# Re-read as a sparse transactions object (one basket per line,
# comma-separated); duplicate items within a basket are dropped.
dataset = read.transactions('Market_Basket_Optimisation.csv', sep = ',', rm.duplicates = TRUE)
summary(dataset)
# Bar chart of the ten most frequently purchased items.
itemFrequencyPlot(dataset, topN = 10)
#Training the ECLAT model
# Frequent itemsets with support >= 0.4% containing at least two items.
rules = eclat(data = dataset, parameter = list(support = 0.004, minlen = 2))
#Visualizing the results
# The ten itemsets with the highest support.
inspect(sort(rules, by = 'support')[1:10])
library(arules)
dataset = read.csv('Market_Basket_Optimisation.csv', header = FALSE)
dataset = read.transactions('Market_Basket_Optimisation.csv', sep = ',', rm.duplicates = TRUE)
summary(dataset)
itemFrequencyPlot(dataset, topN = 10)
#Training the ECLAT model
rules = eclat(data = dataset, parameter = list(support = 0.004, minlen = 2))
#Visualizing the results
inspect(sort(rules, by = 'support')[1:10]) |
################################################################
#
#
# INTRO TO R
#
# The R website:
#
# http://cran.r-project.org
#
# (Google "R" -> one of the first entries)
#
# Downloading R:
#
# -> Sidebar "Download, Packages": CRAN
# -> any site in the US
# -> Windows
# -> base
# -> "Download R-2.... for Windows (32 bit build)"
# -> installation dialog in setup wizard
#
# The setup program should self-install and create an icon on your desktop.
# Clicking the icon should open up an R interpreter window ("R Gui").
#
# The base is really just the base. There are many contributed
# library packages whose binaries can be downloaded from
#
# -> Packages
#
# You will not have to download them explicitly, though;
# there are R functions that allow you to get them while running R.
# In the R Gui you can also go through the "Packages" item in the toolbar.
#
################
#
# OPERATION OF R:
#
# * For non-Emacs users:
#
# 0) go to the class web page and download this file.
# Open an editor (Word, Wordpad,...) on this file (change .R to .txt)
# AND open up an R GUI window by clicking on the R icon.
# Reduce the size of both windows so both are accessible.
#
# 1) Copy R code from this file into the R interpreter window.
# Use shortcuts: In the editor highlight lines, hit <Ctrl>-C,
# then move to the R window and hit <Ctrl>-V.
# Examples:
1+2
1:10
2^(1:20)
runif(10)
rnorm(10)
1:10 + rnorm(10)
#
# 2) Experiment with R code
# by editing THIS file in the editor window, or
# by editing the command line in the R window (if it's one-liners).
#
# Commands for line editing in the R interpreter window:
# Note: "^p" means you hold down the modifier key <Ctrl> and hit "p",
# just like the modifier key <Shift> used for capitalization
# ^p get back the previous command line for editing and executing
# repeating ^p goes further back in the command history
# ^b step back one character in the command line
# ^f step forward one character in the command line
# ^a move to the beginning of the line
# ^e move to the end of the line
# ^d or <Delete> to delete the character under the cursor
# ^h or <Backspace> to delete the previous character
# ^k kill the rest of the line starting from the cursor
# otherwise: you are in insert mode
# (These editing commands are borrowed from the Emacs editor.)
#
#
# * For Emacs users:
#
# download the ESS macros ("Emacs Speaks Statistics") from r-project.org:
# -> R GUIs -> Emacs (ESS) -> Downloads
# Download the latest zip or tar.gz file.
# Unpack and install; ESS should work right away. Skip to "Operation:" and try.
# If it doesn't work right away,
# you may have to put these lines in your .emacs file:
## (setq inferior-R-program-name "c:/Program Files/R/R-2.7.1pat/bin/Rterm.exe")
# ^^^^ path to your R executable ^^^^
## (load-file "c:/EMACS/ESS/ess-5.3.0/lisp/ess-site.el")
# ^^^path to the file "ess-site.el"^^^
#
# Operation:
# - Split the Emacs window into two windows:
# ^x 2
# - Edit THIS file in the upper window.
# ^x ^f filepath <Enter>
# - Start R in the lower window:
# ^x o (move the cursor to the lower window)
# <Alt>-x R (start R inside Emacs)
# - If you like to shrink one of the windows, put this line in your .emacs:
# (global-set-key "\M-o" (lambda () (interactive) (shrink-window 1)))
# Then <Alt>-o will shrink the present window and expand
# the other window by one line.
# - There are macros to copy and execute lines, functions, and regions
# from the upper buffer into the lower buffer:
# ^c ^j (execute current line and leave the cursor in place)
# ^c ^n (execute current line and move to next line of R code)
# ^c ^f (execute function, assuming the cursor is
# inside its body)
# ^c ^r (execute region)
# - A small nuisance is that the lower (R) window does not move
# the bottom line to the center after executing an expression.
# This can be fixed by putting the following in your .emacs:
# (global-set-key "\M-s" "\C-xo\M->\C-l\C-xo")
# Then <Alt>-s will move the bottom line to the center.
#
#
##################
#
#
# * R is an INTERPRETED LANGUAGE:
# Users type expressions and see results immediately.
# Example:
for(i in 1:10) { if(i%%2==0) print(i) }
# As opposed to:
# - ... languages (C, Fortran)
# - ... software (such as SAS' JMP)
#
#
# * R is HIGH-LEVEL:
# It operates on complex data structures such as
# vectors, matrices, arrays, lists, dataframes,
# as opposed to C and Fortran that operate on individual numbers only.
# (This requires some getting used to for C programmers.)
#
#
# * PRIMARY BEHAVIOR: Whatever is typed, print the results.
2
print(2) # same
"a"
print("a") # same
# (Q: Why is there '[1]' preceding the results? A: ...)
# Vector of length greater than 1:
1:3
print(1:3) # same
#
#
# * SYNTAX:
# - Largely scientific/math notation; base 10.
# - A wealth of functions.
# - Comments run from a "#" to the end of the line; no multiline comments.
# - Spaces are irrelevant, except inside strings:
2+3; 2 + 3; "ab"; "a b"
# - Statements can run over multiple lines:
2 + 3 + # \
4 # / One statement
# But if a statement is syntactically complete at
# the end of the line, it won't continue:
2 + 3 # \
+ 4 # / Two statements
# - Statements can be separated by ";".
2; 3^3; sqrt(9)
#
#---
#
# * BASIC DATA TYPES:
#
#
# - NUMERIC: double precision by default (How many bytes?)
# Integers are represented as doubles, although the print function
# shows them as integer:
-2.000
1E5
2E-3
# The usual unary and binary operations and analytic functions:
# +, -, *, /, %%, %/%, ^, log, sqrt, sin, acos...
2+3 # Add.
5.3*1E10 # Multiply.
10%%3 # Modulo.
exp(1) # Exponentiation.
log(2.718282) # Log of the number 'e'; 'log' is e-based.
log10(10) # 10-based log
pi # That's the number 'greek pi', 3.14159
sin(pi/2) # Angles are to be given in arcs, not degrees.
sin(pi) # Dito.
acos(0) # This is the inverse of cos, arccos, hence pi/2.
pi/2 # This is the only hardwired constant: 3.14159...
#
#
# - STRINGS: can be single or double quoted, but the print function
# uses double quotes.
'a'; "a"; 'abc'; "abc"
# (In C and Python strings are character vectors.
# In R strings are basic types; there is no single character type.
# Characters are just strings of length 1.
# There is no indexed access to individual characters and
# substrings in R; one uses the "substring" function instead:
substring("abcxyz",4,6)
# Other basic string manipulations:
paste("my","word")
nchar("Supercalifragilistikexpialidocious")
# There are two hardwired character vectors that contain the lower and
# upper case letters:
letters
LETTERS
#
#
# - LOGICAL values: have two names each, but the print function
# always uses the longer.
TRUE; FALSE; T; F
# They are implemented as the values 1 and 0 for T and F, respectively.
# They are the result of the usual comparisons: <, >, <=, >=, ==, !=
1<2; 1>2; "ab" <= "abcd"
"ab" > "ac"; "ab" != "AB"
"ab" != 2; 0==F; 1==T
#
#
# - MISSING values NA, Inf, -Inf:
NA; NaN; Inf; -Inf; 1/0; Inf==1/0; 0/0
# Watch out: the following does not give T!!!
NA==1
# If you want to test for NA, you must use the function is.na():
is.na(NA)
#
#
# - FUNCTIONS:
# * R is a FUNCTIONAL LANGUAGE:
# Functions return values that in turn can be arguments to functions.
# Expressions evaluate inside out, e.g., log(2*2.5))^3:
2.5; 2*2.5; log(2*2.5); log(2*2.5)^3
#
#
# * STATEMENTS/EXPRESSIONS:
# There are two types of expressions: assignments and side effects.
# 1) Assignments allocate data structures and
# make variables point to them.
x <- 1:3 # Allocate a vector 1,2,3 and make 'x' point to it.
# 2) Side effects are essentially display operations
# such as printing, plotting, showing help; unlike assignments,
# they don't change the computational state of the R system.
x
print(x)
plot(x)
help("+") # Show the help page of addition.
help(sqrt) # Show the help page of the square root function.
help("sqrt") # Dito.
# 3) Composite Statements:
{print(1); plot(1:10)}
# Will be needed in loops and functions.
#
#
# - Assignments to variables come in four equivalent syntactic forms:
# x <- ...
# x = ...
# ... -> x
# assign("x",...)
# Examples:
x <- sqrt(2 * 3) # Root of product of 2 and 3
x = sqrt(2 * 3) # Both can be used: '=' and '<-'
sqrt(2 * 3) -> x # This can be used, too, if you must...
y <- c(1,3,10,1,1,1111,0,1,1) # combine 1,3,10... into a vector 'y'
z <- 1:3 # Assign the vector containing 1,2,3 to a 'z'.
assign("x", sqrt(2*4)); print(x)
# Note that variables jump into existence upon assignment.
# Unlike C and Fortran, there is no need to declare variables.
# The variables are not 'typed', that is, any variable can
# point to data of any type, such as numbers, logicals, vectors,...
#
#---
#
# * HELP: help(function) or help("function") shows function documentation.
help(sqrt)
# (Emacs users: call help.start() before using help.)
# In the output of this function, check out the section
# with header "See Also:". It will tell you that you
# can find related functions by calling
help("Arithmetic")
help("log")
help("sin")
help("Special")
#
help(c)
help("c") # Same as help(c)
help("*") # help(*) does not work
#
# * APROPOS: apropos("char") lists all functions whose name contains
# the string "char".
apropos("char")
# This is often useful for finding related functions.
# Apropos combined with the section "See Also:" in the output
# of help() is a powerful tool for searching functions.
# There are about 1,700 built-in functions, and more if you
# download special-purpose packages from the R website.
#
# * Printing a function: allows you to see the arguments in a simple way
runif # same as: print(runif)
rnorm # (functions are "first class citizens", like numbers, vectors,...)
#
#---
#
# * MANAGEMENT OF DATA AND FUNCTIONS:
# - Listing R objects, both data and functions: either of
ls(); objects()
# This lists all data structures and functions that YOU defined.
# - Removing data and functions:
x <- 1:10
rm(x)
x
# - Looking for partly remembered data and functions:
# In case you remember only part of a name, you can look it up
# with a partial pattern:
xx <- 10
ls(pattern="x")
# This will list any dataset and function whose name contains "x"
# such as 'last.warning'.
# - List all functions that come with the base package of R:
ls("package:base") # Over 1,100 functions...
# - About packages:
# . Packages are namespaces for data AND functions.
# (You can't have a dataset 'x' and a function 'x' at the same time.)
# . You can list the packages in your environment:
search()
# . When you use a name, R goes through the search list
# shown by 'search()', package by package, stopping when
# it finds the name. This implies that the same name can appear
# in multiple packages, but only the one in the first package
# will be found.
ls <- 2:5 # mask 'ls' in "package:base" with user data
ls
rm(ls) # remove user data, unmasking the function 'ls()'
ls
#
#---
#
# * QUITTING:
q()
# R asks whether you want to save the workspace;
# usually you say "yes". Splus simply quits.
#
#---
#
# * SEMANTICS:
# Every assignment creates a copy of the assigned object.
# Assignment is by value, not by reference (unlike Python and C).
a <- c(1,3,5) #
a
b <- a # 'b' gets its own copy.
b # We couldn't tell from this, though.
a[1] <- 2 # Assign the value 2 to the first element of 'a'.
# This yields a test of whether 'a' and 'b' point to the same object.
# If they did, then 'b' would also have 2 in the first position.
a # We know this.
b # Uhu! 'b' was not changed by 'a[1] <- 2'.
# Therefore, 'b' has its own copy.
#
#--- 2008/09/04
# * SYNTAX: see
help(Syntax)
#
# * VECTORS:
# A vector is a sequence of items of the SAME basic data type.
c(1,3,4.5) # Collect three values in a vector.
c("a","ab") # Collect two strings in a vector.
c(T,F,T) # Collect three logical values in a vector.
c(2.1,T) # Not an error. Coercion of T to 1.
c(2,"a",T) # Not an error. Coercion of 1 and T to strings.
# If the items are not of the same type, they are coerced:
# string <-- numeric <-- logical
# (If the items are of variable types and should not be coerced,
# use lists instead of vectors. See below.)
#
#---
#
# * INDEXING AND SUBSELECTING VECTORS:
#
# R/S, among all languages, has probably the most powerful set of
# tools for getting at elements of vectors:
# * selection/mapping with positive integer indeces
# * delection with negative integer indeces
# * selection with logical indeces
# * selection by name when vector entries are named ("associative array")
# - Numeric indexes: ONE-based (unlike C, but like Fortran)
a <- c(1,3,5)
a[1]; a[2]; a[3]
# (This is unlike Python and C which use ZERO-based indexing.)
# - Vector indexes:
a[c(1,3)]
# or, equivalently, except for the allocation of another vector 'b':
b <- c(1,3); a[b]
# - Vector expansion: amounts to mapping the indexes, using 'a' as a map.
a[c(1,2,1,2,1,1,1,3,3,3)]
# - Exclusions with negative numeric indexes:
a[-1]
d <- a[-1]
a[c(-1,-3)]
b <- c(-1,-3); a[b] # dito
a[c(-1,-2,-3)] # Nothing left.
a <- a[-1] # Actually remove the first element of 'a'.
# - Logical selection:
a <- c(1,3,5)
a[c(T,F,T)]
b <- c(T,F,T); a[b] # dito
a>2; a[a>2] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
b <- (a>2); a[b] # dito
# Caution: If the index vector is not of equal length,
# it will be cyclically repeated:
a[F] # c(F,F,F) 'F' is repeated 3 times
a[T] # c(T,T,T)
a[c(T,F)] # c(T,F,T)
a[c(T,T,F,F,F)] # If too long, the index vector is truncated.
(1:12)[c(T,T,F)] # Leave out every third item.
# (The above scheme can be used to create arbitrary repeat patterns.)
# - Vectors can be indexed repeatedly:
a[c(1,3)][2] # Select item two of a[c(1,3)], i.e. item 3 of 'a'.
# (Looks like a matrix element in C, but isn't!!)
(a[c(1,3)])[2] # This is what the previous expression really means.
# Think of a[c(1,2)] as the result of some selection function.
a[c(1,3)][c(1,2,1,2)]
# - Vector indexing and subsetting can be used for assignment:
a[1:2] <- c(10,20); a # Print 'a' to see the effect of the assignment.
a[c(1,3)] <- c(100,200); a
a[-2] <- c(1000,2000); a
a[c(F,T,T)] <- c(10000,20000); a
a[2:3] <- 0; a # "0" is repeated to fill both vector elements.
b <- 1:10
b[3:6] <- c(10,20); b # "c(10,20)" is cyclically repeated.
# If the length does not divide, there is a warning message,
# but cyclical fill-in is done anyway.
#
#---
#
# * SOME FUNCTIONS THAT CREATE VECTORS
#
# - Manual entry of a vector:
x <- c(-1,2,5)
#
# - Equispaced sequences of numbers:
3:10
10:3
seq(3, 10)
seq(3, 10, by=1/3) # Third argument is names "by"
seq(3, 10, len=8)
seq(letters) # List of indexes into 'letters'
#
# - Repetitions:
rep(3, 10)
rep(1:3, 5)
# Here is something more complex that "rep" can also do:
rep(c(1,2,3,4), c(2,3,2,4))
rep(1:3, rep(2,3))
#
# - Logical values:
rep(T,5)
((1:10) > 5)
(1:10)[(1:10) > 5]
#
# - Random numbers:
x <- runif(5); x # Five uniform random numbers in [0,1]; see below.
y <- rnorm(5); y # Five standard normal random numbers; see below.
#
# - Random permutations and samples:
x <- sample(10); x # Random permutation of the numbers 1,2,...,10.
sample # can also do sampling with replacement...
sample(letters, 10, replace=T)
# - Read a vector from file: The file 'sp.dat' is a very long time series;
# download it from the class website.
x <- scan("sp.dat", n=1000) # First n values
x <- scan("sp.dat") # Reads 734656 numbers; may take a few sec.
# 'x' contains 8 bytes for each element, hence this many MB:
8*length(x)/2^20 # (2^20 equals one MegaByte)
object.size(x) # size in bytes; why is it not exactly 8*length(x)?
# You may not want to keep this 'x' around because of its length:
rm(x)
# Btw, if you run out of memory, try this:
options(memory=1E10, object.size=1E10)
#
#---
#
# * AUTOMATIC VECTOR EXTENSION:
#
# Array-bound errors for vectors do NOT exist for positive indexes!!!
# Vectors are shown as NA if indexed out of bounds, and
# automatically extended if assigned out of bounds.
x <- runif(5) # x has length 5.
x[6] # NA, not an error message.
x[10] <- 9.99 # Not an error! Element 10 now exists.
x # So do elements 6,7,8,9, filled in with NA.
length(x) # Assignment can extend the length.
# However, negative indexes (used for exclusion) can be out of bounds:
x[-11] # Out of bounds.
x[-9] # Ok, due to fill-in after assigning element 10.
# Automatic vector extension makes vectors very different from
# matrices and arrays, for which automatic extension does not exist.
#
#---
#
# * NAMED VECTOR ELEMENTS:
#
# - Vector elements can be named:
x <- c(first=1,second=3,third=5,fourth=7); x
# - Equivalently, naming elements can be performed in a separate step
# by assigning the "names()" attribute:
x <- c(1,3,5,7)
names(x) <- c("first","second","third","fourth")
x
names(x)
# - Element names can be used for indexing/subsetting:
x["second"]
x[c("second","fourth")]
nm <- c("second","third","second","third"); x[nm]
# - Named assignment/extension is possible:
x[c("fifth","fourth")] <- c(10,11); x
# Note: "fourth" existed and is re-assigned;
# "fifth" did not exist and is an extension of "x".
# - If names are not unique, the first matching name is selected:
c(a=1,b=2,a=3)["a"]
# - Example of automatic use: 'table()'
x <- sample(letters,size=1000,replace=T)
table(x)
#
#
#---
#
# * VECTORS WITH NAMED ELEMENTS AS ASSOCIATIVE ARRAYS:
#
# R's vectors with named elements are really a limited form
# of 'associative arrays'. Associative arrays are best explained
# in terms of an example:
# Assume you want to look up, say, salaries of persons given by name.
# Assume that salaries and names are stored in parallel vectors:
slry <- c(35000, 100000, 12000)
nm <- c("John", "Bill", "Jim")
# Turn "slry" into an associative array by giving its elements names:
names(slry) <- nm
# Salaries are now "associated" with names for lookup by name,
# the names are the "keys", the salaries the "values".
# For example, Bill's salary is:
slry["Bill"] # "Bill" is the key, slry["Bill"] the value.
#
# Another way to create the same: names arguments to c()
slry <- c(John=35000, Bill=100000, Jim=12000)
slry
#
# NOTE: If one wants to use numbers as keys, they have to be
# converted to strings first. The conversion happens automatically
# through coercion, as in
names(slry) <- c(10,20,30); slry
names(slry)
# Thus the keys are now the strings "10", "20", "30":
slry["20"]
# Caution: In lookups, numbers are NOT coerced to strings,
# because numbers act as indexes into the vector
x <- seq(10,40,by=10)
names(x) <- c(2,1,4,3) # Coerced to strings: "2","1",...
x[2] # Second element of 'x'.
x["2"] # Same as x[1], which has the name "2".
#
#
#---
#
# * RANKING, SORTING, REVERSING, PERMUTING:
x <- c(5:1,5:10); x
rank(x)
sort(x)
rev(x)
sample(x) # random permutation of 'x'; can also
sample(x, size=100, replace=T) # random sample of size 100 w. replt.
sample(x, size=100) # why does this not work?
# Here is one of the more important functions:
order(x) # !!!!!!!!!!!!!!!
help(order)
sort(x)
x[order(x)] # Same!
# Sorts also character data lexicographically:
x <- sample(letters); x # permute letters
sort(sample(letters))
# This is how you perform parallel sorting:
x <- runif(10)
y <- -x - 100 # 'y' is just another vector of the same length as 'x'.
x; y # Unordered parallel vectors
ord <- order(x)
x[ord]; y[ord] # Sorts both "x" and "y" in ascending order of "x".
#
#
#---
#
# * SIMPLE STATISTICS:
length(x)
sum(x)
mean(x)
var(x)
sd(x)
min(x)
max(x)
range(x)
median(x)
#
#---
#
# * CUMULATIVE OPERATIONS:
x <- 1:10
cumsum(x)
cumprod(x)
x <- 1:10 * c(1,-1)
cummax(x)
cummin(x)
#
#---
#
# * SIMPLE NUMERIC FUNCTIONS/TRANSFORMATIONS:
# Most functions that accept one number and return another number
# will naturally "vectorize" in R, namely, apply element by element.
x <- runif(20, min=-10, max=+10)
round(x)
trunc(x)
ceiling(x)
abs(x)
sqrt(x) # Comment?
log(x^2)
exp(1)
exp(x/100)
cos(pi) # "pi" is predefined; the number e=exp(1) is not.
acos(0.5) # What is 'acos()'?
#
#
################
#
# * MATRICES:
# Matrices in R are vectors with additional "meta-information"
# to structure them in a rectangular form.
# The elements of the vector fill the matrix column by column.
# ==> COLUMN MAJOR ORDER, as in Fortran, but unlike in C.
# Reformatting as a matrix is achieved by giving the vector
# a dimension attribute consisting of the numbers of rows and cols.
#
# - Reformatting vectors as matrices by filling successive cols or rows:
matrix(1:12, ncol=4) # Column major (default)
matrix(1:12, nrow=3) # Same; ncol is inferred
matrix(1:12, ncol=4, byrow=T) # Row major, forced with "byrow".
matrix(1:12, nrow=3, byrow=T) # Same
matrix(0:1, nrow=2, ncol=4) # What happened?
matrix(0, nrow=2, ncol=4) # "
matrix(letters, ncol=2) # Elements are now of type 'character'.
matrix(paste("Letter",letters), ncol=2)
# When reading data in text files, 'byrow=T' is needed
# for row-by-row input (download 'laser.dat' from the course page first):
m <- matrix(scan("fakelaser.dat", skip=1), ncol=4, byrow=T)
# ^^^^^^ Skip first line.
m
# Later we will see a more convenient function for reading
# tabular data ('read.table()').
# - Whether something is a matrix can be checked with 'is.matrix()':
is.matrix(matrix(1:12,3)); is.matrix(1:12)
x <- 2; is.matrix(x)
x <- 1:10; is.matrix(x)
x <- matrix(0, nrow=3, ncol=5); is.matrix(x)
is.matrix(matrix(0, nrow=3, ncol=5)) # tautology
#
# - The dimension attribute: it is the sole difference between
# a vector and a matrix. It can be queried:
dim(m) # Vector of length 2 with the two dimensions.
dim(m)[1]; dim(m)[2] # Each dimension separately.
nrow(m); ncol(m) # Same.
# Vectors can be turned into matrices by assigning the
# dimension attribute:
m <- 1:12 # Just a vector.
dim(m) <- c(3,4) # Now a matrix.
m
is.matrix(m) # TRUE
dim(m) <- NULL # Stripping to a vector.
m
is.matrix(m) # FALSE
# - The dimension name attributes: row- and col-names.
#   (Re-create the matrix first: 'dim(m) <- NULL' above stripped m back
#   to a plain vector, and colnames()/rownames() require an object with
#   at least two dimensions.)
m <- matrix(1:12, ncol=4)
colnames(m) <- letters[1:4]
rownames(m) <- LETTERS[1:3]
m
colnames(m)
rownames(m)
# - Indexing/subselecting rows and columns: (differs from C!)
m[1,4]                    # Element in row 1, column 4.
m[1:3,]                   # First 3 rows.
m[,3:4]                   # Last 2 columns.
m[1:3,3:4]                # Submatrix of size 3x2 (unlike Python!)
m[c(1,2,1,2),]            # Repeat rows 1 and 2.
m[,c(1,2,1,2)]            # Repeat columns 1 and 2.
m[c(1,2,1,2),c(1,2,1,2)]  # Repeat left-upper 2x2 matrix 4 times.
m[-1,]                    # Select all but the first row.
m[,-c(2,4)]               # Select all but columns 2 and 4.
m["A",]                   # Works because row/colnames were assigned above.
m[c("A","C"),]
m[c("A","C"),"a"]
# - ATTENTION: Selection of individual rows and cols generates vectors
#              which no longer know that they were rows or columns.
# R has no concept of col-vectors and row-vectors:
m[,1]
is.matrix(m[,1])
is.matrix(m[1,])
# Vector is vector, period (unlike Matlab).
# You can force the issue by turning vectors into
# Nx1 or 1xN matrices:
x <- 1:10
dim(x) <- c(10,1); x
dim(x) <- c(1,10); x
#
# - Index/subsetting can be used for assignment:
m[1,2] <- 0
m[1,] <- 11:14
m[,1] <- 0
m[1,c(F,F,T,T)] <- c(7,10)
#
# - Associative array feature for matrices:
rownames(m) <- c("x","y","z") # like 'names(vec)'
colnames(m) <- c("a","b","c","d")
m["x","d"] # number
m["x",] # vector
m[,"c"] # vector
m[c("x","z"),c("c","a")] # submatrix (different from Python!)
m[c("x","z","x","y"),c("c","a","a")] # col-row-rearrangement
#
# Roadmap:
# - wrap up matrices
# - arrays
# - lists
# - dataframes
# - loops and conditionals
# - character manipulation functions
# - fcts related to distributions
# - plotting functions
# - writing FUNCTIONS
# - querying object types
# - Column and row binding:
# Two functions that permit collecting cols and rows to form matrices.
x <- runif(5) # Play data.
cbind(1:5, x, x, rep(10,5)) # Column binding.
rbind(1:5, x) # Row binding.
# (Vectors are NOT thought of as columns or rows on their own;
# they take on these roles inside the "cbind" and "rbind" functions.)
# Both functions accept an arbitrary number of conforming arguments.
# You can also bind whole matrices:
cbind(rbind(1:3,11:13,21:23), cbind(1001:1003,2001:2003))
# A more friendly way of writing the same is:
m1 <- rbind(1:3,11:13,21:23) # 3x3
m2 <- cbind(1001:1003,2001:2003) # 3x2
cbind(m1, m2) # 3x5
# Conforming for 'cbind()' means the arguments have equal number
# of rows, for 'rbind()' it means equal number of columns.
# If vector arguments are not conforming, R extends them cyclically
# or clips them but may give you a warning if the shorter arguments
# are not of fractional length of the longest argument:
cbind(1:3,0) # second arg has size 1, = fraction of 3
cbind(1:6,1:3) # size 3 is a fraction of size 6
cbind(1:3,1:2) # size 2 is not a fraction of size 3 => warning
cbind(1:3,matrix(11:14,2)) # clipping: the second arg dictates nrow
# Don't rely on cyclic extension except for the simplest cases
# such as repeating constants.
# - Coercion of matrices to vectors:
# A matrix can always be treated as a vector.
# The following does not create an error message:
m <- matrix(1:12, nrow=3)
m[,1]
# Recall the column-major convention for storing matrices in R.
#
# - Some useful functions for generating patterned matrices:
diag(5)
outer(1:3,1:4)
outer(1:3,1:4, FUN=paste, sep="")
help(outer)
x <- outer(1:3,1:4)
row(x)
col(x)
row(x)>col(x)
x[row(x)>col(x)]
#
################
#
# * ARRAYS: the generalization of matrices to more than 2 indexes
#
a <- array(1:24, dim=c(2,3,4))
a
a[1,2,1]
a[,2:3,c(1,4)]
a[,,c(T,F,T,F)]
a[,,-3]
# Main use: contingency tables
# e.g., table frequencies of sex by education by income bracket ...
# The associative array feature also exists when dimnames are given.
# To understand this, we need to talk about lists first.
#
#
################ read/study up to here for HW 1 #####################
#
# * LISTS:
#
# - Recall: Vectors and matrices can carry only one basic type
# of data at a time, numeric OR logical OR character.
matrix(c(T,F),nrow=4,ncol=4)
matrix(paste(LETTERS,letters)[1:25],nrow=5)
# Lists are data structures without this restriction.
# Lists are sequences of anything. They can contain:
# basic data types, vectors, matrices, arrays, lists (recursion),...
# In particular, they can contain arbitrarily nested lists of lists.
# Examples:
list(1,"1",T,F,1E100,NA,-Inf,NULL,1:5,letters[2:5],list(1,2,"a"))
# Balanced binary tree:
list(list(list(1,2),list(3,4)),list(list(5,6),list(7,8)))
#
mylist <- list(vec=1:3, mat=cbind(1:2,3:4), flags=c(T,T,F,T),
lst=list(1:3,1:4))
mylist
# Component names are optional. They can be glued on by force:
x <- list(1,"a",T,NULL); names(x) <- LETTERS[1:4]; x
# - Access to LIST ITEMS is by index with "[[..]]", or by "$name"
# if names exist:
mylist[[2]]
mylist$mat
mylist[["mat"]]
# This is also how lists are printed:
list(1,T)
list(a=1,b=T)
#
# - Access to SUBLISTS is as if the list were a vector:
mylist[2:3] # Sublist of length 2.
mylist[2] # Sublist of length 1.
mylist[c(1,4,1,4,2,3,2,3)] # Out-of-order and expansion.
mylist[c("lst","mat","vec","mat")] # Named access.
mylist["vec"] # Sublist of length 1.
mylist[["vec"]] # Vector!!!
mylist[c(T,F,T,F)] # Logical selection
#
# - Sublists are lists and require list item access to get at
# the components:
mylist[2:3][[1]]
mylist[2:3]$mat
mylist[2][[1]] # Again, "mylist[2]" is a list of length 1...
mylist[2]$mat # Dito.
#
# - Primary use of lists in statistics:
# collecting the results of a complex statistical or numerical
# analysis, such as a regression or an eigendecomposition:
eigen(matrix(1,3,3))
# is a list with two named items: $values and $vectors,
# the first being a vector containing the eigenvalues,
# the second being a matrix with the eigenvectors in the columns.
#
#
#
################
#
# * DATAFRAMES:
#
# - In statistics, data are usually rectangular tables, cases by variables.
# Problem: Variables are not all of the same type.
#             Some may be quantitative, hence stored as numeric data.
# Other variables may be categorical and stored either
# with numeric or character/string codes for categories.
# => Matrices cannot accommodate variables of both types...
# Solution: Data frames. They are similar to matrices,
# but columns may differ in basic data types.
# (The entries have to be basic, not complex.)
#
# Main use of dataframes: data tables with mixed-type variables
#
# Dataframes are printed like matrices, but they are internally
# implemented as lists.
#
# - The function "data.frame()" can bundle conforming vectors,
# matrices, other dataframes into a single dataframe:
myframe <- data.frame(somenumbers=1:3,
somecharacters=c("a","b","c"),
somelogicals=c(T,F,T))
myframe
#
# - Auxiliary functions for dataframes:
is.data.frame(myframe) # Checking type.
as.data.frame(cbind(1:4,11:14)) # Coercing a matrix to a dataframe.
#
# - Many matrix manipulations carry over to dataframes,
# with one exception:
dim(myframe)
nrow(myframe)
ncol(myframe)
rownames(myframe)
colnames(myframe)
myframe[,3]    # Col 3 coerced to vector.
myframe[2,]    # << Sub-dataframe with row 2, NOT coerced to vector!
myframe[,2:3]  # Sub-dataframe consisting of cols 2 and 3.
myframe[2:3,]  # Sub-dataframe consisting of rows 2 and 3.
# Why is 'myframe[2,]' not coerced to a vector?
# Because the items might be of differing types!
# Hence the row 'myframe[2,]' is still a dataframe...
#
# (Of course matrix multiplication does not work:)
myframe %*% matrix(rep(1,10), nrow=5)
# - 'read.table()' reads a tabular ASCII/txt file into a dataframe,
# possibly with row and column names:
read.table("fakelaser.dat", header=F)
# The file can be an URL:
url <- "http://www-stat.wharton.upenn.edu/~buja/STAT-541/laser.dat"
my.frame <- read.table(url, header=T)
# This function has many other options for particular cases.
# From the "help()" page:
# read.table(file, header = FALSE, sep = "", quote = "\"'", dec = ".",
# row.names, col.names, as.is = FALSE, na.strings = "NA",
# colClasses = NA, nrows = -1,
# skip = 0, check.names = TRUE, fill = !blank.lines.skip,
# strip.white = FALSE, blank.lines.skip = TRUE,
# comment.char = "#")
# - Since data frames are lists of columns, we can use "lapply()" (list-apply):
lapply(myframe, class)
# The following does not do the right thing:
apply(myframe, 2, class)
# Why? "apply()" expects a matrix in its first argument;
# if it isn't, it gets coerced into a matrix;
# but matrices can't have factors in their columns, only vectors,
# so the factor columns are coerced to character columns.
################################################################
#
# Flow control: loops, conditionals
#
m <- 1:12 #play data
#
# - if-conditional: (length(m) is 12, so the condition below is FALSE
#   and nothing is printed)
if(length(m) > 15) { print("length(m) is greater than 15") }
# With "else" clause:
if(length(m) > 15) {
  print("length(m) > 15")
} else {
  print("length(m) <= 15")
}
# This sort of thing is most useful inside loops; see below.
# - The vectorized "ifelse()" function:
# Not a flow control construct, but often replaces a combination of
# for-loop and if-else statements.
ifelse(c(T,F,T), c(1,2,3), c(-1,-2,-3))
# The function runs down the three arguments in parallel,
# checks each element in the first argument,
# if true, picks the corresponding element in the second argument,
# if false, picks the corresponding element in the third argument,
# returns a vector/matrix/array of the size of the first argument.
# If the second or third argument are not conforming,
# they are cyclically repeated, as in this implementation of
# 10 Bernoulli trials:
ifelse(runif(10) > 0.5, "H", "T")
# - Note the difference between 'if' and 'ifelse()':
# * 'if' is a syntactic element that dispatches execution
# depending on the outcome of a single logical outcome.
# * 'ifelse()' is a function that takes a logical vector
# and selects from two other vectors depending on the logicals.
# - for-loop: runs over the elements of a vector.
for(i in c(10,100,1000)) { j <- 2*i; print(j); }
# The variable 'i' is the looping variable. It gets set to
# each element of the vector in turn, and for each
# the subsequent, usually composite ({...}) statement gets executed.
# (Braces are optional if there is only one statement in the loop.)
# - repeat-loop: needs a "break" for termination.
repeat{ print("Trying..."); rnum <- runif(1);
if(rnum > 0.9) { print(c("Found one:",rnum)); break } }
# - while-loop:
str <- "Not yet..."
while(runif(1) < 0.99) {
str <- paste(str,".",sep="")
print(str)
}
print("Finally...")
#
#
# - Looping over row or columns of matrices with 'apply()':
m <- matrix(1:12, 3); m
# * Looping over rows::
apply(m, 1, min) # Return a vector with minima of the rows.
# ^^^ the "min()" function, applied to the 1st dimension.
# * Looping over columns:
apply(m, 2, mean) # Return a vector with the means of the columns.
# ^^^^ the "mean()" function, applied to the 2nd dim.
# * 'apply()' can also be used with functions that return vectors,
# such as 'range()' and 'sort()':
apply(m, 2, range)
# There are complications with "apply(m, 1, range)":
# the result is 2xnrow(m), not nrow(m)x2, as one would expect.
# This requires transposition, 't()', to get the expected matrix:
t(apply(m, 1, range))
#
#
# - Looping over elements of a list:
lis <- list(numvec=1:10, charvec=letters,
logicvec=rep(c(T,F),3), mat=cbind(1:3,1))
lis
lapply(lis, length) # returns a list with one number per entry: the length
unlist(lapply(lis, length)) # same but 'flattened' to a vector
sapply(lis, length) # same but Simpler ('Simple apply')
lapply(lis, dim) # returns a list with the dim attribute for each entry
lapply(lis, sort) # returns a list of sorted vectors
################################################################
#
# * STRING MANIPULATION:
#
# R has a number of functions that make the use of other scripting
# languages such as awk, perl, python less and less necessary for
# pattern matching problems. There are functions for searching
# and substituting text patterns expressed as so-called
# "regular expressions". Here is a collection:
#
# paste(..., sep = " ", collapse = NULL)
# substr(x, start, stop)
# substring(text, first, last = 1000000)
# substr(x, start, stop) <- value
# substring(text, first, last = 1000000) <- value
# strsplit(x, split, extended = TRUE)
# nchar(x) # string length
# chartr(old, new, x)
# tolower(x)
# toupper(x)
# match(x, table, nomatch = NA, incomparables = FALSE)
# x %in% table
# pmatch(x, table, nomatch = NA, duplicates.ok = FALSE)
# charmatch(x, table, nomatch = NA)
# grep(pattern, x, ignore.case=FALSE, extended=TRUE, value=FALSE)
# sub(pattern, replacement, x,
# ignore.case=FALSE, extended=TRUE)
# gsub(pattern, replacement, x,
# ignore.case=FALSE, extended=TRUE)
# regexpr(pattern, text, extended=TRUE)
#
# String dimensions when plotting:
# strwidth(s, units = "user", cex = NULL)
# strheight(s, units = "user", cex = NULL)
#
# Example: download the dataset 'dict.dat' from the course page, then...
dict <- scan("DATA/dict.dat",w="",quote="")
sel <- grep("source", dict) # All words containing "source".
dict[sel]
# Stay tuned for HW 3.
#
#
################################################################
#
# * DISTRIBUTIONS: PSEUDO-RANDOM NUMBERS, QUANTILES, CDFS, DENSITIES
#
# The following continuous distributions come with the standard
# version of R, each with
# . a random number generator,
# . a quantile function,
# . a cdf function, and
# . a density fct.
# - Uniform:
# runif(n, min = 0, max = 1)
# qunif(p, min = 0, max = 1, lower.tail = TRUE, log.p = FALSE)
# punif(q, min = 0, max = 1, lower.tail = TRUE, log.p = FALSE)
# dunif(x, min = 0, max = 1, log = FALSE)
#
# - Normal:
# rnorm(n, mean = 0, sd = 1)
# qnorm(p, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
# pnorm(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
# dnorm(x, mean = 0, sd = 1, log = FALSE)
#
# - Student's t:
# rt(n, df)
# qt(p, df, lower.tail = TRUE, log.p = FALSE)
# pt(q, df, ncp, lower.tail = TRUE, log.p = FALSE)
# dt(x, df, log = FALSE)
#
# - F:
# rf(n, df1, df2)
# qf(p, df1, df2, lower.tail = TRUE, log.p = FALSE)
# pf(q, df1, df2, ncp = 0, lower.tail = TRUE, log.p = FALSE)
# df(x, df1, df2, log = FALSE)
#
# - Chi-square:
# rchisq(n,df,ncp=0)
# qchisq(p, df, ncp = 0, lower.tail = TRUE, log.p = FALSE)
# pchisq(q, df, ncp = 0, lower.tail = TRUE, log.p = FALSE)
# dchisq(x, df, ncp = 0, log = FALSE)
#
# - Exponential:
# rexp(n, rate = 1)
# qexp(p, rate = 1, lower.tail = TRUE, log.p = FALSE)
# pexp(q, rate = 1, lower.tail = TRUE, log.p = FALSE)
# dexp(x, rate = 1, log = FALSE)
#
# - Cauchy:
# rcauchy(n, location = 0, scale = 1)
# qcauchy(p, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
# pcauchy(q, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
# dcauchy(x, location = 0, scale = 1, log = FALSE)
#
# - Beta:
# rbeta(n,shape1,shape2)
# qbeta(p, shape1, shape2, lower.tail = TRUE, log.p = FALSE)
# pbeta(q, shape1, shape2, ncp = 0, lower.tail = TRUE, log.p = FALSE)
# dbeta(x, shape1, shape2, ncp = 0, log = FALSE)
#
# - Gamma:
# rgamma(n,shape,rate=1,scale=1/rate)
# qgamma(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE,
# pgamma(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE,
# dgamma(x, shape, rate = 1, scale = 1/rate, log = FALSE)
#
# Some uses:
# - Random number generators are used, for example,
# . in simulations, as when evaluating the efficiency
# of statistical methods;
# . in Bootstrap inference for frequentist statistics;
# . in Bayesian computing to simulate posterior distributions.
# Base of simulations: law of large numbers (rel. freq. approx. probs.)
# - The quantile functions are used
# . to find critical values in statistical tests;
# . in q-q-plots to check empirical against theoretical distributions
# (see below).
# - The cdf functions are used for calculating p-values in
# statistical tests.
# - The normal density is sometimes used as a "kernel" in
# smoothing methods.
#
# - Simulating discrete distributions and sampling:
# The function
sample
# serves many purposes. It does sampling with and without replacement.
#
# - Bernoulli trials and Binomials: e.g., 10 head/tails, biased coin flip,
ifelse(runif(10)<.55, "H", "T")
sample(c("H","T"), size=10, repl=T, prob=c(0.55,0.45))
rbinom(n=10, size=1, p=.55)
# (Sampling WITH replacement is also called i.i.d. sampling.)
#
# - Multinomial: e.g., 10 draws from uniform distribution on 4 objects
sample(1:4, size=10, repl=T, prob=rep(1/4,4))
dmultinom; rmultinom
#
# - Sampling WITH replacement:
sample(1:10, 5) # Samples w/o replacement (default).
sample(10) # Random permutation of 1:10.
# - The function can also be applied to non-numeric data:
sample(letters) # A permutation of letters.
sample(letters, 10) # 10 distinct random letters.
sample(letters, 10, repl=T) # 10 i.i.d. samples of letters.
sample(letters, 100) # Error! Sampling w/o replacement.
sample(letters, 100, repl=T) # Fine! Sampling with replacement.
#
# 'sample()' is used to implement modern resampling methods such as
# the bootstrap and permutation tests (see below).
#
# . Important note on seeding random number generators:
# The "seed" stores the state of the pseudo-random number generator.
.Random.seed
runif(1)
.Random.seed # The seed has changed.
Random.seed.store <- .Random.seed # Store the seed.
runif(1) # (*) Remember this random number.
runif(10) # Now push the generator 10 times.
.Random.seed <- Random.seed.store # Force it back to recreate (*)
runif(1)
# Indeed, this is the same random number as in (*).
# Storing the seed is important for simulations that must be
# reproducible, for example, when putting simulation results
# in a publication.
#
################################################################
#
# * PLOTTING:
#
# - The par() function for querying and setting plotting parameters.
help(par)
#
# Some important parameters to set:
par(mfrow=c(3,4), # 3x4 layout of 12 plots, filling rows first (mfcol)
mgp=c(2.0, 0.5, 0), # distance of axis title ("x","y"),
# numbers at the ticks, border line,
# (in margin lines)
mar=c(3,2.5,2.5,1.5), # margins: bottom-left-top-right (in lines)
oma=rep(1,4), # outer margin around the collection
# of plots on the page (in lines)
mex=0.8, # expansion of margin line width,
# relative to default
cex=0.8) # expansion of characters, relative to default
#
# - List of all possible plot characters:
windows(width=10, height=3)
plot(1:25, rep(1,25), pch=1:25, xlab="", ylab="", yaxt="n", cex=2)
#
#
################################################################
#
# WRITING FUNCTIONS:
#
# Syntax: simplest case
fun1 <- function(x) { plot(x) }  # Side effect only: draws a plot; returns NULL (see below).
fun2 <- function(x) { 2*x }      # Pure function: returns its argument doubled.
# Semantics 1: fun1 creates a 'side effect', plotting the input argument;
# fun2 returns a result, the doubled input argument.
fun1(1:10) # plots 1,2,...,10
fun2(1:10) # returns the vector containing 2,4,...,20
# Semantics 2: Functions are 'first class citizens', that is,
# they can be assigned, printed, passed as arguments,...
fun2 # Print the function.
print(fun2) # Same.
funx <- fun2; funx(1:5) # Application: len <- length; len(1:10)
# What is this?
(function(x) { 2*x })(1:10)
# It's getting weird:
funp <- function(f) print(f)
funp(funp)
# Assignment:
x <- fun2(1:10); x # Fine.
x <- fun1(1:10); x # Assigns NULL, but plots. 'fun1()' is for side effect.
# Syntax: The following are equivalent ways of writing fun2().
fun2 <- function(x) { 2*x } # The original.
fun2 <- function(x) 2*x # Single statement bodies don't need braces.
fun2 <- function(x) { return(2*x) } # Make the return explicit.
fun2 <- function(x) { y <- 2*x; y } # Value of last statement is returned.
fun2 <- function(x) { y <- 2*x; return(y) } # Dito
# Functions with more arguments and complex bodies:
# Scatterplot of q against t as small filled dots, after tightening margins.
fun3 <- function(t, q) {
  par(mgp=c(1.8,.5,0), mar=c(3,3,2,1))  # Side effect: changes global plotting parameters.
  plot(t, q, pch=16, cex=.5)
}
fun3(t=1:100, q=rnorm(100))
# Function calls with named arguments: independent of order of arguments
fun3(q=rnorm(100), t=1:100)
# Functions with defaulted arguments: defaults can use preceding arguments
# Demo of defaulted arguments; a default may refer to earlier arguments
# (the default for 'x' uses 'y').
# NOTE(review): 'w' and 'h' are accepted but never used in this body.
fun4 <- function(y=rnorm(100), x=1:length(y), cex=.5, pch=16, w=5, h=5, new=T) {
  if(new)
    par(mgp=c(1.8,.5,0), mar=c(3,3,2,1))
  plot(x, y, pch=pch, cex=cex) # 1st 'cex': argument name
                               # 2nd 'cex': value to be passed to plot
}
fun4() # All arguments defaulted.
fun4(1:100, rnorm(100)) # 1st and 2nd argument passed by order.
fun4(1:100, rnorm(100), new=F) # Last argument is 7th by order, so needs name.
fun4(1:100, rnorm(100), new=F, cex=.2)
fun4(1:100, rnorm(100), new=F, cex=1, pch=3)
fun4(x=1:100, y=rnorm(100), pch=2, w=10, h=2) # x= and y= not needed but more readable
fun4(cex=.2)
# Functions returning complex data structures:
# Summarize a numeric sample: its size, several location estimates
# (plain and trimmed means, median) and several dispersion estimates
# (standard deviation, MAD, IQR). Returns a named list.
fun5 <- function(x) {
  loc <- c(mean       = mean(x),
           mean.tr.05 = mean(x, trim=.05),
           mean.tr.10 = mean(x, trim=.10),
           mean.tr.25 = mean(x, trim=.25),
           median     = median(x))
  disp <- c(sdev = sd(x),
            mad  = mad(x),
            IQR  = IQR(x))
  list(N = length(x), location = loc, dispersion = disp)
}
summaries <- fun5(rnorm(10000))
summaries
# Semantics 3: All passing of arguments is by value, not reference/name.
# In other words, all arguments get copied into the body of the function.
# Pass-by-value demo: the caller's vector is untouched; only the
# returned copy has its first element set to Inf.
fun5 <- function(x) replace(x, 1, Inf)
x <- 1:3; fun5(x); x
# In the above line, fun5() is working on a copy of x, also called x.
# Semantics 4: Function bodies have access to outside variables.
# Reads 'x' from the enclosing (here: global) environment; the
# assignment to x[1] modifies a LOCAL copy, not the global 'x'.
fun6 <- function() { x[1] <- Inf; x }
x <- 1:3; fun6()
x # 'x' was not changed. The body of fun6() worked on a copy of x.
# Rule: Do not use this feature of R semantics.
# Pass all data into a function as arguments.
# (See Section 10.7 of Venables & Smith on "scoping rules" in R.)
# Functions called inside functions:
# Functions can call other functions; 'fun2' is looked up in the
# global environment when fun7 runs. (fun2(x) is kept inline because
# rbind derives row names from symbol arguments.)
fun7 <- function(x) rbind(x, fun2(x))
fun7(1:4)
# (This is an exception to the rule of not relying on the scoping rules of R.)
# Functions defined inside functions:
# Functions can be defined inside functions; the inner helper is local
# to the body and does not exist after fun8 returns.
fun8 <- function(x) {
  dbl <- function(v) 2 * v
  dbl(x)
}
fun8(1:5)
fun9(1:5) # Does not exist; defined only inside the body.
# Functions passed as arguments into other functions:
x <- matrix(rnorm(1000),100,10)
# Return the three quartiles (25%, 50%, 75%) of a numeric vector.
# The argument is spelled out as 'probs=': the original relied on R's
# partial argument matching ('prob='), which is fragile.
fun9 <- function(x) quantile(x, probs = c(0.25, 0.50, 0.75))
apply(x, 2, fun9)
# Equivalent:
apply(x, 2, function(x) quantile(x, prob=c(.25,.50,.75)))
################################################################
#
# * QUERYING R OBJECTS:
#
# Asking for the primary data type:
class(1:10)
class(T)
class(letters)
class(swiss) # Like 'letters', 'swiss' comes with R.
#
# Queries about basic data types:
is.numeric(1:10)
is.character(letters)
is.logical(F)
is.na(NA) # !!!!!!!!!!! use for removal of NAs:
x <- rnorm(100); x[rnorm(100)>1] <- NA; is.na(x); x[!is.na(x)]
is.infinite(-Inf)
is.null(NULL)
#
# Queries about complex data types:
is.vector(letters)
is.array(array(1:12,c(2,3,2)))
is.matrix(swiss)
is.data.frame(swiss)
is.list(swiss)
is.function(mean)
#
# All the query functions that start with "is.":
apropos("^is[.]") # '.' is a special character in regular expressions.
#
# Querying components of lists and columns in data frames:
names(swiss)
# Querying row and column names in matrices and data frames:
colnames(swiss) # Same as names() for data frames.
rownames(swiss)
#
################################################################
#
# * MORE ON R: A MENTAL MODEL FOR R
#
# black box with inner state described by
# * Variables x, y, z, err, ..., pi, letters, LETTERS, ...
# * Their values, that is, the data structures such as
# values, vectors, matrices,...
# to which the variables point.
# * The black box has an engine, the interpreter,
# that acts on the state of the box.
# * The state of the black box is changed by executing
# assignments, such as x <- runif(100)
# * On quitting q() you decide whether to save the newly
# formed variables and their values.
# * Learn about the state of the black box by executing:
# . ls() to learn what variables are bound
# . print(x) to learn what the value of the variable is
# * There are two additional black boxes:
# . par(), ... to learn about the values and data structures
# of two sub-boxes:
# - Plotting box:
# . State: plotting parameters such as character size, margin width,...
# . Reports the settings of the plotting parameters:
par()
# . State can be changed by executing, say,
par(mfrow=c(2,2))
# - Random number generator:
# . State: the seed vector
.Random.seed
# . State changes when 1) a random number is generated, or
# 2) .Random.seed is set by the user
# - Printing and other parameters, e.g., line width, number of digits...
options()
#
#
################################################################
#
# Changing the working directory:
# In the standard interface, one can change the working directory...
# - temporarily: -> File -> Change Dir
# - permanently: R-click on R icon -> Edit Properties -> Start in:
#
################################################################
| /Methods1/Lectures/R-intro.R | no_license | nooreendabbish/Methods | R | false | false | 56,444 | r |
################################################################
#
#
# INTRO TO R
#
# The R website:
#
# http://cran.r-project.org
#
# (Google "R" -> one of the first entries)
#
# Downloading R:
#
# -> Sidebar "Download, Packages": CRAN
# -> any site in the US
# -> Windows
# -> base
# -> "Download R-2.... for Windows (32 bit build)"
# -> installation dialog in setup wizzard
#
# The setup program should self-install and create an icon on your desktop.
# Clicking the icon should open up an R interpreter window ("R Gui").
#
# The base is really just the base. There are many contributed
# library packages whose binaries can be downloaded from
#
# -> Packages
#
# You will not have to download them explicitly, though;
# there are R functions that allow you to get them while running R.
# In the R Gui you can also go through the "Packages" item in the toolbar.
#
################
#
# OPERATION OF R:
#
# * For non-Emacs users:
#
# 0) go to the class web page and download this file.
# Open an editor (Word, Wordpad,...) on this file (change .R to .txt)
# AND open up an R GUI window by clicking on the R icon.
# Reduce the size of both windows so both are accessible.
#
# 1) Copy R code from this file into the R interpreter window.
# Use shortcuts: In the editor highlight lines, hit <Ctrl>-C,
# then move to the R window and hit <Ctrl>-V.
# Examples:
1+2
1:10
2^(1:20)
runif(10)
rnorm(10)
1:10 + rnorm(10)
#
# 2) Experiment with R code
# by editing THIS file in the editor window, or
# by editing the command line in the R window (if it's one-liners).
#
# Commands for line editing in the R interpreter window:
# Note: "^p" means you hold down the modifier key <Ctrl> and hit "p",
# just like the modifier key <Shift> used for capitalization
# ^p get back the previous command line for editing and executing
# repeating ^p goes further back in the command history
# ^b step back one character in the command line
# ^f step forward one character in the command line
# ^a move to the beginning of the line
# ^e move to the end of the line
# ^d or <Delete> to delete the character under the cursor
# ^h or <Backspace> to delete the previous character
# ^k kill the rest of the line starting from the cursor
# otherwise: you are in insert mode
# (These editing commands are borrowed from the Emacs editor.)
#
#
# * For Emacs users:
#
# download the ESS macros ("Emacs Speaks Statistics") from r-project.org:
# -> R GUIs -> Emacs (ESS) -> Downloads
# Download the latest zip or tar.gz file.
# Unpack and install; ESS should work right away. Skip to "Operation:" and try.
# If it doesn't work right away,
# you may have to put these lines in your .emacs file:
## (setq inferior-R-program-name "c:/Program Files/R/R-2.7.1pat/bin/Rterm.exe")
# ^^^^ path to your R executable ^^^^
## (load-file "c:/EMACS/ESS/ess-5.3.0/lisp/ess-site.el")
# ^^^path to the file "ess-site.el"^^^
#
# Operation:
# - Split the Emacs window into two windows:
# ^x 2
# - Edit THIS file in the upper window.
# ^x ^f filepath <Enter>
# - Start R in the lower window:
# ^x o (move the cursor to the lower window)
# <Alt>-x R (start R inside Emacs)
# - If you like to shrink one of the windows, put this line in your .emacs:
# (global-set-key "\M-o" (lambda () (interactive) (shrink-window 1)))
# Then <Alt>-o will shrink the present window and expand
# the other window by one line.
# - There are macros to copy and execute lines, functions, and regions
# from the upper buffer into the lower buffer:
# ^c ^j (execute current line and leave the cursor in place)
# ^c ^n (execute current line and move to next line of R code)
# ^c ^f (execute function, assuming the cursor is
# inside its body)
# ^c ^r (execute region)
# - A small nuisance is that the lower (R) window does not move
# the bottom line to the center after executing an expression.
# This can be fixed by putting the following in your .emacs:
# (global-set-key "\M-s" "\C-xo\M->\C-l\C-xo")
# Then <Alt>-s will move the bottom line to the center.
#
#
##################
#
#
# * R is an INTERPRETED LANGUAGE:
# Users type expressions and see results immediately.
# Example:
for(i in 1:10) { if(i%%2==0) print(i) }
# As opposed to:
# - ... languages (C, Fortran)
# - ... software (such as SAS' JMP)
#
#
# * R is HIGH-LEVEL:
# It operates on complex data structures such as
# vectors, matrices, arrays, lists, dataframes,
# as opposed to C and Fortran that operate on individual numbers only.
# (This requires some getting used to for C programmers.)
#
#
# * PRIMARY BEHAVIOR: Whatever is typed, print the results.
2
print(2) # same
"a"
print("a") # same
# (Q: Why is there '[1]' preceding the results? A: ...)
# Vector of length greater than 1:
1:3
print(1:3) # same
#
#
# * SYNTAX:
# - Largely scientific/math notation; base 10.
# - A wealth of functions.
# - Comments run from a "#" to the end of the line; no multiline comments.
# - Spaces are irrelevant, except inside strings:
2+3; 2 + 3; "ab"; "a b"
# - Statements can run over multiple lines:
2 + 3 + # \
4 # / One statement
# But if a statement is syntactically complete at
# the end of the line, it won't continue:
2 + 3 # \
+ 4 # / Two statements
# - Statements can be separated by ";".
2; 3^3; sqrt(9)
#
#---
#
# * BASIC DATA TYPES:
#
#
# - NUMERIC: double precision by default (How many bytes?)
# Integers are represented as doubles, although the print function
# shows them as integer:
-2.000
1E5
2E-3
# The usual unary and binary operations and analytic functions:
# +, -, *, /, %%, %/%, ^, log, sqrt, sin, acos...
2+3 # Add.
5.3*1E10 # Multiply.
10%%3 # Modulo.
exp(1) # Exponentiation.
log(2.718282) # Log of the number 'e'; 'log' is e-based.
log10(10) # 10-based log
pi # R's only hardwired numeric constant: 3.14159...
sin(pi/2) # Angles are given in radians, not degrees.
sin(pi) # Ditto; result is ~1.2e-16, not exactly 0 (floating point).
acos(0) # This is the inverse of cos, arccos, hence pi/2.
pi/2 # Half of 'pi', i.e. 1.5707...
#
#
# - STRINGS: can be single or double quoted, but the print function
# uses double quotes.
'a'; "a"; 'abc'; "abc"
# (In C and Python strings are character vectors.
# In R strings are basic types; there is no single character type.
# Characters are just strings of length 1.
# There is no indexed access to individual characters and
# substrings in R; one uses the "substring" function instead:
substring("abcxyz",4,6)
# Other basic string manipulations:
paste("my","word")
nchar("Supercalifragilistikexpialidocious")
# There are two hardwired character vectors that contain the lower and
# upper case letters:
letters
LETTERS
#
#
# - LOGICAL values: have two names each, but the print function
# always uses the longer.
TRUE; FALSE; T; F
# They are implemented as the values 1 and 0 for T and F, respectively.
# They are the result of the usual comparisons: <, >, <=, >=, ==, !=
1<2; 1>2; "ab" <= "abcd"
"ab" > "ac"; "ab" != "AB"
"ab" != 2; 0==F; 1==T
#
#
# - MISSING values NA, Inf, -Inf:
NA; NaN; Inf; -Inf; 1/0; Inf==1/0; 0/0
# Watch out: the following does not give T!!!
NA==1
# If you want to test for NA, you must use the function is.na():
is.na(NA)
#
#
# - FUNCTIONS:
# * R is a FUNCTIONAL LANGUAGE:
# Functions return values that in turn can be arguments to functions.
# Expressions evaluate inside out, e.g., log(2*2.5))^3:
2.5; 2*2.5; log(2*2.5); log(2*2.5)^3
#
#
# * STATEMENTS/EXPRESSIONS:
# There are two types of expressions: assignments and side effects.
# 1) Assignments allocate data structures and
# make variables point to them.
x <- 1:3 # Allocate a vector 1,2,3 and make 'x' point to it.
# 2) Side effects are essentially display operations
# such as printing, plotting, showing help; unlike assignments,
# they don't change the computational state of the R system.
x
print(x)
plot(x)
help("+") # Show the help page of addition.
help(sqrt) # Show the help page of the square root function.
help("sqrt") # Dito.
# 3) Composite Statements:
{print(1); plot(1:10)}
# Will be needed in loops and functions.
#
#
# - Assignments to variables come in four equivalent syntactic forms:
# x <- ...
# x = ...
# ... -> x
# assign("x",...)
# Examples:
x <- sqrt(2 * 3) # Root of product of 2 and 3
x = sqrt(2 * 3) # Both can be used: '=' and '<-'
sqrt(2 * 3) -> x # This can be used, too, if you must...
y <- c(1,3,10,1,1,1111,0,1,1) # combine 1,3,10... into a vector 'y'
z <- 1:3 # Assign the vector containing 1,2,3 to a 'z'.
assign("x", sqrt(2*4)); print(x)
# Note that variables jump into existence upon assignment.
# Unlike C and Fortran, there is no need to declare variables.
# The variables are not 'typed', that is, any variable can
# point to data of any type, such as numbers, logicals, vectors,...
#
#---
#
# * HELP: help(function) or help("function") shows function documentation.
help(sqrt)
# (Emacs users: call help.start() before using help.)
# In the output of this function, check out the section
# with header "See Also:". It will tell you that you
# can find related functions by calling
help("Arithmetic")
help("log")
help("sin")
help("Special")
#
help(c)
help("c") # Same as help(c)
help("*") # help(*) does not work
#
# * APROPOS: apropos("char") lists all functions whose name contains
# the string "char".
apropos("char")
# This is often useful for finding related functions.
# Apropos combined with the section "See Also:" in the output
# of help() is a powerful tool for searching functions.
# There are about 1,700 built-in functions, and more if you
# download special-purpose packages from the R website.
#
# * Printing a function: allows you to see the arguments in a simple way
runif # same as: print(runif)
rnorm # (functions are "first class citizens", like numbers, vectors,...)
#
#---
#
# * MANAGEMENT OF DATA AND FUNCTIONS:
# - Listing R objects, both data and functions: either of
ls(); objects()
# This lists all data structures and functions that YOU defined.
# - Removing data and functions:
x <- 1:10
rm(x)
x
# - Looking for partly remembered data and functions:
# In case you remember only part of a name, you can look it up
# with a partial pattern:
xx <- 10
ls(pattern="x")
# This will list any dataset and function whose name contains "x"
# such as 'last.warning'.
# - List all functions that come with the base package of R:
ls("package:base") # Over 1,100 functions...
# - About packages:
# . Packages are namespaces for data AND functions.
# (You can't have a dataset 'x' and a function 'x' at the same time.)
# . You can list the packages in your environment:
search()
# . When you use a name, R goes through the search list
# shown by 'search()', package by package, stopping when
# it finds the name. This implies that the same name can appear
# in multiple packages, but only the one in the first package
# will be found.
ls <- 2:5 # mask 'ls' in "package:base" with user data
ls
rm(ls) # remove user data, unmasking the function 'ls()'
ls
#
#---
#
# * QUITTING:
q()
# R asks whether you want to save the workspace;
# usually you say "yes". Splus simply quits.
#
#---
#
# * SEMANTICS:
# Every assignment creates a copy of the assigned object.
# Assignment is by value, not by reference (unlike Python and C).
a <- c(1,3,5) #
a
b <- a # 'b' gets its own copy.
b # We couldn't tell from this, though.
a[1] <- 2 # Assign the value 2 to the first element of 'a'.
# This yields a test of whether 'a' and 'b' point to the same object.
# If they did, then 'b' would also have 2 in the first position.
a # We know this.
b # Uhu! 'b' was not changed by 'a[1] <- 2'.
# Therefore, 'b' has its own copy.
#
#--- 2008/09/04
# * SYNTAX: see
help(Syntax)
#
# * VECTORS:
# A vector is a sequence of items of the SAME basic data type.
c(1,3,4.5) # Collect three values in a vector.
c("a","ab") # Collect two strings in a vector.
c(T,F,T) # Collect three logical values in a vector.
c(2.1,T) # Not an error. Coercion of T to 1.
c(2,"a",T) # Not an error. Coercion of 1 and T to strings.
# If the items are not of the same type, they are coerced:
# string <-- numeric <-- logical
# (If the items are of variable types and should not be coerced,
# use lists instead of vectors. See below.)
#
#---
#
# * INDEXING AND SUBSELECTING VECTORS:
#
# R/S, among all languages, has probably the most powerful set of
# tools for getting at elements of vectors:
# * selection/mapping with positive integer indices
# * deletion with negative integer indices
# * selection with logical indices
# * selection by name when vector entries are named ("associative array")
# - Numeric indexes: ONE-based (unlike C, but like Fortran)
a <- c(1,3,5)
a[1]; a[2]; a[3]
# (This is unlike Python and C which use ZERO-based indexing.)
# - Vector indexes:
a[c(1,3)]
# or, equivalently, except for the allocation of another vector 'b':
b <- c(1,3); a[b]
# - Vector expansion: amounts to mapping the indexes, using 'a' as a map.
a[c(1,2,1,2,1,1,1,3,3,3)]
# - Exclusions with negative numeric indexes:
a[-1]
d <- a[-1]
a[c(-1,-3)]
b <- c(-1,-3); a[b] # dito
a[c(-1,-2,-3)] # Nothing left.
a <- a[-1] # Actually remove the first element of 'a'.
# - Logical selection:
a <- c(1,3,5)
a[c(T,F,T)]
b <- c(T,F,T); a[b] # dito
a>2; a[a>2] # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
b <- (a>2); a[b] # dito
# Caution: If the index vector is not of equal length,
# it will be cyclically repeated:
a[F] # c(F,F,F) 'F' is repeated 3 times
a[T] # c(T,T,T)
a[c(T,F)] # c(T,F,T)
a[c(T,T,F,F,F)] # If too long, the index vector is truncated.
(1:12)[c(T,T,F)] # Leave out every third item.
# (The above scheme can be used to create arbitrary repeat patterns.)
# - Vectors can be indexed repeatedly:
a[c(1,3)][2] # Select item two of a[c(1,3)], i.e. item 3 of 'a'.
# (Looks like a matrix element in C, but isn't!!)
(a[c(1,3)])[2] # This is what the previous expression really means.
# Think of a[c(1,3)] as the result of some selection function.
a[c(1,3)][c(1,2,1,2)]
# - Vector indexing and subsetting can be used for assignment:
a[1:2] <- c(10,20); a # Print 'a' to see the effect of the assignment.
a[c(1,3)] <- c(100,200); a
a[-2] <- c(1000,2000); a
a[c(F,T,T)] <- c(10000,20000); a
a[2:3] <- 0; a # "0" is repeated to fill both vector elements.
b <- 1:10
b[3:6] <- c(10,20); b # "c(10,20)" is cyclically repeated.
# If the length does not divide, there is a warning message,
# but cyclical fill-in is done anyway.
#
#---
#
# * SOME FUNCTIONS THAT CREATE VECTORS
#
# - Manual entry of a vector:
x <- c(-1,2,5)
#
# - Equispaced sequences of numbers:
3:10
10:3
seq(3, 10)
seq(3, 10, by=1/3) # Third argument is named "by"
seq(3, 10, len=8)
seq(letters) # List of indexes into 'letters'
#
# - Repetitions:
rep(3, 10)
rep(1:3, 5)
# Here is something more complex that "rep" can also do:
rep(c(1,2,3,4), c(2,3,2,4))
rep(1:3, rep(2,3))
#
# - Logical values:
rep(T,5)
((1:10) > 5)
(1:10)[(1:10) > 5]
#
# - Random numbers:
x <- runif(5); x # Five uniform random numbers in [0,1]; see below.
y <- rnorm(5); y # Five standard normal random numbers; see below.
#
# - Random permutations and samples:
x <- sample(10); x # Random permutation of the numbers 1,2,...,10.
sample # can also do sampling with replacement...
sample(letters, 10, replace=T)
# - Read a vector from file: The file 'sp.dat' is a very long time series;
# download it from the class website.
x <- scan("sp.dat", n=1000) # First n values
x <- scan("sp.dat") # Reads 734656 numbers; may take a few sec.
# 'x' contains 8 bytes for each element, hence this many MB:
8*length(x)/2^20 # (2^20 equals one MegaByte)
object.size(x) # size in bytes; why is it not exactly 8*length(x)?
# You may not want to keep this 'x' around because of its length:
rm(x)
# Btw, if you run out of memory, try this:
options(memory=1E10, object.size=1E10)
#
#---
#
# * AUTOMATIC VECTOR EXTENSION:
#
# Array-bound errors for vectors do NOT exist for positive indexes!!!
# Vectors are shown as NA if indexed out of bounds, and
# automatically extended if assigned out of bounds.
x <- runif(5) # x has length 5.
x[6] # NA, not an error message.
x[10] <- 9.99 # Not an error! Element 10 now exists.
x # So do elements 6,7,8,9, filled in with NA.
length(x) # Assignment can extend the length.
# However, negative indexes (used for exclusion) can be out of bounds:
x[-11] # Out of bounds.
x[-9] # Ok, due to fill-in after assigning element 10.
# Automatic vector extension makes vectors very different from
# matrices and arrays, for which automatic extension does not exist.
#
#---
#
# * NAMED VECTOR ELEMENTS:
#
# - Vector elements can be named:
x <- c(first=1,second=3,third=5,fourth=7); x
# - Equivalently, naming elements can be performed in a separate step
# by assigning the "names()" attribute:
x <- c(1,3,5,7)
names(x) <- c("first","second","third","fourth")
x
names(x)
# - Element names can be used for indexing/subsetting:
x["second"]
x[c("second","fourth")]
nm <- c("second","third","second","third"); x[nm]
# - Named assignment/extension is possible:
x[c("fifth","fourth")] <- c(10,11); x
# Note: "fourth" existed and is re-assigned;
# "fifth" did not exist and is an extension of "x".
# - If names are not unique, the first matching name is selected:
c(a=1,b=2,a=3)["a"]
# - Example of automatic use: 'table()'
x <- sample(letters,size=1000,replace=T)
table(x)
#
#
#---
#
# * VECTORS WITH NAMED ELEMENTS AS ASSOCIATIVE ARRAYS:
#
# R's vectors with named elements are really a limited form
# of 'associative arrays'. Associative arrays are best explained
# in terms of an example:
# Assume you want to look up, say, salaries of persons given by name.
# Assume that salaries and names are stored in parallel vectors:
slry <- c(35000, 100000, 12000)
nm <- c("John", "Bill", "Jim")
# Turn "slry" into an associative array by giving its elements names:
names(slry) <- nm
# Salaries are now "associated" with names for lookup by name,
# the names are the "keys", the salaries the "values".
# For example, Bill's salary is:
slry["Bill"] # "Bill" is the key, slry["Bill"] the value.
#
# Another way to create the same: names arguments to c()
slry <- c(John=35000, Bill=100000, Jim=12000)
slry
#
# NOTE: If one wants to use numbers as keys, they have to be
# converted to strings first. The conversion happens automatically
# through coercion, as in
names(slry) <- c(10,20,30); slry
names(slry)
# Thus the keys are now the strings "10", "20", "30":
slry["20"]
# Caution: In lookups, numbers are NOT coerced to strings,
# because numbers act as indexes into the vector
x <- seq(10,40,by=10)
names(x) <- c(2,1,4,3) # Coerced to strings: "2","1",...
x[2] # Second element of 'x'.
x["2"] # Same as x[1], which has the name "2".
#
#
#---
#
# * RANKING, SORTING, REVERSING, PERMUTING:
x <- c(5:1,5:10); x
rank(x)
sort(x)
rev(x)
sample(x) # random permutation of 'x'; can also
sample(x, size=100, replace=T) # random sample of size 100 w. replt.
sample(x, size=100) # why does this not work?
# Here is one of the more important functions:
order(x) # !!!!!!!!!!!!!!!
help(order)
sort(x)
x[order(x)] # Same!
# Sorts also character data lexicographically:
x <- sample(letters); x # permute letters
sort(sample(letters))
# This is how you perform parallel sorting:
x <- runif(10)
y <- -x - 100 # 'y' is just another vector of the same length as 'x'.
x; y # Unordered parallel vectors
ord <- order(x)
x[ord]; y[ord] # Sorts both "x" and "y" in ascending order of "x".
#
#
#---
#
# * SIMPLE STATISTICS:
length(x)
sum(x)
mean(x)
var(x)
sd(x)
min(x)
max(x)
range(x)
median(x)
#
#---
#
# * CUMULATIVE OPERATIONS:
x <- 1:10
cumsum(x)
cumprod(x)
x <- 1:10 * c(1,-1)
cummax(x)
cummin(x)
#
#---
#
# * SIMPLE NUMERIC FUNCTIONS/TRANSFORMATIONS:
# Most functions that accept one number and return another number
# will naturally "vectorize" in R, namely, apply element by element.
x <- runif(20, min=-10, max=+10)
round(x)
trunc(x)
ceiling(x)
abs(x)
sqrt(x) # Comment?
log(x^2)
exp(1)
exp(x/100)
cos(pi) # "pi" is predefined; the number e=exp(1) is not.
acos(0.5) # What is 'acos()'?
#
#
################
#
# * MATRICES:
# Matrices in R are vectors with additional "meta-information"
# to structure them in a rectangular form.
# The elements of the vector fill the matrix column by column.
# ==> COLUMN MAJOR ORDER, as in Fortran, but unlike in C.
# Reformatting as a matrix is achieved by giving the vector
# a dimension attribute consisting of the numbers of rows and cols.
#
# - Reformatting vectors as matrices by filling successive cols or rows:
matrix(1:12, ncol=4) # Column major (default)
matrix(1:12, nrow=3) # Same; ncol is inferred
matrix(1:12, ncol=4, byrow=T) # Row major, forced with "byrow".
matrix(1:12, nrow=3, byrow=T) # Same
matrix(0:1, nrow=2, ncol=4) # What happened?
matrix(0, nrow=2, ncol=4) # "
matrix(letters, ncol=2) # Elements are now of type 'character'.
matrix(paste("Letter",letters), ncol=2)
# When reading data in text files, 'byrow=T' is needed
# for row-by-row input (download 'laser.dat' from the course page first):
m <- matrix(scan("fakelaser.dat", skip=1), ncol=4, byrow=T)
# ^^^^^^ Skip first line.
m
# Later we will see a more convenient function for reading
# tabular data ('read.table()').
# - Whether something is a matrix can be checked with 'is.matrix()':
is.matrix(matrix(1:12,3)); is.matrix(1:12)
x <- 2; is.matrix(x)
x <- 1:10; is.matrix(x)
x <- matrix(0, nrow=3, ncol=5); is.matrix(x)
is.matrix(matrix(0, nrow=3, ncol=5)) # tautology
#
# - The dimension attribute: it is the sole difference between
# a vector and a matrix. It can be queried:
dim(m) # Vector of length 2 with the two dimensions.
dim(m)[1]; dim(m)[2] # Each dimension separately.
nrow(m); ncol(m) # Same.
# Vectors can be turned into matrices by assigning the
# dimension attribute:
m <- 1:12 # Just a vector.
dim(m) <- c(3,4) # Now a matrix.
m
is.matrix(m) # TRUE
dim(m) <- NULL # Stripping to a vector.
m
is.matrix(m) # FALSE
# - The dimension name attributes: row- and col-names
colnames(m) <- letters[1:4]
rownames(m) <- LETTERS[1:3]
m
colnames(m)
rownames(m)
# - Indexing/subselecting rows and columns: (differs from C!)
m <- matrix(1:12, ncol=4)
m[1,4] # Element in row 1, column 4.
m[1:3,] # First 3 rows.
m[,3:4] # Last 2 columns.
m[1:3,3:4] # Submatrix of size 3x2 (unlike Python!)
m[c(1,2,1,2),] # Repeat rows 1 and 2.
m[,c(1,2,1,2)] # Repeat columns 1 and 2.
m[c(1,2,1,2),c(1,2,1,2)] # Repeat left-upper 2x2 matrix 4 times.
m[-1,] # Select all but the first row.
m[,-c(2,4)] # Select all but columns 2 and 4.
m["A",] # Only works if col/rownames have been assigned.
m[c("A","C"),]
m[c("A","C"),"a"]
# - ATTENTION: Selection of individual rows and cols generates vectors
# which no longer know that they were rows or columns.
# R has no concept of col-vectors and row-vectors:
m[,1]
is.matrix(m[,1])
is.matrix(m[1,])
# Vector is vector, period (unlike Matlab).
# You can force the issue by turning vectors into
# Nx1 or 1xN matrices:
x <- 1:10
dim(x) <- c(10,1); x
dim(x) <- c(1,10); x
#
# - Index/subsetting can be used for assignment:
m[1,2] <- 0
m[1,] <- 11:14
m[,1] <- 0
m[1,c(F,F,T,T)] <- c(7,10)
#
# - Associative array feature for matrices:
rownames(m) <- c("x","y","z") # like 'names(vec)'
colnames(m) <- c("a","b","c","d")
m["x","d"] # number
m["x",] # vector
m[,"c"] # vector
m[c("x","z"),c("c","a")] # submatrix (different from Python!)
m[c("x","z","x","y"),c("c","a","a")] # col-row-rearrangement
#
# Roadmap:
# - wrap up matrices
# - arrays
# - lists
# - dataframes
# - loops and conditionals
# - character manipulation functions
# - fcts related to distributions
# - plotting functions
# - writing FUNCTIONS
# - querying object types
# - Column and row binding:
# Two functions that permit collecting cols and rows to form matrices.
x <- runif(5) # Play data.
cbind(1:5, x, x, rep(10,5)) # Column binding.
rbind(1:5, x) # Row binding.
# (Vectors are NOT thought of as columns or rows on their own;
# they take on these roles inside the "cbind" and "rbind" functions.)
# Both functions accept an arbitrary number of conforming arguments.
# You can also bind whole matrices:
cbind(rbind(1:3,11:13,21:23), cbind(1001:1003,2001:2003))
# A more friendly way of writing the same is:
m1 <- rbind(1:3,11:13,21:23) # 3x3
m2 <- cbind(1001:1003,2001:2003) # 3x2
cbind(m1, m2) # 3x5
# Conforming for 'cbind()' means the arguments have equal number
# of rows, for 'rbind()' it means equal number of columns.
# If vector arguments are not conforming, R extends them cyclically
# or clips them but may give you a warning if the shorter arguments
# are not of fractional length of the longest argument:
cbind(1:3,0) # second arg has size 1, = fraction of 3
cbind(1:6,1:3) # size 3 is a fraction of size 6
cbind(1:3,1:2) # size 2 is not a fraction of size 3 => warning
cbind(1:3,matrix(11:14,2)) # clipping: the second arg dictates nrow
# Don't rely on cyclic extension except for the simplest cases
# such as repeating constants.
# - Coercion of matrices to vectors:
# A matrix can always be treated as a vector.
# The following does not create an error message:
m <- matrix(1:12, nrow=3)
m[,1]
# Recall the column-major convention for storing matrices in R.
#
# - Some useful functions for generating patterned matrices:
diag(5)
outer(1:3,1:4)
outer(1:3,1:4, FUN=paste, sep="")
help(outer)
x <- outer(1:3,1:4)
row(x)
col(x)
row(x)>col(x)
x[row(x)>col(x)]
#
################
#
# * ARRAYS: the generalization of matrices to more than 2 indexes
#
a <- array(1:24, dim=c(2,3,4))
a
a[1,2,1]
a[,2:3,c(1,4)]
a[,,c(T,F,T,F)]
a[,,-3]
# Main use: contingency tables
# e.g., table frequencies of sex by education by income bracket ...
# The associative array feature also exists when dimnames are given
# To understand this, we need to talk about lists first.
#
#
################ read/study up to here for HW 1 #####################
#
# * LISTS:
#
# - Recall: Vectors and matrices can carry only one basic type
# of data at a time, numeric OR logical OR character.
matrix(c(T,F),nrow=4,ncol=4)
matrix(paste(LETTERS,letters)[1:25],nrow=5)
# Lists are data structures without this restriction.
# Lists are sequences of anything. They can contain:
# basic data types, vectors, matrices, arrays, lists (recursion),...
# In particular, they can contain arbitrarily nested lists of lists.
# Examples:
list(1,"1",T,F,1E100,NA,-Inf,NULL,1:5,letters[2:5],list(1,2,"a"))
# Balanced binary tree:
list(list(list(1,2),list(3,4)),list(list(5,6),list(7,8)))
#
mylist <- list(vec=1:3, mat=cbind(1:2,3:4), flags=c(T,T,F,T),
lst=list(1:3,1:4))
mylist
# Component names are optional. They can be glued on by force:
x <- list(1,"a",T,NULL); names(x) <- LETTERS[1:4]; x
# - Access to LIST ITEMS is by index with "[[..]]", or by "$name"
# if names exist:
mylist[[2]]
mylist$mat
mylist[["mat"]]
# This is also how lists are printed:
list(1,T)
list(a=1,b=T)
#
# - Access to SUBLISTS is as if the list were a vector:
mylist[2:3] # Sublist of length 2.
mylist[2] # Sublist of length 1.
mylist[c(1,4,1,4,2,3,2,3)] # Out-of-order and expansion.
mylist[c("lst","mat","vec","mat")] # Named access.
mylist["vec"] # Sublist of length 1.
mylist[["vec"]] # Vector!!!
mylist[c(T,F,T,F)] # Logical selection
#
# - Sublists are lists and require list item access to get at
# the components:
mylist[2:3][[1]]
mylist[2:3]$mat
mylist[2][[1]] # Again, "mylist[2]" is a list of length 1...
mylist[2]$mat # Dito.
#
# - Primary use of lists in statistics:
# collecting the results of a complex statistical or numerical
# analysis, such as a regression or an eigendecomposition:
eigen(matrix(1,3,3))
# is a list with two named items: $values and $vectors,
# the first being a vector containing the eigenvalues,
# the second being a matrix with the eigenvectors in the columns.
#
#
#
################
#
# * DATAFRAMES:
#
# - In statistics, data are usually rectangular tables, cases by variables.
# Problem: Variables are not all of the same type.
# Some may be quantitative, hence stored as numeric data.
# Other variables may be categorical and stored either
# with numeric or character/string codes for categories.
# => Matrices cannot accommodate variables of both types...
# Solution: Data frames. They are similar to matrices,
# but columns may differ in basic data types.
# (The entries have to be basic, not complex.)
#
# Main use of dataframes: data tables with mixed-type variables
#
# Dataframes are printed like matrices, but they are internally
# implemented as lists.
#
# - The function "data.frame()" can bundle conforming vectors,
# matrices, other dataframes into a single dataframe:
myframe <- data.frame(somenumbers=1:3,
somecharacters=c("a","b","c"),
somelogicals=c(T,F,T))
myframe
#
# - Auxiliary functions for dataframes:
is.data.frame(myframe) # Checking type.
as.data.frame(cbind(1:4,11:14)) # Coercing a matrix to a dataframe.
#
# - Many matrix manipulations carry over to dataframes,
# with one exception:
dim(myframe)
nrow(myframe)
ncol(myframe)
rownames(myframe)
colnames(myframe)
myframe[,3] # Col 3 coerced to vector.
myframe[2,] # << Sub-dataframe with row 2, NOT coerced to vector!
myframe[,2:3] # Sub-dataframe consisting of cols 2 and 3.
myframe[2:3,] # Sub-dataframe consisting of rows 2 and 3.
# Why is 'myframe[2,]' not coerced to a vector?
# Because the items might be of differing types!
# Hence the row 'myframe[2,]' is still a dataframe...
#
# (Of course matrix multiplication does not work:)
myframe %*% matrix(rep(1,10), nrow=5)
# - 'read.table()' reads a tabular ASCII/txt file into a dataframe,
# possibly with row and column names:
read.table("fakelaser.dat", header=F)
# The file can be an URL:
url <- "http://www-stat.wharton.upenn.edu/~buja/STAT-541/laser.dat"
my.frame <- read.table(url, header=T)
# This function has many other options for particular cases.
# From the "help()" page:
# read.table(file, header = FALSE, sep = "", quote = "\"'", dec = ".",
# row.names, col.names, as.is = FALSE, na.strings = "NA",
# colClasses = NA, nrows = -1,
# skip = 0, check.names = TRUE, fill = !blank.lines.skip,
# strip.white = FALSE, blank.lines.skip = TRUE,
# comment.char = "#")
# - Since data frames are lists of columns, we can use "lapply()" (list-apply):
lapply(myframe, class)
# The following does not do the right thing:
apply(myframe, 2, class)
# Why? "apply()" expects a matrix in its first argument;
# if it isn't, it gets coerced into a matrix;
# but matrices can't have factors in their columns, only vectors,
# so the factor columns are coerced to character columns.
################################################################
#
# Flow control: loops, conditionals
#
m <- 1:12 # Play data: a vector of length 12.
#
# - if-conditional:
# (The condition below is FALSE for length(m) == 12, so nothing prints.)
if(length(m) > 15) { print("length(m) is greater than 15") }
# With "else" clause:
if(length(m) > 15) {
print("length(m) > 15")
} else {
print("length(m) <= 15")
}
# This sort of thing is most useful inside loops; see below.
# - The vectorized "ifelse()" function:
# Not a flow control construct, but often replaces a combination of
# for-loop and if-else statements.
ifelse(c(T,F,T), c(1,2,3), c(-1,-2,-3))
# The function runs down the three arguments in parallel,
# checks each element in the first argument,
# if true, picks the corresponding element in the second argument,
# if false, picks the corresponding element in the third argument,
# returns a vector/matrix/array of the size of the first argument.
# If the second or third argument are not conforming,
# they are cyclically repeated, as in this implementation of
# 10 Bernoulli trials:
ifelse(runif(10) > 0.5, "H", "T")
# - Note the difference between 'if' and 'ifelse()':
# * 'if' is a syntactic element that dispatches execution
# depending on the outcome of a single logical outcome.
# * 'ifelse()' is a function that takes a logical vector
# and selects from two other vectors depending on the logicals.
# - for-loop: runs over the elements of a vector.
for(i in c(10,100,1000)) { j <- 2*i; print(j); }
# The variable 'i' is the looping variable. It gets set to
# each element of the vector in turn, and for each
# the subsequent, usually composite ({...}) statement gets executed.
# (Braces are optional if there is only one statement in the loop.)
# - repeat-loop: needs a "break" for termination.
repeat{ print("Trying..."); rnum <- runif(1);
if(rnum > 0.9) { print(c("Found one:",rnum)); break } }
# - while-loop:
str <- "Not yet..."
while(runif(1) < 0.99) {
str <- paste(str,".",sep="")
print(str)
}
print("Finally...")
#
#
# - Looping over row or columns of matrices with 'apply()':
m <- matrix(1:12, 3); m
# * Looping over rows::
apply(m, 1, min) # Return a vector with minima of the rows.
# ^^^ the "min()" function, applied to the 1st dimension.
# * Looping over columns:
apply(m, 2, mean) # Return a vector with the means of the columns.
# ^^^^ the "mean()" function, applied to the 2nd dim.
# * 'apply()' can also be used with functions that return vectors,
# such as 'range()' and 'sort()':
apply(m, 2, range)
# There are complications with "apply(m, 1, range)":
# the result is 2xnrow(m), not nrow(m)x2, as one would expect.
# This requires transposition, 't()', to get the expected matrix:
t(apply(m, 1, range))
#
#
# - Looping over elements of a list:
lis <- list(numvec=1:10, charvec=letters,
logicvec=rep(c(T,F),3), mat=cbind(1:3,1))
lis
lapply(lis, length) # returns a list with one number per entry: the length
unlist(lapply(lis, length)) # same but 'flattened' to a vector
sapply(lis, length) # same but Simpler ('Simple apply')
lapply(lis, dim) # returns a list with the dim attribute for each entry
lapply(lis, sort) # returns a list of sorted vectors
################################################################
#
# * STRING MANIPULATION:
#
# R has a number of functions that make the use of other scripting
# languages such as awk, perl, python less and less necessary for
# pattern matching problems. There are functions for searching
# and substituting text patterns expressed as so-called
# "regular expressions". Here is a collection:
#
# paste(..., sep = " ", collapse = NULL)
# substr(x, start, stop)
# substring(text, first, last = 1000000)
# substr(x, start, stop) <- value
# substring(text, first, last = 1000000) <- value
# strsplit(x, split, extended = TRUE)
# nchar(x) # string length
# chartr(old, new, x)
# tolower(x)
# toupper(x)
# match(x, table, nomatch = NA, incomparables = FALSE)
# x %in% table
# pmatch(x, table, nomatch = NA, duplicates.ok = FALSE)
# charmatch(x, table, nomatch = NA)
# grep(pattern, x, ignore.case=FALSE, extended=TRUE, value=FALSE)
# sub(pattern, replacement, x,
# ignore.case=FALSE, extended=TRUE)
# gsub(pattern, replacement, x,
# ignore.case=FALSE, extended=TRUE)
# regexpr(pattern, text, extended=TRUE)
#
# String dimensions when plotting:
# strwidth(s, units = "user", cex = NULL)
# strheight(s, units = "user", cex = NULL)
#
# Example: download the dataset 'dict.dat' from the course page, then...
dict <- scan("DATA/dict.dat",w="",quote="")
sel <- grep("source", dict) # All words containing "source".
dict[sel]
# Stay tuned for HW 3.
#
#
################################################################
#
# * DISTRIBUTIONS: PSEUDO-RANDOM NUMBERS, QUANTILES, CDFS, DENSITIES
#
# The following continuous distributions come with the standard
# version of R, each with
# . a random number generator,
# . a quantile function,
# . a cdf function, and
# . a density fct.
# - Uniform:
# runif(n, min = 0, max = 1)
# qunif(p, min = 0, max = 1, lower.tail = TRUE, log.p = FALSE)
# punif(q, min = 0, max = 1, lower.tail = TRUE, log.p = FALSE)
# dunif(x, min = 0, max = 1, log = FALSE)
#
# - Normal:
# rnorm(n, mean = 0, sd = 1)
# qnorm(p, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
# pnorm(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
# dnorm(x, mean = 0, sd = 1, log = FALSE)
#
# - Student's t:
# rt(n, df)
# qt(p, df, lower.tail = TRUE, log.p = FALSE)
# pt(q, df, ncp, lower.tail = TRUE, log.p = FALSE)
# dt(x, df, log = FALSE)
#
# - F:
# rf(n, df1, df2)
# qf(p, df1, df2, lower.tail = TRUE, log.p = FALSE)
# pf(q, df1, df2, ncp = 0, lower.tail = TRUE, log.p = FALSE)
# df(x, df1, df2, log = FALSE)
#
# - Chi-square:
# rchisq(n,df,ncp=0)
# qchisq(p, df, ncp = 0, lower.tail = TRUE, log.p = FALSE)
# pchisq(q, df, ncp = 0, lower.tail = TRUE, log.p = FALSE)
# dchisq(x, df, ncp = 0, log = FALSE)
#
# - Exponential:
# rexp(n, rate = 1)
# qexp(p, rate = 1, lower.tail = TRUE, log.p = FALSE)
# pexp(q, rate = 1, lower.tail = TRUE, log.p = FALSE)
# dexp(x, rate = 1, log = FALSE)
#
# - Cauchy:
# rcauchy(n, location = 0, scale = 1)
# qcauchy(p, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
# pcauchy(q, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
# dcauchy(x, location = 0, scale = 1, log = FALSE)
#
# - Beta:
# rbeta(n,shape1,shape2)
# qbeta(p, shape1, shape2, lower.tail = TRUE, log.p = FALSE)
# pbeta(q, shape1, shape2, ncp = 0, lower.tail = TRUE, log.p = FALSE)
# dbeta(x, shape1, shape2, ncp = 0, log = FALSE)
#
# - Gamma:
# rgamma(n,shape,rate=1,scale=1/rate)
# qgamma(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE,
# pgamma(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE,
# dgamma(x, shape, rate = 1, scale = 1/rate, log = FALSE)
#
# Some uses:
# - Random number generators are used, for example,
# . in simulations, as when evaluating the efficiency
# of statistical methods;
# . in Bootstrap inference for frequentist statistics;
# . in Bayesian computing to simulate posterior distributions.
# Base of simulations: law of large numbers (rel. freq. approx. probs.)
# - The quantile functions are used
# . to find critical values in statistical tests;
# . in q-q-plots to check empirical against theoretical distributions
# (see below).
# - The cdf functions are used for calculating p-values in
# statistical tests.
# - The normal density is sometimes used as a "kernel" in
# smoothing methods.
#
# - Simulating discrete distributions and sampling:
# The function
sample
# serves many purposes. It does sampling with and without replacement.
#
# - Bernoulli trials and Binomials: e.g., 10 head/tails, biased coin flip,
ifelse(runif(10)<.55, "H", "T")
sample(c("H","T"), size=10, repl=T, prob=c(0.55,0.45))
rbinom(n=10, size=1, p=.55)
# (Sampling WITH replacement is also called i.i.d. sampling.)
#
# - Multinomial: e.g., 10 draws from uniform distribution on 4 objects
sample(1:4, size=10, repl=T, prob=rep(1/4,4))
dmultinom; rmultinom
#
# - Sampling WITH replacement:
sample(1:10, 5) # Samples w/o replacement (default).
sample(10) # Random permutation of 1:10.
# - The function can also be applied to non-numeric data:
sample(letters) # A permutation of letters.
sample(letters, 10) # 10 distinct random letters.
sample(letters, 10, repl=T) # 10 i.i.d. samples of letters.
sample(letters, 100) # Error! Sampling w/o replacement.
sample(letters, 100, repl=T) # Fine! Sampling with replacement.
#
# 'sample()' is used to implement modern resampling methods such as
# the bootstrap and permutation tests (see below).
#
# . Important note on seeding random number generators:
# The "seed" stores the state of the pseudo-random number generator.
.Random.seed
runif(1)
.Random.seed # The seed has changed.
Random.seed.store <- .Random.seed # Store the seed.
runif(1) # (*) Remember this random number.
runif(10) # Now push the generator 10 times.
.Random.seed <- Random.seed.store # Force it back to recreate (*)
runif(1)
# Indeed, this is the same random number as in (*).
# Storing the seed is important for simulations that must be
# reproducible, for example, when putting simulation results
# in a publication.
#
################################################################
#
# * PLOTTING:
#
# - The par() function for querying and setting plotting parameters.
help(par)
#
# Some important parameters to set:
par(mfrow=c(3,4), # 3x4 layout of 12 plots, filling rows first (mfcol)
mgp=c(2.0, 0.5, 0), # distance of axis title ("x","y"),
# numbers at the ticks, border line,
# (in margin lines)
mar=c(3,2.5,2.5,1.5), # margins: bottom-left-top-right (in lines)
oma=rep(1,4), # outer margin around the collection
# of plots on the page (in lines)
mex=0.8, # expansion of margin line width,
# relative to default
cex=0.8) # expansion of characters, relative to default
#
# - List of all possible plot characters:
windows(width=10, height=3)
plot(1:25, rep(1,25), pch=1:25, xlab="", ylab="", yaxt="n", cex=2)
#
#
################################################################
#
# WRITING FUNCTIONS:
#
# Syntax: simplest case
fun1 <- function(x) { plot(x) }
fun2 <- function(x) { 2*x }
# Semantics 1: fun1 creates a 'side effect', plotting the input argument;
# fun2 returns a result, the doubled input argument.
fun1(1:10) # plots 1,2,...,10
fun2(1:10) # returns the vector containing 2,4,...,20
# Semantics 2: Functions are 'first class citizens', that is,
# they can be assigned, printed, passed as arguments,...
fun2 # Print the function.
print(fun2) # Same.
funx <- fun2; funx(1:5) # Application: len <- length; len(1:10)
# What is this?
(function(x) { 2*x })(1:10)
# It's getting weird:
funp <- function(f) print(f)
funp(funp)
# Assignment:
x <- fun2(1:10); x # Fine.
x <- fun1(1:10); x # Assigns NULL, but plots. 'fun1()' is for side effect.
# Syntax: The following are equivalent ways of writing fun2().
fun2 <- function(x) { 2*x } # The original.
fun2 <- function(x) 2*x # Single statement bodies don't need braces.
fun2 <- function(x) { return(2*x) } # Make the return explicit.
fun2 <- function(x) { y <- 2*x; y } # Value of last statement is returned.
fun2 <- function(x) { y <- 2*x; return(y) } # Dito
# Functions with more arguments and complex bodies:
fun3 <- function(t, q) {
par(mgp=c(1.8,.5,0), mar=c(3,3,2,1))
plot(t, q, pch=16, cex=.5)
}
fun3(t=1:100, q=rnorm(100))
# Function calls with named arguments: independent of order of arguments
fun3(q=rnorm(100), t=1:100)
# Functions with defaulted arguments: defaults can use preceding arguments
fun4 <- function(y=rnorm(100), x=1:length(y), cex=.5, pch=16, w=5, h=5, new=T) {
if(new)
par(mgp=c(1.8,.5,0), mar=c(3,3,2,1))
plot(x, y, pch=pch, cex=cex) # 1st 'cex': argument name
# 2nd 'cex': value to be passed to plot
}
fun4() # All arguments defaulted.
fun4(1:100, rnorm(100)) # 1st and 2nd argument passed by order.
fun4(1:100, rnorm(100), new=F) # Last argument is 7th by order, so needs name.
fun4(1:100, rnorm(100), new=F, cex=.2)
fun4(1:100, rnorm(100), new=F, cex=1, pch=3)
fun4(x=1:100, y=rnorm(100), pch=2, w=10, h=2) # x= and y= not needed but more readable
fun4(cex=.2)
# Functions returning complex data structures:
fun5 <- function(x) {
list(N=length(x),
location=c(
mean=mean(x),
mean.tr.05=mean(x,trim=.05),
mean.tr.10=mean(x,trim=.10),
mean.tr.25=mean(x,trim=.25),
median=median(x)
),
dispersion=c(
sdev=sd(x),
mad=mad(x),
IQR=IQR(x)
)
)
}
summaries <- fun5(rnorm(10000))
summaries
# Semantics 3: All passing of arguments is by value, not reference/name.
# In other words, all arguments get copied into the body of the function.
fun5 <- function(x) { x[1] <- Inf; x }
x <- 1:3; fun5(x); x
# In the above line, fun5() is working on a copy of x, also called x.
# Semantics 4: Function bodies have access to outside variables.
fun6 <- function() { x[1] <- Inf; x }
x <- 1:3; fun6()
x # 'x' was not changed. The body of fun6() worked on a copy of x.
# Rule: Do not use this feature of R semantics.
# Pass all data into a function as arguments.
# (See Section 10.7 of Venables & Smith on "scoping rules" in R.)
# Functions called inside functions:
fun7 <- function(x) { rbind(x, fun2(x)) }
fun7(1:4)
# (This is an exception to the rule of not relying on the scoping rules of R.)
# Functions defined inside functions:
fun8 <- function(x) { fun9 <- function(x) { 2*x }; fun9(x) }
fun8(1:5)
fun9(1:5) # Does not exist; defined only inside the body.
# Functions passed as arguments into other functions:
x <- matrix(rnorm(1000),100,10)
fun9 <- function(x) quantile(x, prob=c(.25,.50,.75))
apply(x, 2, fun9)
# Equivalent:
apply(x, 2, function(x) quantile(x, prob=c(.25,.50,.75)))
################################################################
#
# * QUERYING R OBJECTS:
#
# Asking for the primary data type:
class(1:10)
class(T)
class(letters)
class(swiss) # Like 'letters', 'swiss' comes with R.
#
# Queries about basic data types:
is.numeric(1:10)
is.character(letters)
is.logical(F)
is.na(NA) # !!!!!!!!!!! use for removal of NAs:
x <- rnorm(100); x[rnorm(100)>1] <- NA; is.na(x); x[!is.na(x)]
is.infinite(-Inf)
is.null(NULL)
#
# Queries about complex data types:
is.vector(letters)
is.array(array(1:12,c(2,3,2)))
is.matrix(swiss)
is.data.frame(swiss)
is.list(swiss)
is.function(mean)
#
# All the query functions that start with "is.":
apropos("^is[.]") # '.' is a special character in regular expressions.
#
# Querying components of lists and columns in data frames:
names(swiss)
# Querying row and column names in matrices and data frames:
colnames(swiss) # Same as names() for data frames.
rownames(swiss)
#
################################################################
#
# * MORE ON R: A MENTAL MODEL FOR R
#
# black box with inner state described by
# * Variables x, y, z, err, ..., pi, letters, LETTERS, ...
# * Their values, that is, the data structures such as
# values, vectors, matrices,...
# to which the variables point.
# * The black box has an engine, the interpreter,
# that acts on the state of the box.
# * The state of the black box is changed by executing
# assignments, such as x <- runif(100)
# * On quitting q() you decide whether to save the newly
# formed variables and their values.
# * Learn about the state of the black box by executing:
# . ls() to learn what variables are bound
# . print(x) to learn what the value of the variable is
# * There are two additional black boxes:
# . par(), ... to learn about the values and data structures
# of two sub-boxes:
# - Plotting box:
# . State: plotting parameters such as character size, margin width,...
# . Reports the settings of the plotting parameters:
par()
# . State can be changed by executing, say,
par(mfrow=c(2,2))
# - Random number generator:
# . State: the seed vector
.Random.seed
# . State changes when 1) a random number is generated, or
# 2) .Random.seed is set by the user
# - Printing and other parameters, e.g., line width, number of digits...
options()
#
#
################################################################
#
# Changing the working directory:
# In the standard interface, one can change the working directory...
# - temporarily: -> File -> Change Dir
# - permanently: R-click on R icon -> Edit Properties -> Start in:
#
################################################################
|
## File containing the OpEC4photo and all its auxiliary functions
## Optimize C4 photosynthesis parameters by nonlinear least squares.
##
## Fits Vcmax, Vpmax and Jmax by minimizing the residual sum of squares
## between the observed assimilation (column 1 of `obsDat`) and the
## assimilation predicted by eC4photo() from the drivers in columns 2-4.
## `iVpr` is held fixed (a warning is issued for a non-default value).
## Confidence intervals are Wald-type, built from the inverse Hessian
## returned by optim() scaled by the residual variance.
##
## NOTE(review): the layout of `obsDat` is assumed to be (observed
## Assim, then the three driver columns expected by eC4photo()) --
## confirm against the eC4photo() documentation.
##
## Value: an object of class "OpEC4photo" holding the best-fit
## parameters, RSS, convergence code, variance-covariance matrix,
## residual df, confidence intervals, the level, and the data.
OpEC4photo <- function(obsDat, iVcmax = 60, iVpmax = 120,
                       iVpr = 80, iJmax = 400, co2 = 380, o2 = 210, level = 0.95) {
    if (iVpr != 80)
        warning("\n iVpr is not optimized at the moment\n")
    coef <- c(iVcmax, iVpmax, iJmax)
    obsvec <- as.vector(obsDat[, 1])
    ## (An unused `SST` cross-product that was never referenced has been
    ## removed.)
    ## Objective: residual sum of squares for a candidate
    ## (Vcmax, Vpmax, Jmax); iVpr comes from the enclosing scope.
    RSS <- function(coefs) {
        vec1 <- eC4photo(obsDat[, 2], obsDat[, 3], obsDat[, 4],
                         co2, o2, coefs[1], coefs[2], iVpr,
                         coefs[3])$Assim
        rss <- t(obsvec - vec1) %*% (obsvec - vec1)
        rss
    }
    ## Box-constrained quasi-Newton optimization; keep the Hessian at
    ## the optimum for the covariance estimate below.
    resp <- optim(coef, RSS, method = "L-BFGS-B",
                  lower = c(5, 5, 5),
                  hessian = TRUE)
    bestParms <- resp$par
    ReSumS <- resp$value
    conv <- resp$convergence
    HessMat <- resp$hessian
    iHess <- solve(HessMat)
    def <- nrow(obsDat) - length(coef)  # residual degrees of freedom
    sigm <- ReSumS / def                # residual variance estimate
    varcov <- sigm * iHess
    ## Need to add the correlation matrix
    ## Calculating confidence intervals (two-sided, t-based)
    alp <- (1 - level) / 2
    ## Vcmax
    lcVcmax <- bestParms[1] + qt(alp, def) * sqrt(varcov[1, 1])
    ucVcmax <- bestParms[1] + qt(1 - alp, def) * sqrt(varcov[1, 1])
    ## Vpmax
    lcVpmax <- bestParms[2] + qt(alp, def) * sqrt(varcov[2, 2])
    ucVpmax <- bestParms[2] + qt(1 - alp, def) * sqrt(varcov[2, 2])
    ## Jmax
    lcJmax <- bestParms[3] + qt(alp, def) * sqrt(varcov[3, 3])
    ucJmax <- bestParms[3] + qt(1 - alp, def) * sqrt(varcov[3, 3])
    structure(list(bestVcmax = bestParms[1],
                   bestVpmax = bestParms[2],
                   bestJmax = bestParms[3],
                   ReSumS = as.numeric(ReSumS),
                   Convergence = conv,
                   VarCov = varcov, df = def,
                   ciVcmax = c(lcVcmax, ucVcmax),
                   ciVpmax = c(lcVpmax, ucVpmax),
                   ciJmax = c(lcJmax, ucJmax),
                   level = level, data = obsDat)
              , class = "OpEC4photo")
}
## Coefficient of determination (R^2) of eC4photo() predictions against
## observed data, evaluated at the supplied parameter values (no fitting
## is performed here).
##
## obsDat : data frame; column 1 holds the observed response, columns
##          2-4 the environmental drivers passed to eC4photo().
## type   : which quantity column 1 contains ("Assim" or "StomCond").
##
## Returns 1 - SSE/SST; stops with an error if the result is negative.
RsqEC4photo <- function(obsDat, iVcmax=60,iVpmax=120,
                        iVpr=80,iJmax=400,co2=380,o2=210,
                        type=c("Assim","StomCond")){
    coef <- c(iVcmax,iVpmax,iVpr,iJmax)
    type <- match.arg(type)
    if(type == "Assim"){
        ## Crude sanity check on the units of the observed values.
        if(max(obsDat[,1]) < 1)
            warning("Units of Assim might be wrong:
should be micro mol m-2 s-1\n")
        vec1 <- eC4photo(obsDat[,2],obsDat[,3],obsDat[,4],
                         co2,o2,coef[1],coef[2],coef[3],
                         coef[4])$Assim
    }
    if(type == "StomCond"){
        if(max(obsDat[,1]) < 1)
            warning("Units of StomCond might be wrong:
should be mmol m-2 s-1\n")
        ## NOTE(review): this branch also extracts $Assim from the
        ## eC4photo() result, so for type = "StomCond" the observed
        ## stomatal conductance is compared against *assimilation*
        ## predictions.  This looks like a copy-paste bug -- confirm
        ## which component of the eC4photo() return value holds the
        ## stomatal conductance and extract that one instead.
        vec1 <- eC4photo(obsDat[,2],obsDat[,3],obsDat[,4],
                         co2,o2,coef[1],coef[2],coef[3],
                         coef[4])$Assim
    }
    ## SST below is the *uncentered* sum of squares (no mean
    ## subtraction), so this R^2 is measured against a zero baseline.
    obsvec <- as.matrix(obsDat[,1])
    SST <- t(obsvec)%*%(obsvec)
    SSE <- t(obsvec - vec1)%*%(obsvec - vec1)
    Rsquare <- 1 - SSE/SST
    if(Rsquare < 0)
        stop("negative Rsq\n")
    Rsquare
}
## Print method for "OpEC4photo" objects: for each optimized parameter,
## report the best-fit value followed by its confidence interval, then
## the optimizer's convergence status.  Returns `x` invisibly.
print.OpEC4photo <- function(x, ...) {
    ## Emit one "best value + confidence interval" section; reads the
    ## confidence level from `x` in the enclosing environment.
    show_par <- function(label, best, ci) {
        cat(paste0("\nbest ", label, ":"), best, "\n")
        cat("\n", x$level * 100, paste0("% Conf Interval ", label))
        cat("\n", ci, "\n")
    }
    cat("\nOptimization of C4 photosynthesis\n")
    show_par("Vcmax", x$bestVcmax, x$ciVcmax)
    show_par("Vpmax", x$bestVpmax, x$ciVpmax)
    show_par("Jmax", x$bestJmax, x$ciJmax)
    cat("\nConvergence:")
    cat(if (x$Convergence == 0) "YES\n" else "NO\n")
    invisible(x)
}
## Summary method for "OpEC4photo" objects.
##
## Re-evaluates eC4photo() at the fitted parameters (with the same fixed
## settings used during fitting: ca = 380, oa = 210, Vpr = 80) and
## computes goodness-of-fit diagnostics: index of agreement, two R^2
## variants, mean bias, AIC/BIC, residual standard deviation, and the
## indices of standardized residuals exceeding 2 in absolute value.
summary.OpEC4photo <- function(object,...){
    dat <- object$data
    obsvec <- as.vector(dat[,1])
    fittd <- eC4photo(dat[,2],dat[,3],dat[,4],ca=380,oa=210,
                      object$bestVcmax,object$bestVpmax,80,object$bestJmax)
    rsd <- obsvec - fittd$Assim   # residuals: observed - fitted
    rss <- object$ReSumS
    ## Some measures of agreement
    ## Index of agreement
    IAN <- t(rsd)%*%rsd
    IAD1 <- abs(rsd) + abs(scale(obsvec,scale=FALSE))
    IAD <- t(IAD1)%*%IAD1
    IA <- 1 - IAN/IAD
    ## Rsquared 1 (uncentered: relative to a zero baseline)
    Rsq1 <- as.numeric(1 - rss / t(obsvec)%*%obsvec)
    ## Rsquared 2 (squared correlation between fitted and observed)
    Rsq2 <- as.numeric(cor(fittd$Assim,obsvec)^2)
    ## Mean bias
    meanBias <- mean(rsd)
    ## AIC and BIC
    ## NOTE(review): with k estimated parameters the usual forms are
    ## n*log(RSS/n) + 2*k and n*log(RSS/n) + k*log(n); here the
    ## penalties are the constants 2 and 2*log(n1) even though three
    ## parameters were optimized -- confirm the intended penalty.
    n1 <- length(rsd)
    AIC <- n1 * log(rss/n1) + 2
    BIC <- n1 * log(rss/n1) + 2 * log(n1)
    ## NOTE(review): sigma uses n1 - 2 degrees of freedom while the fit
    ## estimated three parameters (OpEC4photo() uses n - 3) -- verify.
    sigma <- sqrt(rss/(n1-2))
    stdresid <- rsd/sigma
    outli <- which(abs(stdresid) > 2)   # potential outliers
    structure(list(fitted=fittd$Assim,resid=rsd,
                   stdresid=stdresid,
                   IA=IA,Rsq1=Rsq1,Rsq2=Rsq2,
                   meanBias=meanBias,
                   AIC=AIC,BIC=BIC,
                   outli=outli,
                   sigma=sigma),class="summary.OpEC4photo")
}
## Need to add a plotting method
| /R/OpEC4photo.R | permissive | serbinsh/biocro-1 | R | false | false | 4,531 | r | ## File containing the OpEC4photo and all it auxiliary functions
OpEC4photo <- function(obsDat,iVcmax=60,iVpmax=120,
iVpr=80,iJmax=400,co2=380,o2=210,level=0.95){
if(iVpr != 80)
warning("\n iVpr is not optimized at the moment\n")
coef <- c(iVcmax,iVpmax,iJmax)
obsvec <- as.vector(obsDat[,1])
SST <- t(obsvec)%*%(obsvec)
RSS <- function(coefs){
vec1 <- eC4photo(obsDat[,2],obsDat[,3],obsDat[,4],
co2,o2,coefs[1],coefs[2],iVpr,
coefs[3])$Assim
rss <- t(obsvec - vec1)%*%(obsvec - vec1)
rss
}
resp <- optim(coef,RSS,method="L-BFGS-B",
lower=c(5,5,5),
hessian=TRUE)
bestParms <- resp$par
ReSumS <- resp$value
conv <- resp$convergence
HessMat <- resp$hessian
iHess <- solve(HessMat)
def <- nrow(obsDat)-length(coef)
sigm <- ReSumS/def
varcov <- sigm * iHess
## Need to add the correlation matrix
## Calculating confidence intervals
alp <- (1 - level)/2
## Vcmax
lcVcmax <- bestParms[1] + qt(alp,def)*sqrt(varcov[1,1])
ucVcmax <- bestParms[1] + qt(1-alp,def)*sqrt(varcov[1,1])
## Vpmax
lcVpmax <- bestParms[2] + qt(alp,def)*sqrt(varcov[2,2])
ucVpmax <- bestParms[2] + qt(1-alp,def)*sqrt(varcov[2,2])
## Jmax
lcJmax <- bestParms[3] + qt(alp,def)*sqrt(varcov[3,3])
ucJmax <- bestParms[3] + qt(1-alp,def)*sqrt(varcov[3,3])
structure(list(bestVcmax=bestParms[1],
bestVpmax=bestParms[2],
bestJmax=bestParms[3],
ReSumS=as.numeric(ReSumS),
Convergence=conv,
VarCov=varcov,df=def,
ciVcmax=c(lcVcmax,ucVcmax),
ciVpmax=c(lcVpmax,ucVpmax),
ciJmax=c(lcJmax,ucJmax),
level=level,data=obsDat)
, class = "OpEC4photo")
}
RsqEC4photo <- function(obsDat, iVcmax=60,iVpmax=120,
iVpr=80,iJmax=400,co2=380,o2=210,
type=c("Assim","StomCond")){
coef <- c(iVcmax,iVpmax,iVpr,iJmax)
type <- match.arg(type)
if(type == "Assim"){
if(max(obsDat[,1]) < 1)
warning("Units of Assim might be wrong:
should be micro mol m-2 s-1\n")
vec1 <- eC4photo(obsDat[,2],obsDat[,3],obsDat[,4],
co2,o2,coef[1],coef[2],coef[3],
coef[4])$Assim
}
if(type == "StomCond"){
if(max(obsDat[,1]) < 1)
warning("Units of StomCond might be wrong:
should be mmol m-2 s-1\n")
vec1 <- eC4photo(obsDat[,2],obsDat[,3],obsDat[,4],
co2,o2,coef[1],coef[2],coef[3],
coef[4])$Assim
}
obsvec <- as.matrix(obsDat[,1])
SST <- t(obsvec)%*%(obsvec)
SSE <- t(obsvec - vec1)%*%(obsvec - vec1)
Rsquare <- 1 - SSE/SST
if(Rsquare < 0)
stop("negative Rsq\n")
Rsquare
}
print.OpEC4photo <- function(x,...){
cat("\nOptimization of C4 photosynthesis\n")
cat("\nbest Vcmax:",x$bestVcmax,"\n")
cat("\n",x$level*100,"% Conf Interval Vcmax")
cat("\n",x$ciVcmax,"\n")
cat("\nbest Vpmax:",x$bestVpmax,"\n")
cat("\n",x$level*100,"% Conf Interval Vpmax")
cat("\n",x$ciVpmax,"\n")
cat("\nbest Jmax:",x$bestJmax,"\n")
cat("\n",x$level*100,"% Conf Interval Jmax")
cat("\n",x$ciJmax,"\n")
cat("\nConvergence:")
if(x$Convergence == 0) cat("YES\n")
else cat("NO\n")
invisible(x)
}
summary.OpEC4photo <- function(object,...){
dat <- object$data
obsvec <- as.vector(dat[,1])
fittd <- eC4photo(dat[,2],dat[,3],dat[,4],ca=380,oa=210,
object$bestVcmax,object$bestVpmax,80,object$bestJmax)
rsd <- obsvec - fittd$Assim
rss <- object$ReSumS
## Some measures of agreement
## Index of agreement
IAN <- t(rsd)%*%rsd
IAD1 <- abs(rsd) + abs(scale(obsvec,scale=FALSE))
IAD <- t(IAD1)%*%IAD1
IA <- 1 - IAN/IAD
## Rsquared 1
Rsq1 <- as.numeric(1 - rss / t(obsvec)%*%obsvec)
## Rsquared 2
Rsq2 <- as.numeric(cor(fittd$Assim,obsvec)^2)
## Mean bias
meanBias <- mean(rsd)
## AIC and BIC
n1 <- length(rsd)
AIC <- n1 * log(rss/n1) + 2
BIC <- n1 * log(rss/n1) + 2 * log(n1)
sigma <- sqrt(rss/(n1-2))
stdresid <- rsd/sigma
outli <- which(abs(stdresid) > 2)
structure(list(fitted=fittd$Assim,resid=rsd,
stdresid=stdresid,
IA=IA,Rsq1=Rsq1,Rsq2=Rsq2,
meanBias=meanBias,
AIC=AIC,BIC=BIC,
outli=outli,
sigma=sigma),class="summary.OpEC4photo")
}
## Need to add a plotting method
|
## -----------------------------------------------------------------------------
## Code for Machine Learning with caret at NYR in 2019 by Max Kuhn
## -----------------------------------------------------------------------------
## Prelims
library(tidymodels)
thm <- theme_bw() +
theme(
panel.background = element_rect(fill = "transparent", colour = NA),
plot.background = element_rect(fill = "transparent", colour = NA),
legend.position = "top",
legend.background = element_rect(fill = "transparent", colour = NA),
legend.key = element_rect(fill = "transparent", colour = NA)
)
theme_set(thm)
## -----------------------------------------------------------------------------
library(AmesHousing)
ames <- make_ames()
ggplot(ames, aes(x = Year_Built, y = Sale_Price)) +
geom_point(alpha = 0.3) +
scale_y_log10() +
geom_smooth()
ggplot(ames, aes(x = Lot_Area, y = Sale_Price)) +
geom_point(alpha = 0.3) +
scale_y_log10() +
scale_x_log10() +
geom_smooth()
ggplot(ames, aes(x = Alley, y = Sale_Price)) +
scale_y_log10() +
geom_boxplot()
ggplot(ames, aes(x = Year_Built, y = Sale_Price)) +
geom_point(alpha = 0.3) +
scale_y_log10() +
geom_smooth() +
facet_wrap(~ Alley)
ames %>%
dplyr::select(Sale_Price, Alley, Neighborhood, Enclosed_Porch) %>%
tidyr::gather(level, value, -Sale_Price) %>%
dplyr::group_by(level, value) %>%
dplyr::summarise(mean = mean(Sale_Price),
max = max(Sale_Price))
## -----------------------------------------------------------------------------
library(caret)
library(dplyr) # load this _after_ caret
ames <-
make_ames() %>%
# Remove quality-related predictors
dplyr::select(-matches("Qu"))
nrow(ames)
# Make sure that you get the same random numbers
set.seed(4595)
in_train <- createDataPartition(ames$Sale_Price, p = 3/4, list = FALSE)
ames_train <- ames[ in_train,]
ames_test <- ames[-in_train,]
nrow(ames_train)/nrow(ames)
## -----------------------------------------------------------------------------
ggplot(ames_train, aes(x = Sale_Price)) +
geom_line(stat = "density", trim = TRUE) +
geom_line(data = ames_test,
stat = "density",
trim = TRUE, col = "red")
## -----------------------------------------------------------------------------
## model_fn(Sale_Price ~ Neighborhood + Year_Sold + Neighborhood:Year_Sold, data = ames_train)
## model_fn(Sale_Price ~ ., data = ames_train)
## model_fn(log10(Sale_Price) ~ ns(Longitude, df = 3) + ns(Latitude, df = 3), data = ames_train)
## # Usually, the variables must all be numeric
## pre_vars <- c("Year_Sold", "Longitude", "Latitude")
## model_fn(x = ames_train[, pre_vars],
## y = ames_train$Sale_Price)
## -----------------------------------------------------------------------------
simple_lm <- lm(log10(Sale_Price) ~ Longitude + Latitude, data = ames_train)
simple_lm_values <- broom::augment(simple_lm)
names(simple_lm_values)
summary(simple_lm)
## -----------------------------------------------------------------------------
library(caret)
ctrl <-
trainControl(
method = "cv", # defaults to 10-fold
savePredictions = "final" # save the holdout predictions
)
## -----------------------------------------------------------------------------
set.seed(5616)
lm_fit <- train(log10(Sale_Price) ~ Latitude + Longitude,
data = ames_train,
method = "lm",
trControl = ctrl)
lm_fit
## -----------------------------------------------------------------------------
library(purrr)
holdout_results <-
lm_fit %>%
pluck("pred") %>%
mutate(resid = obs - pred) %>%
arrange(rowIndex) %>%
bind_cols(ames_train)
holdout_results %>%
dplyr::select(obs, pred, Resample, Neighborhood, Alley) %>%
dplyr::slice(1:7)
ggplot(holdout_results, aes(y = resid, x = Second_Flr_SF)) +
geom_point(alpha = .5)
ggplot(holdout_results,
aes(y = resid, x = Year_Built, col = Central_Air)) +
geom_point(alpha = .5)+
facet_wrap(~ Central_Air) +
geom_smooth(se = FALSE, col = "black")
## -----------------------------------------------------------------------------
simple_knn <- knnreg(log10(Sale_Price) ~ Longitude + Latitude, data = ames_train, k = 2)
naive_rmse <- caret::RMSE(pred = predict(simple_knn, ames_train), obs = log10(ames_train$Sale_Price))
naive_rmse
## -----------------------------------------------------------------------------
set.seed(5616)
knn_fit <- train(log10(Sale_Price) ~ Longitude + Latitude, data = ames_train,
method = "knn", trControl = ctrl,
tuneGrid = data.frame(k = 2))
getTrainPerf(knn_fit)
# Before:
naive_rmse
## -----------------------------------------------------------------------------
rs <- resamples(list(lm = lm_fit, knn = knn_fit), metric = "RMSE")
rmse_values <-
rs %>%
pluck("values") %>%
dplyr::select(Resample, `lm~RMSE`, `knn~RMSE`) %>%
dplyr::rename(lm = `lm~RMSE`, knn = `knn~RMSE`)
rmse_corr <- cor(rmse_values$lm, rmse_values$knn)
## -----------------------------------------------------------------------------
rmse_values %>%
gather(model, RMSE, -Resample) %>%
ggplot(aes(x = model, y = RMSE, group = Resample, col = Resample)) +
geom_line() +
theme(legend.position = "none")
## -----------------------------------------------------------------------------
compare_models(lm_fit, knn_fit)
## -----------------------------------------------------------------------------
# Leave the others at their defaults
knn_grid <- data.frame(k = 1:20)
ctrl <- trainControl(method = "cv", savePredictions = "final", returnResamp = "all")
## -----------------------------------------------------------------------------
set.seed(5616)
knn_tuned <-
train(log10(Sale_Price) ~ Longitude + Latitude,
data = ames_train,
method = "knn",
trControl = ctrl,
tuneGrid = knn_grid)
knn_tuned
## -----------------------------------------------------------------------------
ggplot(knn_tuned)
## -----------------------------------------------------------------------------
getTrainPerf(knn_tuned)
knn_tuned$bestTune
# since `savePredictions = "final"`:
knn_tuned$pred %>% slice(1:3)
## -----------------------------------------------------------------------------
knn_tuned %>% pluck("resample") %>% slice(1:3)
## -----------------------------------------------------------------------------
ggplot(knn_tuned) +
geom_line(data = knn_tuned$resample,
aes(group = Resample, col = Resample),
alpha = .3, lwd = 1) +
theme(legend.position = "none")
## -----------------------------------------------------------------------------
# Based on final model with optimized `k`
predict(knn_tuned, ames_test %>% slice(1:3))
| /Part_1_Basic_Principles.R | no_license | topepo/nyr-2019 | R | false | false | 6,840 | r | ## -----------------------------------------------------------------------------
## Code for Machine Learning with caret at NYR in 2019 by Max Kuhn
## -----------------------------------------------------------------------------
## Prelims
library(tidymodels)
thm <- theme_bw() +
theme(
panel.background = element_rect(fill = "transparent", colour = NA),
plot.background = element_rect(fill = "transparent", colour = NA),
legend.position = "top",
legend.background = element_rect(fill = "transparent", colour = NA),
legend.key = element_rect(fill = "transparent", colour = NA)
)
theme_set(thm)
## -----------------------------------------------------------------------------
library(AmesHousing)
ames <- make_ames()
ggplot(ames, aes(x = Year_Built, y = Sale_Price)) +
geom_point(alpha = 0.3) +
scale_y_log10() +
geom_smooth()
ggplot(ames, aes(x = Lot_Area, y = Sale_Price)) +
geom_point(alpha = 0.3) +
scale_y_log10() +
scale_x_log10() +
geom_smooth()
ggplot(ames, aes(x = Alley, y = Sale_Price)) +
scale_y_log10() +
geom_boxplot()
ggplot(ames, aes(x = Year_Built, y = Sale_Price)) +
geom_point(alpha = 0.3) +
scale_y_log10() +
geom_smooth() +
facet_wrap(~ Alley)
ames %>%
dplyr::select(Sale_Price, Alley, Neighborhood, Enclosed_Porch) %>%
tidyr::gather(level, value, -Sale_Price) %>%
dplyr::group_by(level, value) %>%
dplyr::summarise(mean = mean(Sale_Price),
max = max(Sale_Price))
## -----------------------------------------------------------------------------
library(caret)
library(dplyr) # load this _after_ caret
ames <-
make_ames() %>%
# Remove quality-related predictors
dplyr::select(-matches("Qu"))
nrow(ames)
# Make sure that you get the same random numbers
set.seed(4595)
in_train <- createDataPartition(ames$Sale_Price, p = 3/4, list = FALSE)
ames_train <- ames[ in_train,]
ames_test <- ames[-in_train,]
nrow(ames_train)/nrow(ames)
## -----------------------------------------------------------------------------
ggplot(ames_train, aes(x = Sale_Price)) +
geom_line(stat = "density", trim = TRUE) +
geom_line(data = ames_test,
stat = "density",
trim = TRUE, col = "red")
## -----------------------------------------------------------------------------
## model_fn(Sale_Price ~ Neighborhood + Year_Sold + Neighborhood:Year_Sold, data = ames_train)
## model_fn(Sale_Price ~ ., data = ames_train)
## model_fn(log10(Sale_Price) ~ ns(Longitude, df = 3) + ns(Latitude, df = 3), data = ames_train)
## # Usually, the variables must all be numeric
## pre_vars <- c("Year_Sold", "Longitude", "Latitude")
## model_fn(x = ames_train[, pre_vars],
## y = ames_train$Sale_Price)
## -----------------------------------------------------------------------------
simple_lm <- lm(log10(Sale_Price) ~ Longitude + Latitude, data = ames_train)
simple_lm_values <- broom::augment(simple_lm)
names(simple_lm_values)
summary(simple_lm)
## -----------------------------------------------------------------------------
library(caret)
ctrl <-
trainControl(
method = "cv", # defaults to 10-fold
savePredictions = "final" # save the holdout predictions
)
## -----------------------------------------------------------------------------
set.seed(5616)
lm_fit <- train(log10(Sale_Price) ~ Latitude + Longitude,
data = ames_train,
method = "lm",
trControl = ctrl)
lm_fit
## -----------------------------------------------------------------------------
library(purrr)
holdout_results <-
lm_fit %>%
pluck("pred") %>%
mutate(resid = obs - pred) %>%
arrange(rowIndex) %>%
bind_cols(ames_train)
holdout_results %>%
dplyr::select(obs, pred, Resample, Neighborhood, Alley) %>%
dplyr::slice(1:7)
ggplot(holdout_results, aes(y = resid, x = Second_Flr_SF)) +
geom_point(alpha = .5)
ggplot(holdout_results,
aes(y = resid, x = Year_Built, col = Central_Air)) +
geom_point(alpha = .5)+
facet_wrap(~ Central_Air) +
geom_smooth(se = FALSE, col = "black")
## -----------------------------------------------------------------------------
simple_knn <- knnreg(log10(Sale_Price) ~ Longitude + Latitude, data = ames_train, k = 2)
naive_rmse <- caret::RMSE(pred = predict(simple_knn, ames_train), obs = log10(ames_train$Sale_Price))
naive_rmse
## -----------------------------------------------------------------------------
set.seed(5616)
knn_fit <- train(log10(Sale_Price) ~ Longitude + Latitude, data = ames_train,
method = "knn", trControl = ctrl,
tuneGrid = data.frame(k = 2))
getTrainPerf(knn_fit)
# Before:
naive_rmse
## -----------------------------------------------------------------------------
rs <- resamples(list(lm = lm_fit, knn = knn_fit), metric = "RMSE")
rmse_values <-
rs %>%
pluck("values") %>%
dplyr::select(Resample, `lm~RMSE`, `knn~RMSE`) %>%
dplyr::rename(lm = `lm~RMSE`, knn = `knn~RMSE`)
rmse_corr <- cor(rmse_values$lm, rmse_values$knn)
## -----------------------------------------------------------------------------
rmse_values %>%
gather(model, RMSE, -Resample) %>%
ggplot(aes(x = model, y = RMSE, group = Resample, col = Resample)) +
geom_line() +
theme(legend.position = "none")
## -----------------------------------------------------------------------------
compare_models(lm_fit, knn_fit)
## -----------------------------------------------------------------------------
# Leave the others at their defaults
knn_grid <- data.frame(k = 1:20)
ctrl <- trainControl(method = "cv", savePredictions = "final", returnResamp = "all")
## -----------------------------------------------------------------------------
set.seed(5616)
knn_tuned <-
train(log10(Sale_Price) ~ Longitude + Latitude,
data = ames_train,
method = "knn",
trControl = ctrl,
tuneGrid = knn_grid)
knn_tuned
## -----------------------------------------------------------------------------
ggplot(knn_tuned)
## -----------------------------------------------------------------------------
getTrainPerf(knn_tuned)
knn_tuned$bestTune
# since `savePredictions = "final"`:
knn_tuned$pred %>% slice(1:3)
## -----------------------------------------------------------------------------
knn_tuned %>% pluck("resample") %>% slice(1:3)
## -----------------------------------------------------------------------------
ggplot(knn_tuned) +
geom_line(data = knn_tuned$resample,
aes(group = Resample, col = Resample),
alpha = .3, lwd = 1) +
theme(legend.position = "none")
## -----------------------------------------------------------------------------
# Based on final model with optimized `k`
predict(knn_tuned, ames_test %>% slice(1:3))
|
\name{constr.draw}
\alias{constr.draw}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Draw from constrained full conditional
}
\description{
Draws a single value from a normal distribution with the given mean and
variance, truncated to the interval (a, b), using the inverse-CDF method.
}
\usage{
constr.draw(mean, var, a, b)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mean}{
%% ~~Describe \code{mean} here~~
}
\item{var}{
%% ~~Describe \code{var} here~~
}
\item{a}{
%% ~~Describe \code{a} here~~
}
\item{b}{
%% ~~Describe \code{b} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (mean, var, a, b)
{
d <- pnorm(a, mean, sqrt(var)) + runif(1) * (pnorm(b, mean,
sqrt(var)) - pnorm(a, mean, sqrt(var)))
theta <- qnorm(d, mean, sqrt(var))
return(theta)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/constr.draw.Rd | no_license | scristia/MixtureModel | R | false | false | 1,606 | rd | \name{constr.draw}
\alias{constr.draw}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Draw from constrained full conditional
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
constr.draw(mean, var, a, b)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mean}{
%% ~~Describe \code{mean} here~~
}
\item{var}{
%% ~~Describe \code{var} here~~
}
\item{a}{
%% ~~Describe \code{a} here~~
}
\item{b}{
%% ~~Describe \code{b} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (mean, var, a, b)
{
d <- pnorm(a, mean, sqrt(var)) + runif(1) * (pnorm(b, mean,
sqrt(var)) - pnorm(a, mean, sqrt(var)))
theta <- qnorm(d, mean, sqrt(var))
return(theta)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(httr)
library(tidyquant)
library(zoo) ## for data processing
library(tidyverse)
library(forecast)
library(Quandl)
library(tseries)
library(rugarch)
dat <- tq_get("AAPL",
from = "2010-01-01",
to = "2019-12-30") %>%
mutate(return = log(adjusted / lag(adjusted))) %>%
filter(!is.na(return))
return <- dat$return
# price series ------
ggplot(dat, aes(x = date, y = adjusted)) +
geom_line() +
labs(title = 'Apple adjusted price
1 Jan 2010 - 30 Dec 2019',
x = "Year",
y = "Adjusted price")
# Return versus date ------
ggplot(dat, aes(x = date, y = 100*return)) + geom_line() +
ylim(-15, 15) +
labs(title = 'Apple (log) return (%)
1 Jan 2010 - 30 Dec 2019',
x = "Year",
y = "Return (%)")
# Abs Return versus date ------
ggplot(dat, aes(x = date, y = 100*abs(return))) +
geom_line() +
ylim(0, 15) +
labs(title = 'Apple absolute (log) return (%)
1 Jan 2010 - 30 Dec 2019',
x = "Year",
y = "Absolute return (%)")
# ------
ggplot(dat, aes(x = 100*return)) +
geom_histogram(aes(y = ..density..), bins = 40, color = 'lightblue', fill = "royalblue1") +
xlim(-10, 10) +
geom_density(aes(colour = "Kernel"), adjust = 1, alpha = 1) +
stat_function(aes(colour = "Normal"), fun = dnorm, args = list(mean = 100*mean(dat$return), sd = 100*sd(dat$return))) +
scale_colour_manual(name = '', values = c('blue', 'red')) +
labs(title = 'Apple (log) return (%)
1 Jan 2010 - 30 Dec 2019',
x = "Return (%)",
y = "Density")
# ------
ggplot(dat, aes(sample = scale(return))) +
stat_qq(col = "royalblue1") +
stat_qq_line() +
labs(title = 'Apple standardised return
1 Jan 2010 - 30 Dec 2019',
x = 'N(0,1) quantile',
y = 'Sample quantile')
T = length(dat$return)
T
rbar = 1/T*sum(dat$return)
rbar
s = sd(dat$return)
s
b = (1/T*sum((dat$return - rbar)^3)) / (1/T*sum((dat$return - rbar)^2))^(3/2)
b #Skewness
k = (1/T*sum((dat$return - rbar)^4)) / (1/T*sum((dat$return - rbar)^2))^2
k #Kurtosis
# Sample skewness: third central moment over the 3/2 power of the second.
#
# Bug fix: the original divided by the global variable `T` (assigned earlier
# in this script as length(dat$return)), so it silently returned wrong values
# for any vector of a different length and relied on TRUE's alias being
# overwritten. Use the length of the input itself.
#
# Args:
#   return: numeric vector of (log) returns.
# Returns: scalar skewness estimate.
skewness <- function(return) {
  n <- length(return)
  dev <- return - mean(return)
  (sum(dev^3) / n) / (sum(dev^2) / n)^(3 / 2)
}
# Sample (raw, not excess) kurtosis: fourth central moment over the squared
# second central moment.
#
# Bug fix: as with skewness(), the original divided by the global `T`
# instead of the length of the input vector.
#
# Args:
#   return: numeric vector of (log) returns.
# Returns: scalar kurtosis estimate (~3 for normal data).
Kurt <- function(return) {
  n <- length(return)
  dev <- return - mean(return)
  (sum(dev^4) / n) / (sum(dev^2) / n)^2
}
set.seed(9868)
# Draw one bootstrap resample: same size as the input, sampled with
# replacement.
genbootsample <- function(data) {
  n <- length(data)
  sample(data, n, replace = TRUE)
}
# Bootstrap standard error of the statistic T: apply T to nrep bootstrap
# resamples of `data` and return the standard deviation of the replicates.
# NOTE(review): the parameter name `T` shadows the TRUE alias inside this
# function; the name is kept because callers pass the statistic positionally.
bootsd <- function(data, T, nrep) {
  replicates <- replicate(nrep, T(genbootsample(data)))
  sd(replicates)
}
sd_mean <- bootsd(100*return, mean, 10000)
sd_mean
sd_sd <-bootsd(100*return, sd, 10000)
sd_sd
sd_skew <- bootsd(100*return, skewness, 10000)
sd_skew
sd_kurt <-bootsd(100*return, Kurt, 10000)
sd_kurt
## SF2&3
conf.level <- 0.95
ciline <- qnorm((1 - conf.level)/2)/sqrt(length(return))
acf_return <- acf(dat$return, lag = 50, plot = FALSE)
data_acf_return <- with(acf_return, data.frame(lag, acf))
ggplot(data_acf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Apple (log) return
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Autocorrelation")
acf_absolute_return <- acf(abs(dat$return), lag = 50, plot = FALSE)
data_absolute_acf_return <- with(acf_absolute_return, data.frame(lag, acf))
ggplot(data_absolute_acf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Apple absolute (log) return
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Autocorrelation")
#---- ccf
dat2 <- tq_get("MSFT",
from = "2010-01-01",
to = "2019-12-30") %>%
mutate(return = log(adjusted / lag(adjusted))) %>%
filter(!is.na(return))
return2 <- dat2$return
conf.level <- 0.95
ciline <- qnorm((1 - conf.level)/2)/sqrt(length(return))
ccf_return <- ccf(dat$return, dat2$return, lag = 50, plot = FALSE)
data_ccf_return <- with(ccf_return, data.frame(lag, acf))
ggplot(data_ccf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Apple and Microsoft (log) return
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Cross-correlation")
ccf_absolute_return <- ccf(abs(dat$return), abs(dat2$return), lag = 50, plot = FALSE)
data_absolute_ccf_return <- with(ccf_absolute_return, data.frame(lag, acf))
a=1
## (FTS.4.2)
sre <- as.numeric(residuals(fit, standardize = TRUE))
data <- data.frame(dat$date, sre)
ggplot(dat, aes(x = date, y = sre)) +
geom_line() +
labs(title = 'Standardised GARCH(1,1) residual
1 Jan 2010 - 30 Dec 2019',
x = "Time",
y = "value")
## (FTS.4.4) Empirical ACFs of the standardised residuals of the GARCH(1,1) process and their absolute values
conf.level <- 0.95
ciline <- qnorm((1 - conf.level)/2)/sqrt(length(sre))
acf_return <- acf(sre, lag = 50, plot = FALSE)
data_acf_return <- with(acf_return, data.frame(lag, acf))
ggplot(data_acf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Standardised GARCH(1,1) residual
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Autocorrelation")
acf_absolute_return <- acf(abs(sre), lag = 50, plot = FALSE)
data_absolute_acf_return <- with(acf_absolute_return, data.frame(lag, acf))
ggplot(data_absolute_acf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Absolute value of standardised GARCH(1,1) residual
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Autocorrelation")
| /Stylised facts/stylised facts.R | no_license | hahayumo/Summer-Research-Project | R | false | false | 6,381 | r | library(httr)
library(tidyquant)
library(zoo) ## for data processing
library(tidyverse)
library(forecast)
library(Quandl)
library(tseries)
library(rugarch)
dat <- tq_get("AAPL",
from = "2010-01-01",
to = "2019-12-30") %>%
mutate(return = log(adjusted / lag(adjusted))) %>%
filter(!is.na(return))
return <- dat$return
# price series ------
ggplot(dat, aes(x = date, y = adjusted)) +
geom_line() +
labs(title = 'Apple adjusted price
1 Jan 2010 - 30 Dec 2019',
x = "Year",
y = "Adjusted price")
# Return versus date ------
ggplot(dat, aes(x = date, y = 100*return)) + geom_line() +
ylim(-15, 15) +
labs(title = 'Apple (log) return (%)
1 Jan 2010 - 30 Dec 2019',
x = "Year",
y = "Return (%)")
# Abs Return versus date ------
ggplot(dat, aes(x = date, y = 100*abs(return))) +
geom_line() +
ylim(0, 15) +
labs(title = 'Apple absolute (log) return (%)
1 Jan 2010 - 30 Dec 2019',
x = "Year",
y = "Absolute return (%)")
# ------
ggplot(dat, aes(x = 100*return)) +
geom_histogram(aes(y = ..density..), bins = 40, color = 'lightblue', fill = "royalblue1") +
xlim(-10, 10) +
geom_density(aes(colour = "Kernel"), adjust = 1, alpha = 1) +
stat_function(aes(colour = "Normal"), fun = dnorm, args = list(mean = 100*mean(dat$return), sd = 100*sd(dat$return))) +
scale_colour_manual(name = '', values = c('blue', 'red')) +
labs(title = 'Apple (log) return (%)
1 Jan 2010 - 30 Dec 2019',
x = "Return (%)",
y = "Density")
# ------
ggplot(dat, aes(sample = scale(return))) +
stat_qq(col = "royalblue1") +
stat_qq_line() +
labs(title = 'Apple standardised return
1 Jan 2010 - 30 Dec 2019',
x = 'N(0,1) quantile',
y = 'Sample quantile')
T = length(dat$return)
T
rbar = 1/T*sum(dat$return)
rbar
s = sd(dat$return)
s
b = (1/T*sum((dat$return - rbar)^3)) / (1/T*sum((dat$return - rbar)^2))^(3/2)
b #Skewness
k = (1/T*sum((dat$return - rbar)^4)) / (1/T*sum((dat$return - rbar)^2))^2
k #Kurtosis
# Sample skewness: third central moment over the 3/2 power of the second.
#
# Bug fix: the original used the script-level global `T` as the sample size;
# it was only correct when the input happened to have length T. Compute the
# length from the input instead.
#
# Args:
#   return: numeric vector of (log) returns.
# Returns: scalar skewness estimate.
skewness <- function(return) {
  n <- length(return)
  dev <- return - mean(return)
  (sum(dev^3) / n) / (sum(dev^2) / n)^(3 / 2)
}
# Sample (raw, not excess) kurtosis: fourth central moment over the squared
# second central moment.
#
# Bug fix: the original divided by the script-level global `T` rather than
# the length of its own input.
#
# Args:
#   return: numeric vector of (log) returns.
# Returns: scalar kurtosis estimate (~3 for normal data).
Kurt <- function(return) {
  n <- length(return)
  dev <- return - mean(return)
  (sum(dev^4) / n) / (sum(dev^2) / n)^2
}
set.seed(9868)
# Produce a single bootstrap resample of `data` (same length, with
# replacement).
genbootsample <- function(data) {
  sample(data, size = length(data), replace = TRUE)
}
# Standard deviation of `nrep` bootstrap replicates of the statistic T
# applied to resamples of `data` (i.e. a bootstrap standard error).
# NOTE(review): `T` as a parameter name masks the TRUE alias in this scope.
bootsd <- function(data, T, nrep) {
  boot_stats <- replicate(nrep, T(genbootsample(data)))
  sd(boot_stats)
}
sd_mean <- bootsd(100*return, mean, 10000)
sd_mean
sd_sd <-bootsd(100*return, sd, 10000)
sd_sd
sd_skew <- bootsd(100*return, skewness, 10000)
sd_skew
sd_kurt <-bootsd(100*return, Kurt, 10000)
sd_kurt
## SF2&3
conf.level <- 0.95
ciline <- qnorm((1 - conf.level)/2)/sqrt(length(return))
acf_return <- acf(dat$return, lag = 50, plot = FALSE)
data_acf_return <- with(acf_return, data.frame(lag, acf))
ggplot(data_acf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Apple (log) return
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Autocorrelation")
acf_absolute_return <- acf(abs(dat$return), lag = 50, plot = FALSE)
data_absolute_acf_return <- with(acf_absolute_return, data.frame(lag, acf))
ggplot(data_absolute_acf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Apple absolute (log) return
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Autocorrelation")
#---- ccf
dat2 <- tq_get("MSFT",
from = "2010-01-01",
to = "2019-12-30") %>%
mutate(return = log(adjusted / lag(adjusted))) %>%
filter(!is.na(return))
return2 <- dat2$return
conf.level <- 0.95
ciline <- qnorm((1 - conf.level)/2)/sqrt(length(return))
ccf_return <- ccf(dat$return, dat2$return, lag = 50, plot = FALSE)
data_ccf_return <- with(ccf_return, data.frame(lag, acf))
ggplot(data_ccf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Apple and Microsoft (log) return
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Cross-correlation")
ccf_absolute_return <- ccf(abs(dat$return), abs(dat2$return), lag = 50, plot = FALSE)
data_absolute_ccf_return <- with(ccf_absolute_return, data.frame(lag, acf))
a=1
## (FTS.4.2)
sre <- as.numeric(residuals(fit, standardize = TRUE))
data <- data.frame(dat$date, sre)
ggplot(dat, aes(x = date, y = sre)) +
geom_line() +
labs(title = 'Standardised GARCH(1,1) residual
1 Jan 2010 - 30 Dec 2019',
x = "Time",
y = "value")
## (FTS.4.4) Empirical ACFs of the standardised residuals of the GARCH(1,1) process and their absolute values
conf.level <- 0.95
ciline <- qnorm((1 - conf.level)/2)/sqrt(length(sre))
acf_return <- acf(sre, lag = 50, plot = FALSE)
data_acf_return <- with(acf_return, data.frame(lag, acf))
ggplot(data_acf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Standardised GARCH(1,1) residual
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Autocorrelation")
acf_absolute_return <- acf(abs(sre), lag = 50, plot = FALSE)
data_absolute_acf_return <- with(acf_absolute_return, data.frame(lag, acf))
ggplot(data_absolute_acf_return, aes(x = lag, y = acf)) +
geom_bar(stat = "identity", position = "identity", fill = "royalblue1") +
geom_line(aes(y = ciline), linetype = "dotted", col = "blue") +
geom_line(aes(y = -ciline), linetype = "dotted", col = "blue") +
labs(title = 'Absolute value of standardised GARCH(1,1) residual
1 Jan 2010 - 30 Dec 2019',
x = "Lag",
y = "Autocorrelation")
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.5) predicting the
# first column of the urinary tract training set from the feature columns,
# then append the fitted coefficient path to a text report.
library(glmnet)
mydata = read.table("./TrainingSet/AvgRank/urinary_tract.csv",head=T,sep=",")
# Columns 4..ncol are predictors; column 1 is the response.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)  # reproducible cross-validation fold assignment
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.5,family="gaussian",standardize=TRUE)
# Redirect print() output of the underlying glmnet path into the report file.
sink('./Model/EN/AvgRank/urinary_tract/urinary_tract_057.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/urinary_tract/urinary_tract_057.R | no_license | leon1003/QSMART | R | false | false | 372 | r | library(glmnet)
mydata = read.table("./TrainingSet/AvgRank/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.5,family="gaussian",standardize=TRUE)
sink('./Model/EN/AvgRank/urinary_tract/urinary_tract_057.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## Create a cacheable inverse matrix
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of accessors: set()/get() for the matrix itself,
## setInv()/getInv() for the cached inverse (NULL until computed).
makeCacheMatrix <- function( x = matrix() ) {
  i <- NULL  # cached inverse; reset whenever the matrix changes
  set <- function( m ) {
    x <<- m
    i <<- NULL  # invalidate any stale cached inverse
  }
  get <- function() x
  setInv <- function( tmp ) i <<- tmp
  getInv <- function() i
  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## Get the inverse matrix, and use cache if it has been already calculated
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it at most once: a cached inverse is reused, otherwise the
## inverse is computed, stored via setInv, and returned.
##
## Bug fix: the original computed `solve(data) %*% data`, which is the
## identity matrix, and cached that instead of the inverse. solve(data, ...)
## with the second argument missing is the inverse of `data`; `...` is also
## forwarded as the signature advertises.
cacheSolve <- function( x, ... ) {
  m <- x$getInv()
  ## Serve the cached inverse if it has already been computed.
  if( !is.null( m ) ) {
    message( "cached value" )
    return(m)
  }
  data <- x$get()
  ## solve(a) with no right-hand side returns the inverse of a.
  m <- solve( data, ... )
  x$setInv( m )
  m
}
| /cachematrix.R | no_license | tiveron/ProgrammingAssignment2 | R | false | false | 1,079 | r | ## Create a cacheable inverse matrix
## Wrap a matrix together with storage for its (lazily computed) inverse.
## Accessors: get()/set(m) manage the matrix, getInv()/setInv(v) manage the
## cached inverse.
makeCacheMatrix <- function( x = matrix() ) {
  cache <- NULL  # memoised inverse; NULL until setInv() is called
  get <- function() x
  set <- function( m ) {
    x <<- m
    cache <<- NULL  # a new matrix drops the stale inverse
  }
  getInv <- function() cache
  setInv <- function( tmp ) cache <<- tmp
  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## Get the inverse matrix, and use cache if it has been already calculated
## Compute (or fetch) the inverse of a makeCacheMatrix() object. A cached
## inverse is returned immediately; otherwise the inverse is computed,
## cached via setInv, and returned.
##
## Bug fix: `solve( data ) %*% data` evaluates to the identity matrix, not
## the inverse; the correct call is solve(data, ...), which also honours the
## extra arguments the signature accepts.
cacheSolve <- function( x, ... ) {
  m <- x$getInv()
  if( !is.null( m ) ) {
    message( "cached value" )
    return(m)
  }
  data <- x$get()
  m <- solve( data, ... )  # inverse of the stored matrix
  x$setInv( m )
  m
}
|
## These functions allow to cache the inverse matrix of an invertible
## (square) matrix, which might be needed again later in a calculation.
## Caching helps to avoid repeating the same time-consuming calculations.
## The makeCacheMatrix function creates a special matrix-like object that
## can cache its inverse.
## set(y) sets the matrix y
## get() gets the set matrix
## setinv(y*) sets the inverse matrix y*
## getinv() gets the cached inverse matrix
## Create a special "matrix" object that can cache its inverse. The closure
## holds the matrix `x` and the cached inverse; the returned list exposes
## set/get and setinv/getinv accessors.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL marks "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL  # replacing the matrix invalidates the cache
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    inv <<- inverse
  }
  getinv <- function() {
    inv
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## The cacheSolve function checks, whether the inverse of the object x
## returned by makeCacheMatrix has already been calculated and cached.If so,
## it returns the cached inverse matrix inv. Otherwise it calculates the
## inverse matrix inv of x and returns it to be cached by makeCacheMatrix.
## Return the inverse of the cache-aware matrix `x`, reusing the cached
## value when one exists and computing/storing it otherwise.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
| /cachematrix.R | no_license | ReginaAlexandra/ProgrammingAssignment2 | R | false | false | 1,372 | r | ## These functions allow to cache the inverse matrix of an invertible
## (square) matrix, which might be needed again later in a calculation.
## Caching helps to avoid repeating the same time-consuming calculations.
## The makeCacheMatrix function creates a special matrix-like object that
## can cache its inverse.
## set(y) sets the matrix y
## get() gets the set matrix
## setinv(y*) sets the inverse matrix y*
## getinv() gets the cached inverse matrix
## A matrix container that memoises its inverse. set()/get() manage the
## matrix; setinv()/getinv() manage the cached inverse (NULL = not cached).
makeCacheMatrix <- function(x = matrix()) {
  stored_inverse <- NULL
  set <- function(y) {
    ## Swap in a new matrix and forget the old inverse.
    x <<- y
    stored_inverse <<- NULL
  }
  get <- function() x
  setinv <- function(inverse) stored_inverse <<- inverse
  getinv <- function() stored_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## The cacheSolve function checks, whether the inverse of the object x
## returned by makeCacheMatrix has already been calculated and cached.If so,
## it returns the cached inverse matrix inv. Otherwise it calculates the
## inverse matrix inv of x and returns it to be cached by makeCacheMatrix.
## Inverse of a makeCacheMatrix() object: serve the cached inverse when one
## exists, otherwise compute it with solve(), cache it, and return it.
cacheSolve <- function(x, ...) {
  result <- x$getinv()
  if (is.null(result)) {
    result <- solve(x$get(), ...)
    x$setinv(result)
  } else {
    message("getting cached data")
  }
  result
}
|
# Rename the temperature/salinity variables in every annual ROMS temp/salt
# forcing NetCDF file so downstream tooling finds them under the new names.
library(ncdf4)
# roms.dir = 'C:/Users/joseph.caracappa/Documents/Atlantis/ROMS_COBALT/Forcing_Files/Annual_Output/phys_statevars_alternate/'
roms.dir = 'C:/Users/joseph.caracappa/Documents/GitHub/neus-atlantis/CurrentVersion/tsfiles/Annual_Files/'
# All NetCDF files named roms_tempsalt*.nc in the forcing directory.
phys.files = list.files(roms.dir,'^roms_tempsalt.*\\.nc$')
for( i in 1:length(phys.files)){
# Open each file writable so the renames can be written back to disk.
file.nc = nc_open(paste0(roms.dir,phys.files[i]),write=T)
names(file.nc$var)
# NOTE(review): ncvar_rename() returns the updated ncdf4 object; the return
# value is discarded here, so the in-memory handle keeps the old names.
# The documented usage is file.nc <- ncvar_rename(...) -- confirm the
# on-disk rename still takes effect. Also consider seq_along(phys.files)
# over 1:length(phys.files), which misbehaves for an empty directory.
ncvar_rename(file.nc,'Temp','temperature')
ncvar_rename(file.nc,'salt','salinity')
nc_close(file.nc)
print(i)  # progress indicator
}
# test2 = nc_open('C:/Users/joseph.caracappa/Documents/Atlantis/ROMS_COBALT/Forcing_Files/Annual_Output/phys_statevars/temp1964.nc')
# names(test2$var)
# test3 = nc_open('C:/Users/joseph.caracappa/Documents/Atlantis/ROMS_COBALT/Forcing_Files/Annual_Output/phys_statevars/salt_1964.nc')
# names(test3$var)
| /R/ROMS_COBALT/fix_force_varnames.R | no_license | wechuli/large-pr | R | false | false | 857 | r |
library(ncdf4)
# roms.dir = 'C:/Users/joseph.caracappa/Documents/Atlantis/ROMS_COBALT/Forcing_Files/Annual_Output/phys_statevars_alternate/'
roms.dir = 'C:/Users/joseph.caracappa/Documents/GitHub/neus-atlantis/CurrentVersion/tsfiles/Annual_Files/'
phys.files = list.files(roms.dir,'^roms_tempsalt.*\\.nc$')
for( i in 1:length(phys.files)){
file.nc = nc_open(paste0(roms.dir,phys.files[i]),write=T)
names(file.nc$var)
ncvar_rename(file.nc,'Temp','temperature')
ncvar_rename(file.nc,'salt','salinity')
nc_close(file.nc)
print(i)
}
# test2 = nc_open('C:/Users/joseph.caracappa/Documents/Atlantis/ROMS_COBALT/Forcing_Files/Annual_Output/phys_statevars/temp1964.nc')
# names(test2$var)
# test3 = nc_open('C:/Users/joseph.caracappa/Documents/Atlantis/ROMS_COBALT/Forcing_Files/Annual_Output/phys_statevars/salt_1964.nc')
# names(test3$var)
|
## This script tested and ready for production mode on Carbonate
library(PopGenome)
library(bigmemory)
library(tools)
##########################################################################
vcfDir <- "/N/dc2/projects/PromoterPopGen/human/human-split-data/cisreg_chr13"
fileList <- "/N/dc2/projects/PromoterPopGen/human/human-split-data/cisreg_chr13/human_file_list_13.txt"
file.names <- read.csv(file=fileList, header=FALSE)
colnames(file.names) <- c("chr", "start", "end", "file")
gffFile <- "/N/dc2/projects/PromoterPopGen/TSSs_gff/TSSset_human_chr_13_updated.gff3"
popListFile <- "/N/dc2/projects/PromoterPopGen/DmPromoterPopGen/data/human/pop_list_1kGP.csv"
##########################################################################
setwd(vcfDir)
source("/N/dc2/projects/PromoterPopGen/DmPromoterPopGen/scripts/identifiers_to_list.R")
pop.list <- identifiers_to_list(csv.file=popListFile)
for (i in 1:nrow(file.names)) {
print(i)
this.string <- file.names[i,]
this.chr <- as.character(this.string[1])
this.start <- this.string[2]
if (this.start==0) {
this.start <- 1
}
this.end <- this.string[3]
this.filename <- as.character(unlist(this.string[4]))
this.filename2 <- file_path_sans_ext(this.filename)
diversity_out <- paste(this.filename2, "diversity", sep="_")
diversity_filename <- paste(diversity_out, "txt", sep=".")
#for debugging
print(diversity_filename)
print(this.chr)
print(this.start)
print(this.end)
print(this.filename)
GENOME.class <- readVCF(filename=this.filename, numcols=100000, frompos=this.start, topos=this.end, tid=this.chr, gffpath=gffFile)
GENOME.class <- set.populations(GENOME.class, new.populations=pop.list, diploid=TRUE)
split <- GENOME.class.split <- splitting.data(GENOME.class,subsites="gene")
gc()
split <- diversity.stats(split, pi=TRUE, keep.site.info=TRUE)
feature.names <- split@region.names
n.features <- length(split@region.names)
nuc.diversity.m <- split@nuc.diversity.within
colnames(nuc.diversity.m) <- names(pop.list)
write.table(nuc.diversity.m, col.names=TRUE, row.names=FALSE, sep="\t", file=diversity_filename)
# Write one per-site diversity table per feature in this split.
# Fixes: seq_len() instead of 1:n.features (1:0 would iterate twice when
# there are no features) and plain !is.null() instead of comparing the
# result of is.null() to FALSE.
for (i in seq_len(n.features)) {
  print(i)
  f.name <- feature.names[i]
  root.name <- paste(f.name, "chr", this.chr, sep="_")
  fileName <- paste(root.name, "txt", sep=".")
  pi.ma <- split@region.stats@nuc.diversity.within[[i]]
  if (!is.null(pi.ma)) {
    # One row per population, columns named after the populations.
    pi.ma.t <- t(pi.ma)
    colnames(pi.ma.t) <- names(pop.list)
    write.table(pi.ma.t, col.names=TRUE, row.names=TRUE, sep="\t", quote=FALSE, file=fileName)
  } else {
    print("No variation in feature.")
  }
}
gc()
} | /scripts/PopGenome_cisreg_chr13.R | no_license | xuan-w/Dm_Promoter_PopGen | R | false | false | 2,729 | r | ## This script tested and ready for production mode on Carbonate
library(PopGenome)
library(bigmemory)
library(tools)
##########################################################################
vcfDir <- "/N/dc2/projects/PromoterPopGen/human/human-split-data/cisreg_chr13"
fileList <- "/N/dc2/projects/PromoterPopGen/human/human-split-data/cisreg_chr13/human_file_list_13.txt"
file.names <- read.csv(file=fileList, header=FALSE)
colnames(file.names) <- c("chr", "start", "end", "file")
gffFile <- "/N/dc2/projects/PromoterPopGen/TSSs_gff/TSSset_human_chr_13_updated.gff3"
popListFile <- "/N/dc2/projects/PromoterPopGen/DmPromoterPopGen/data/human/pop_list_1kGP.csv"
##########################################################################
setwd(vcfDir)
source("/N/dc2/projects/PromoterPopGen/DmPromoterPopGen/scripts/identifiers_to_list.R")
pop.list <- identifiers_to_list(csv.file=popListFile)
for (i in 1:nrow(file.names)) {
print(i)
this.string <- file.names[i,]
this.chr <- as.character(this.string[1])
this.start <- this.string[2]
if (this.start==0) {
this.start <- 1
}
this.end <- this.string[3]
this.filename <- as.character(unlist(this.string[4]))
this.filename2 <- file_path_sans_ext(this.filename)
diversity_out <- paste(this.filename2, "diversity", sep="_")
diversity_filename <- paste(diversity_out, "txt", sep=".")
#for debugging
print(diversity_filename)
print(this.chr)
print(this.start)
print(this.end)
print(this.filename)
GENOME.class <- readVCF(filename=this.filename, numcols=100000, frompos=this.start, topos=this.end, tid=this.chr, gffpath=gffFile)
GENOME.class <- set.populations(GENOME.class, new.populations=pop.list, diploid=TRUE)
split <- GENOME.class.split <- splitting.data(GENOME.class,subsites="gene")
gc()
split <- diversity.stats(split, pi=TRUE, keep.site.info=TRUE)
feature.names <- split@region.names
n.features <- length(split@region.names)
nuc.diversity.m <- split@nuc.diversity.within
colnames(nuc.diversity.m) <- names(pop.list)
write.table(nuc.diversity.m, col.names=TRUE, row.names=FALSE, sep="\t", file=diversity_filename)
# Per-feature output: dump each feature's per-site diversity matrix to a
# tab-separated file named <feature>_chr_<chr>.txt.
# Fixes: seq_len() guards against n.features == 0 (1:0 iterates over
# c(1, 0)); is.null(pi.ma)==FALSE simplified to !is.null(pi.ma).
for (i in seq_len(n.features)) {
  print(i)
  f.name <- feature.names[i]
  root.name <- paste(f.name, "chr", this.chr, sep="_")
  fileName <- paste(root.name, "txt", sep=".")
  pi.ma <- split@region.stats@nuc.diversity.within[[i]]
  if (!is.null(pi.ma)) {
    pi.ma.t <- t(pi.ma)
    colnames(pi.ma.t) <- names(pop.list)
    write.table(pi.ma.t, col.names=TRUE, row.names=TRUE, sep="\t", quote=FALSE, file=fileName)
  } else {
    print("No variation in feature.")
  }
}
gc()
} |
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.7,family="gaussian",standardize=TRUE)
sink('./bone_073.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/bone/bone_073.R | no_license | esbgkannan/QSMART | R | false | false | 341 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.7,family="gaussian",standardize=TRUE)
sink('./bone_073.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-functions.R
\name{ProcessAllRawData}
\alias{ProcessAllRawData}
\alias{process_raw_data}
\title{Process raw data in to the appropriate format.}
\usage{
process_raw_data(storage_path = here::here("working-data", "paper"))
}
\arguments{
\item{storage_path}{where to get and put the data}
}
\description{
Process all the datasets and make the key of site names
}
| /man/ProcessAllRawData.Rd | permissive | diazrenata/replicate-becs | R | false | true | 442 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-functions.R
\name{ProcessAllRawData}
\alias{ProcessAllRawData}
\alias{process_raw_data}
\title{Process raw data in to the appropriate format.}
\usage{
process_raw_data(storage_path = here::here("working-data", "paper"))
}
\arguments{
\item{storage_path}{where to get and put the data}
}
\description{
Process all the datasets and make the key of site names
}
|
#' Run the Shiny Application
#'
#' @param onStart NULL, A function that will be called before the app is actually run.
#' @param options List, Named options that should be passed to the runApp call (these can be any of the following: "port", "launch.browser", "host", "quiet", "display.mode" and "test.mode"). You can also specify width and height parameters which provide a hint to the embedding environment about the ideal height/width for the app.
#' @param enableBookmarking Can be one of "url", "server", or "disable". The default value, NULL, will respect the setting from any previous calls to enableBookmarking(). See enableBookmarking() for more information on bookmarking your app.
#' @param ... arguments to pass to golem_opts
#'
#' @export
#' @importFrom shiny shinyApp
#' @importFrom golem with_golem_options
run_app <- function(
  onStart = NULL,
  options = list(),
  enableBookmarking = NULL,
  ...
) {
  # Build the shiny app from the package-level app_ui/app_server and bundle
  # any extra arguments into golem_opts so the running app can retrieve them.
  with_golem_options(
    app = shinyApp(
      ui = app_ui,
      server = app_server,
      onStart = onStart,
      options = options,
      enableBookmarking = enableBookmarking
    ),
    golem_opts = list(...)
  )
}
| /R/run_app.R | permissive | jengelaere/enr.reseaux | R | false | false | 1,145 | r | #' Run the Shiny Application
#'
#' @param onStart NULL, A function that will be called before the app is actually run.
#' @param options List, Named options that should be passed to the runApp call (these can be any of the following: "port", "launch.browser", "host", "quiet", "display.mode" and "test.mode"). You can also specify width and height parameters which provide a hint to the embedding environment about the ideal height/width for the app.
#' @param enableBookmarking Can be one of "url", "server", or "disable". The default value, NULL, will respect the setting from any previous calls to enableBookmarking(). See enableBookmarking() for more information on bookmarking your app.
#' @param ... arguments to pass to golem_opts
#'
#' @export
#' @importFrom shiny shinyApp
#' @importFrom golem with_golem_options
run_app <- function(
onStart = NULL,
options = list(),
enableBookmarking = NULL,
...
) {
with_golem_options(
app = shinyApp(
ui = app_ui,
server = app_server,
onStart = onStart,
options = options,
enableBookmarking = enableBookmarking
),
golem_opts = list(...)
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psi_criterion_generalized_kw_pmf.R
\name{psi_criterion_generalized_kw_pmf}
\alias{psi_criterion_generalized_kw_pmf}
\title{Psi Criterion Generalized KW PMF}
\usage{
psi_criterion_generalized_kw_pmf(x, d, data, left_trunc, right_trunc)
}
\arguments{
\item{x}{Vector of parameters}
\item{d}{Int used to indicate the number of 'a' terms within the 'param_bounds' variable. The remaining values will be considered 'b' terms.}
\item{data}{Vector of observed values}
\item{left_trunc}{Int used to determine starting index of model to use for optimization}
\item{right_trunc}{Int used to determine ending index of model to use for optimization}
}
\description{
This function calculates the Psi criterion goodness-of-fit metric given a set of parameters for the probability mass function of the Generalized Kolmogorov Waring function.
}
| /man/psi_criterion_generalized_kw_pmf.Rd | no_license | gragedaa/SkeweDF | R | false | true | 911 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psi_criterion_generalized_kw_pmf.R
\name{psi_criterion_generalized_kw_pmf}
\alias{psi_criterion_generalized_kw_pmf}
\title{Psi Criterion Generalized KW PMF}
\usage{
psi_criterion_generalized_kw_pmf(x, d, data, left_trunc, right_trunc)
}
\arguments{
\item{x}{Vector of parameters}
\item{d}{Int used to indicate the number of 'a' terms within the 'param_bounds' variable. The remaining values will be considered 'b' terms.}
\item{data}{Vector of observed values}
\item{left_trunc}{Int used to determine starting index of model to use for optimization}
\item{right_trunc}{Int used to determine ending index of model to use for optimization}
}
\description{
This function calculates the Psi criterion goodness-of-fit metric given a set of parameters for the probability mass function of the Generalized Kolmogorov Waring function.
}
|
## Column-wise summary of a matrix. A single-row matrix is returned
## unchanged because apply() would otherwise collapse it and drop the
## matrix structure.
apply2 <- function(x, fun=mean, ...) {
  if (nrow(x) == 1) {
    x
  } else {
    apply(x, 2, fun, ...)
  }
}
## Summarize the rows of a matrix by a grouping factor: all rows sharing a
## level of `factor` are collapsed into one row by applying `fun`
## column-wise (apply2 keeps single-row groups intact).
##
## Fix: seq_len(nrow(matrix)) replaces 1:nrow(matrix), which would yield
## c(1, 0) for a zero-row matrix.
##
## Args:
##   matrix: numeric matrix, one row per observation.
##   factor: grouping factor/vector, one entry per row of `matrix`.
##   fun:    summary function applied per column within each group.
##   ...:    further arguments passed on to `fun`.
## Returns: a matrix with one row per factor level, columns as in `matrix`.
summarizeRows <- function(matrix,
                          factor,
                          fun=mean, ...) {
  stopifnot(is.matrix(matrix) && nrow(matrix) == length(factor))
  fun <- match.fun(fun)
  ind.by.fac <- split(seq_len(nrow(matrix)), factor)
  mat.fun <- t(sapply(ind.by.fac,
                      function(idx) apply2(matrix[idx, , drop=FALSE],
                                           fun=fun, ...)))
  colnames(mat.fun) <- colnames(matrix)
  mat.fun
}
## Row-wise summary of a matrix. A single-column matrix is returned
## unchanged so the matrix structure is preserved (apply() would drop it).
apply1 <- function(x, fun=mean, ...) {
  if (ncol(x) == 1) {
    x
  } else {
    apply(x, 1, fun, ...)
  }
}
## Summarize the columns of a matrix by a grouping factor: all columns
## sharing a level of `factor` are collapsed into one column by applying
## `fun` row-wise (apply1 keeps single-column groups intact).
##
## Fix: seq_len(ncol(matrix)) replaces 1:ncol(matrix), which would yield
## c(1, 0) for a zero-column matrix.
##
## Args:
##   matrix: numeric matrix, one column per sample.
##   factor: grouping factor/vector, one entry per column of `matrix`.
##   fun:    summary function applied per row within each group.
##   ...:    further arguments passed on to `fun`.
## Returns: a matrix with one column per factor level, rows as in `matrix`.
summarizeColumns <- function(matrix,
                             factor,
                             fun=mean, ...) {
  stopifnot(is.matrix(matrix) && ncol(matrix) == length(factor))
  fun <- match.fun(fun)
  ind.by.fac <- split(seq_len(ncol(matrix)), factor)
  mat.fun <- sapply(ind.by.fac,
                    function(idx) apply1(matrix[, idx, drop=FALSE],
                                         fun=fun, ...))
  rownames(mat.fun) <- rownames(matrix)
  mat.fun
}
summarizeCols <- summarizeColumns
| /ribiosUtils/R/summarizeRows.R | no_license | RCBiczok/ribios | R | false | false | 1,189 | r | apply2 <- function(x, fun=mean, ...) {
if(nrow(x)==1)
return(x)
else
return(apply(x, 2, fun, ...))
}
summarizeRows <- function(matrix,
factor,
fun=mean, ...) {
stopifnot(is.matrix(matrix) && nrow(matrix) == length(factor))
fun <- match.fun(fun)
ind.by.fac <- split(1:nrow(matrix), factor)
mat.fun <- t(sapply(ind.by.fac,
function(x) apply2(matrix[x,,drop=FALSE],
fun=fun, ...)))
colnames(mat.fun) <- colnames(matrix)
return(mat.fun)
}
apply1 <- function(x, fun=mean, ...) {
if(ncol(x)==1)
return(x)
else
return(apply(x, 1, fun, ...))
}
summarizeColumns <- function(matrix,
factor,
fun=mean, ...) {
stopifnot(is.matrix(matrix) && ncol(matrix) == length(factor))
fun <- match.fun(fun)
ind.by.fac <- split(1:ncol(matrix), factor)
mat.fun <- sapply(ind.by.fac,
function(x) apply1(matrix[,x,drop=FALSE],
fun=fun, ...))
rownames(mat.fun) <- rownames(matrix)
return(mat.fun)
}
summarizeCols <- summarizeColumns
|
\name{p2_H0}
\docType{data}
\alias{p2_H0}
\title{Signal of association of the statistic S2 under H0}
\description{
This dataset contains the signal of association of the statistic S2 under the null hypothesis H0. It consists of 200 values, one for each simulation under H0. See the vignette for the definition of S2 and more details: \code{vignette("waffect-tutorial")}.
}
\usage{data(p2_H0)}
\format{A table with one column and 200 rows}
\keyword{dataset} | /man/p2_H0.Rd | no_license | cran/waffect | R | false | false | 459 | rd | \name{p2_H0}
\docType{data}
\alias{p2_H0}
\title{Signal of association of the statistic S2 under H0}
\description{
This dataset contains the signal of association of the statistic S2 under the null hypothesis H0. It consists of 200 values, one for each simulation under H0. See the vignette for the definition of S2 and more details: \code{vignette("waffect-tutorial")}.
}
\usage{data(p2_H0)}
\format{A table with one column and 200 rows}
\keyword{dataset} |
# Allocate a 6-row by 5-column matrix filled with NA, then rescale it.
# Arithmetic on NA propagates NA, so `b` is an all-NA numeric matrix of
# the same dimensions (0.5 * x * 0.25 == x * 0.125 exactly, both being
# powers of two).
a <- matrix(NA, nrow = 6, ncol = 5)
b <- a * 0.125
| /Scripts/test.R | no_license | wfarmer-usgs/hello-world | R | false | false | 46 | r | a <- matrix(NA,ncol=5,nrow=6)
b <- 0.5*a*0.25
|
# Calculus homework: plot several functions and evaluate a few series.
# Functions are redefined (q1, q2, q3, q5) partway through for later
# exercises; plots outside each function's natural domain produce NaN
# warnings, which is expected here.
# q1(x) = sqrt(2x - x^3); defined only where 2x - x^3 >= 0.
q1 <- function(x) {return(sqrt(2*x-x^3))}
d <- seq(-5,5,.1)
plot(d,q1(d),type='l')
# q2(x) = ln(x)(x-3)/(x^2-4); needs x > 0 and x != 2.
q2 <- function(x) {return ( log(x)*(x-3)/(x^2-4) ) }
d <- seq(-5,5,.01)
plot(d,q2(d),type='l')
# q3(x) = arcsin((x-2)/3); defined for -1 <= (x-2)/3 <= 1.
q3 <- function(x) {return ( asin((x-2)/3) ) }
d <- seq(-6,6,.01)
plot(d,q3(d),type='l')
# q5(x) = ln(1 + x^2); defined everywhere.
q5 <- function(x) {return ( log(1+x^2) ) }
d <- seq(-6,6,.01)
plot(d,q5(d),type='l')
# q6(x) = arctan(cos(x)); bounded and periodic.
q6 <- function(x) {return ( atan(cos(x)) ) }
d <- seq(-6,6,.01)
plot(d,q6(d),type='l')
# q6(0) = arctan(1), which should equal pi/4 (printed below for comparison).
q6(0)
pi/4
# q8(x) = e^(2x), written via e = exp(1).
q8 <- function(x) {
e=exp(1)
return ( e^(2*x) )
}
q8(1)
# Both of the following manipulate ln(q8(1)) = 2; note they are NOT equal:
# log(q8(1)^2) = 4 while log(q8(1))/2 = 1.
log( q8(1)^2)
log( q8(1))/2
# Redefinitions for the second part of the assignment.
# q1(x) = ln(sin(x)); only defined where sin(x) > 0.
q1 <- function(x) {return( log(sin(x)) )}
d <- seq(-9,9,.001)
plot(d,q1(d),type='l')
# Partial sum of sum_k (-ln 4)^k / k!  (= exp(-ln 4) = 1/4 in the limit).
q2 <- function(k) {return( (-1)^(k)*log(4)^(k)/factorial(k) )}
sum(q2(0:20))
# Partial sum of sum_k (-1)^k pi^(2k) / (2k)!  (= cos(pi) = -1 in the limit).
q3 <- function(k) {return( (-1)^(k)*pi^(2*k)/factorial(2*k) )}
sum(q3(0:20))
# First five terms of the series (-1)^(k+1) 2^k / (2k - 1).
q4 <- function(k) {return( (-1)^(k+1)*2^k/(2*k-1) )}
q4(1:5)
# q5(x) = 1/(1 - x), plotted near x = 0 (geometric-series comparison).
q5 <- function(x) {return( 1/(1-x) )}
d <- seq(-0.1,0.1,.001)
plot(d,q5(d),type='l')
| /Calculus/hw1.R | no_license | BusyStas/R-Training | R | false | false | 1,004 | r | q1 <- function(x) {return(sqrt(2*x-x^3))}
d <- seq(-5,5,.1)
plot(d,q1(d),type='l')
q2 <- function(x) {return ( log(x)*(x-3)/(x^2-4) ) }
d <- seq(-5,5,.01)
plot(d,q2(d),type='l')
q3 <- function(x) {return ( asin((x-2)/3) ) }
d <- seq(-6,6,.01)
plot(d,q3(d),type='l')
q5 <- function(x) {return ( log(1+x^2) ) }
d <- seq(-6,6,.01)
plot(d,q5(d),type='l')
q6 <- function(x) {return ( atan(cos(x)) ) }
d <- seq(-6,6,.01)
plot(d,q6(d),type='l')
q6(0)
pi/4
q8 <- function(x) {
e=exp(1)
return ( e^(2*x) )
}
q8(1)
log( q8(1)^2)
log( q8(1))/2
q1 <- function(x) {return( log(sin(x)) )}
d <- seq(-9,9,.001)
plot(d,q1(d),type='l')
q2 <- function(k) {return( (-1)^(k)*log(4)^(k)/factorial(k) )}
sum(q2(0:20))
q3 <- function(k) {return( (-1)^(k)*pi^(2*k)/factorial(2*k) )}
sum(q3(0:20))
q4 <- function(k) {return( (-1)^(k+1)*2^k/(2*k-1) )}
q4(1:5)
q5 <- function(x) {return( 1/(1-x) )}
d <- seq(-0.1,0.1,.001)
plot(d,q5(d),type='l')
|
# Download zebrafish (Danio rerio) coding sequences and 3'UTRs from
# Ensembl via biomaRt, keep one (longest) isoform per gene, and write
# the joined table to CSV.
library(tidyverse)
library(biomaRt)
library(magrittr)
# download ORFs sequences for fish ----------------------------------------
## Filter to the genes quantified in the RNA-seq profiling experiment.
## NOTE(review): "alpha-amanitin-prolife.csv" looks like a typo for
## "profile", but it is a runtime path — confirm against the file on disk.
filter_genes <-
"../19-01-09-RNAseqProfilesFish/rna-seq-profiles/alpha-amanitin-prolife.csv" %>%
read.csv(stringsAsFactors = F) %>%
.$Gene_ID %>%
unique()
ensembl <- useMart("ensembl", dataset = "drerio_gene_ensembl")
# CDS sequence per gene (one row per transcript isoform).
codingseq <- getBM(
attributes = c("ensembl_gene_id", "coding"),
filters = "ensembl_gene_id",
values = filter_genes,
mart = ensembl
)
# 3'UTR sequence per gene (one row per transcript isoform).
utr3_seq <- getBM(
attributes = c("ensembl_gene_id", "3utr"),
filters = "ensembl_gene_id",
values = filter_genes,
mart = ensembl
)
# apply filters to the sequences ------------------------------------------
codingseq <-
codingseq %>%
as_tibble() %>%
filter(
!str_detect(coding, "Sequence unavailable"),
str_length(coding) %% 3 == 0, # coding should be a multiple of 3
!str_detect(coding, "N") # drop sequences with ambiguous bases
) %>%
group_by(ensembl_gene_id) %>%
do(
arrange(., -str_length(coding)) %>% # keep only 1 isoform (the longest)
slice(1)
) %>%
ungroup()
utr3_seq <-
utr3_seq %>%
as_tibble() %>%
filter(
!str_detect(`3utr`, "Sequence unavailable"),
!str_detect(`3utr`, "N")
) %>%
group_by(ensembl_gene_id) %>%
do(
arrange(., -str_length(`3utr`)) %>% # keep only 1 isoform (the longest)
slice(1)
) %>%
ungroup()
# join tables and save results ---------------------------------------------
# inner_join keeps only genes that survived BOTH filters (joins on
# ensembl_gene_id, the shared column).
inner_join(codingseq, utr3_seq) %>%
write_csv("sequence-data/fish_seq_data_cds_3utr.csv")
| /data/19-01-17-Get-ORFS-UTRS-codon-composition/get_fish_cds_and_utr_data.R | permissive | santiago1234/MZT-rna-stability | R | false | false | 1,639 | r | library(tidyverse)
library(biomaRt)
library(magrittr)
# download ORFs sequences for fish ----------------------------------------
## I will use a filter from the genes that we quantified
filter_genes <-
"../19-01-09-RNAseqProfilesFish/rna-seq-profiles/alpha-amanitin-prolife.csv" %>%
read.csv(stringsAsFactors = F) %>%
.$Gene_ID %>%
unique()
ensembl <- useMart("ensembl", dataset = "drerio_gene_ensembl")
codingseq <- getBM(
attributes = c("ensembl_gene_id", "coding"),
filters = "ensembl_gene_id",
values = filter_genes,
mart = ensembl
)
utr3_seq <- getBM(
attributes = c("ensembl_gene_id", "3utr"),
filters = "ensembl_gene_id",
values = filter_genes,
mart = ensembl
)
# aply filters to the sequences -------------------------------------------
codingseq <-
codingseq %>%
as_tibble() %>%
filter(
!str_detect(coding, "Sequence unavailable"),
str_length(coding) %% 3 == 0, # coding should be a multiple of 3
!str_detect(coding, "N") # No Ns is better
) %>%
group_by(ensembl_gene_id) %>%
do(
arrange(., -str_length(coding)) %>% # get only 1 isophorm (keep longest)
slice(1)
) %>%
ungroup()
utr3_seq <-
utr3_seq %>%
as_tibble() %>%
filter(
!str_detect(`3utr`, "Sequence unavailable"),
!str_detect(`3utr`, "N")
) %>%
group_by(ensembl_gene_id) %>%
do(
arrange(., -str_length(`3utr`)) %>% # get only 1 isophorm (keep longest)
slice(1)
) %>%
ungroup()
# join table and saves results --------------------------------------------
inner_join(codingseq, utr3_seq) %>%
write_csv("sequence-data/fish_seq_data_cds_3utr.csv")
|
library(here)
library(readr)
library(dplyr)
library(tidyr)
library(magrittr)
library(ggplot2)
library(ArrayExpress)
library(Biobase)
library(pd.clariom.s.human.ht)
library(clariomshumantranscriptcluster.db)
#library(pd.hugene.1.0.st.v1)
#library(hugene10sttranscriptcluster.db)
#library(AffyCompatible)
library(oligo)
library(arrayQualityMetrics)
library(limma)
library(topGO)
library(ReactomePA)
library(clusterProfiler)
library(gplots)
library(ggplot2)
library(geneplotter)
library(RColorBrewer)
library(pheatmap)
library(enrichplot)
library(stringr)
library(matrixStats)
library(genefilter)
library(openxlsx)
library(mikelaffr)
# OUTPUT ###############################################################################################################
# Directories and file paths this script writes to.
dir.pdfs <- here("doc/microarray/pdfs/")
dir.create(dir.pdfs, recursive = TRUE, showWarnings = FALSE)
dir.quality.metrics <- here("doc/microarray/quality_metrics/")
dir.create(dir.quality.metrics, recursive = TRUE, showWarnings = FALSE)
# background removed, normalized, and filtered expression set
output.expressionSet.rds <- here("results/rdata_files/20220829_es_HNP_Differential_Microarray_SST-RMA.rds")
# INPUT ################################################################################################################
# Microarray ExpressionSet for HNP Differentiataion Experiment
expressionSet.rds <- here("results/rdata_files/20220829_es_HNP_Differentiation_Microarray.rds")
# yep
#tmp.txt <- here("results/microarray/Stein_Mike_Clariom_S_Human_24_08172022/log2.SST-RMA-GENE-FULL - Group 1.TXT")
# GLOBALS ##############################################################################################################
# Import ExpressionSet #################################################################################################
# Raw (unnormalized) ExpressionSet produced by an upstream script.
raw.es <- readRDS(expressionSet.rds)
# # yep
# temp.log2.expression <- read_tsv(tmp.txt)
#
# log2.expression %<>%
# select(1:25)
#
# samples <- colnames(log2.expression)[2:25]
# colnames(log2.expression)[2:25] <- paste0(sapply(strsplit(samples, "\\."), `[`, 1), ".CEL")
#
# df.expression <- as.data.frame(log2.expression)
# rownames(df.expression) <- df.expression[,1]
#
# df.expression <- df.expression[,2:25]
# QC Before Correction #################
# simple log2 of expression
log2.expr <- log2(exprs(raw.es))
# PCA on samples (t() puts samples in rows); centered, not scaled.
pca <- prcomp(t(log2.expr), center = TRUE, scale. = FALSE)
# combine with phenotype data
df.pca <- as_tibble(data.frame(pca$x[,1:20], pData(raw.es), stringsAsFactors = FALSE))
# Percent of total variance explained by each PC.
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# scree plot
# NOTE(review): seq(1:10) evaluates to 1:10 here, but seq_len(10) is the
# idiomatic form.
ggplot(data.frame(perc_var=percentVar, pc=1:length(percentVar))[1:10,], aes(x=pc, y=perc_var)) +
geom_point() +
geom_line() +
labs(x="PC", y="Percent Total Variance",
title="PCA: Microarray Expression (log2 Norm.)") +
scale_x_continuous(breaks = seq(1:10)) +
plotTheme()
#colnames(df.pca)
# PC1 vs PC2, colored by the Expression_Timepoint condition.
df.pca %>%
ggplot(aes(PC1, PC2, color = Condition)) +
geom_point(size = 3) +
#geom_label(hjust="inward", vjust="inward") +
labs(x=paste("PC1 (", round(percentVar[1], 1), "%)", sep=""),
y=paste("PC2 (", round(percentVar[2], 1), "%)", sep="")) +
plotTheme() +
scale_color_manual(values=cbPalette) +
labs(title="Microarray Expression (log2 Norm.)")
#ggsave(paste0(dir.pdfs, "pca_log2_norm.pdf"), height = 6, width = 8)
#pdf(paste0(dir.pdfs, "pca_by_technical.pdf"))
# df.pca %>%
# ggplot(aes(`CRNA Yield (ug)`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Extraction Date`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Extraction Date`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(Expression, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(Replicate, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(Timepoint, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`ng/ul`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(A260, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(A280, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`260/280`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`260/230`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`340 raw`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Total RNA (ng)`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Core Sample ID`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(RIN, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`28S/18S (Area)`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`TapStation Conc. ng/ul`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Qubit Conc. ng/ul`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(Condition, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#dev.off()
# Drop the pre-correction QC objects before the next stage.
rm(df.pca, log2.expr, pca, percentVar)
# sort(abs(pca$rotation[,1]), decreasing = TRUE)[1:20]
#
# sort(abs(pca$rotation[,2]), decreasing = TRUE)[1:20]
# log2 intensities
# Per-sample intensity distributions on the raw ExpressionSet.
oligo::boxplot(raw.es)
# quality metrics
# arrayQualityMetrics(expressionset = raw.es,
# outdir = dir.quality.metrics,
# force = TRUE,
# do.logtransform = TRUE,
# intgroup = c("Expression", "Timepoint"))
# RLE #################
# Relative Log Expression
# rle.es <- rma(raw.es, normalize = FALSE)
#
# row_medians_assayData <- Biobase::rowMedians(as.matrix(exprs(rle.es)))
#
# RLE_data <- sweep(exprs(rle.es), 1, row_medians_assayData)
#
# RLE_data <- as.data.frame(RLE_data)
# RLE_data_gathered <- gather(RLE_data, sample, log2_expression_deviation)
#
# ggplot(RLE_data_gathered, aes(sample,log2_expression_deviation)) +
# geom_boxplot(outlier.shape = NA) +
# ylim(c(-2, 2)) +
# theme(axis.text.x = element_text(colour = "aquamarine4",
# angle = 60, size = 6.5, hjust = 1 ,
# face = "bold"))
# rm(rle.es, RLE_data, RLE_data_gathered, row_medians_assayData)
# RMA Calibration ##################
# Background correction, quantile normalization, and probe summarization.
norm.es <- rma(raw.es)
# PCA on the normalized expression, then the same scree / PC1-vs-PC2
# diagnostics as the pre-correction section.
pca <- prcomp(t(exprs(norm.es)), center = TRUE, scale. = FALSE)
df.pca <- as_tibble(data.frame(pca$x[,1:20], pData(norm.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# scree plot
ggplot(data.frame(perc_var=percentVar, pc=1:length(percentVar))[1:10,], aes(x=pc, y=perc_var)) +
geom_point() +
geom_line() +
labs(x="PC", y="Percent Total Variance",
title="PCA: Microarray Expression (log2 Norm.)") +
scale_x_continuous(breaks = seq(1:10)) +
plotTheme()
#colnames(df.pca)
df.pca %>%
ggplot(aes(PC1, PC2, color = Condition)) +
geom_point(size = 3) +
#geom_label(hjust="inward", vjust="inward") +
labs(x=paste("PC1 (", round(percentVar[1], 1), "%)", sep=""),
y=paste("PC2 (", round(percentVar[2], 1), "%)", sep="")) +
plotTheme() +
scale_color_manual(values=cbPalette) +
labs(title="Microarray Expression (log2 Norm.)")
rm(pca, df.pca, percentVar)
# Intensity Filtering ##############
# median expression per probe
norm.medians <- rowMedians(exprs(norm.es))
hist(norm.medians, 100, freq = FALSE)
# manual intensity threshold
# Chosen by eye from the histogram above (log2 scale).
man_threshold <- 4
abline(v = man_threshold, col = "red", lwd = 2)
# number of samples per group
samples_cutoff <- 6
# indexes of probes with greater than threshold in at least cutoff number of samples
idx_man_threshold <- apply(exprs(norm.es), 1,
function(x){
sum(x > man_threshold) >= samples_cutoff})
table(idx_man_threshold)
# filter for expressed probes
filtered.es <- subset(norm.es, idx_man_threshold)
rm(raw.es, norm.es, idx_man_threshold, norm.medians, man_threshold, samples_cutoff)
# Cluster Annotation #############
# Map Clariom S probe IDs to gene symbols / Ensembl / Entrez IDs.
annotations <- AnnotationDbi::select(clariomshumantranscriptcluster.db,
keys = featureNames(filtered.es),
columns = c("SYMBOL", "GENENAME", "ENSEMBL", "ENTREZID"),
keytype = "PROBEID")
# remove probes without a gene symbol or other ID
annotations %<>%
dplyr::filter(!(is.na(SYMBOL) & is.na(GENENAME) & is.na(ENSEMBL) & is.na(ENTREZID)))
# probes with more than one annotation, created duplicate probeids
# (flag both the first and subsequent occurrences of each duplicate)
annotations$duplicate.annotation <- duplicated(annotations$PROBEID) | duplicated(annotations$PROBEID, fromLast = TRUE)
# remove all but one duplicate probeid
# NOTE(review): this keeps the first annotation per probe arbitrarily;
# the duplicate.annotation flag records which rows were ambiguous.
annotations %<>%
dplyr::filter(!(duplicated(PROBEID)))
# filter expression set for probes with an annotation
final.es <- subset(filtered.es, featureNames(filtered.es) %in% annotations$PROBEID)
# add annotation to expression set
fData(final.es)$PROBEID <- rownames(fData(final.es))
fData(final.es) <- left_join(fData(final.es), annotations, by = "PROBEID")
rownames(fData(final.es)) <- fData(final.es)$PROBEID
validObject(final.es)
rm(annotations, filtered.es)
# PCA after intensity filtering and annotation (same diagnostics as above).
pca <- prcomp(t(exprs(final.es)), center = TRUE, scale. = FALSE)
df.pca <- as_tibble(data.frame(pca$x[,1:20], pData(final.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# scree plot
ggplot(data.frame(perc_var=percentVar, pc=1:length(percentVar))[1:10,], aes(x=pc, y=perc_var)) +
geom_point() +
geom_line() +
labs(x="PC", y="Percent Total Variance",
title="PCA: Microarray Expression (log2 Norm.)") +
scale_x_continuous(breaks = seq(1:10)) +
plotTheme()
#colnames(df.pca)
df.pca %>%
ggplot(aes(PC1, PC2, color = Condition)) +
geom_point(size = 3) +
#geom_label(hjust="inward", vjust="inward") +
labs(x=paste("PC1 (", round(percentVar[1], 1), "%)", sep=""),
y=paste("PC2 (", round(percentVar[2], 1), "%)", sep="")) +
plotTheme() +
scale_color_manual(values=cbPalette) +
labs(title="Microarray Expression (log2 Norm.)")
rm(pca, df.pca, percentVar)
# Remove batch effect #################
# Regress out cRNA yield as a continuous covariate while protecting the
# Timepoint + Expression design from being removed.
corrected.expression <- limma::removeBatchEffect(final.es,
covariates = final.es$`CRNA Yield (ug)`,
design = model.matrix(~final.es$Timepoint + final.es$Expression))
# PCA on the covariate-corrected expression (diagnostic only; final.es
# itself is not modified here).
pca <- prcomp(t(corrected.expression), center = TRUE, scale. = FALSE)
df.pca <- as_tibble(data.frame(pca$x[,1:20], pData(final.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# scree plot
ggplot(data.frame(perc_var=percentVar, pc=1:length(percentVar))[1:10,], aes(x=pc, y=perc_var)) +
geom_point() +
geom_line() +
labs(x="PC", y="Percent Total Variance",
title="PCA: Microarray Expression (log2 Norm.)") +
scale_x_continuous(breaks = seq(1:10)) +
plotTheme()
#colnames(df.pca)
df.pca %>%
ggplot(aes(PC1, PC2, color = Condition)) +
geom_point(size = 3) +
#geom_label(hjust="inward", vjust="inward") +
labs(x=paste("PC1 (", round(percentVar[1], 1), "%)", sep=""),
y=paste("PC2 (", round(percentVar[2], 1), "%)", sep="")) +
plotTheme() +
scale_color_manual(values=cbPalette) +
labs(title="Microarray Expression (log2 Norm.)")
rm(pca, df.pca, percentVar)
# possible outliers
# Flag two arrays as potential outliers, then re-run the batch-corrected
# PCA without them as a diagnostic.
pData(final.es)$outlier <- FALSE
pData(final.es)$outlier[pData(final.es)$CEL %in% c("MA13.CEL", "MA21.CEL")] <- TRUE
filt.es <- final.es[,!pData(final.es)$outlier]
corrected.expression <- limma::removeBatchEffect(filt.es,
covariates = filt.es$`CRNA Yield (ug)`,
design = model.matrix(~filt.es$Timepoint + filt.es$Expression))
pca <- prcomp(t(corrected.expression), center = TRUE, scale. = FALSE)
df.pca <- as_tibble(data.frame(pca$x[,1:20], pData(filt.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# scree plot
ggplot(data.frame(perc_var=percentVar, pc=1:length(percentVar))[1:10,], aes(x=pc, y=perc_var)) +
geom_point() +
geom_line() +
labs(x="PC", y="Percent Total Variance",
title="PCA: Microarray Expression (log2 Norm.)") +
scale_x_continuous(breaks = seq(1:10)) +
plotTheme()
#colnames(df.pca)
df.pca %>%
ggplot(aes(PC1, PC2, color = Condition)) +
geom_point(size = 3) +
#geom_label(hjust="inward", vjust="inward") +
labs(x=paste("PC1 (", round(percentVar[1], 1), "%)", sep=""),
y=paste("PC2 (", round(percentVar[2], 1), "%)", sep="")) +
plotTheme() +
scale_color_manual(values=cbPalette) +
labs(title="Microarray Expression (log2 Norm.)")
# Same PCA colored by Qubit concentration to check a technical driver.
df.pca %>%
ggplot(aes(PC1, PC2, color = Qubit.Conc..ng.ul)) +
geom_point(size = 3) +
#geom_label(hjust="inward", vjust="inward") +
labs(x=paste("PC1 (", round(percentVar[1], 1), "%)", sep=""),
y=paste("PC2 (", round(percentVar[2], 1), "%)", sep="")) +
plotTheme() +
#scale_color_manual(values=cbPalette) +
labs(title="Microarray Expression (log2 Norm.)")
# Output ##########
# final es
# NOTE(review): final.es (with outlier flags set, but outliers NOT
# removed) is what gets saved; filt.es was used only for the diagnostic
# PCA above. Confirm this is intentional.
saveRDS(final.es, output.expressionSet.rds)
| /doc/microarray/microarray_qc.R | no_license | mikelaff/mirna-eqtl-manuscript | R | false | false | 17,023 | r |
library(here)
library(readr)
library(dplyr)
library(tidyr)
library(magrittr)
library(ggplot2)
library(ArrayExpress)
library(Biobase)
library(pd.clariom.s.human.ht)
library(clariomshumantranscriptcluster.db)
#library(pd.hugene.1.0.st.v1)
#library(hugene10sttranscriptcluster.db)
#library(AffyCompatible)
library(oligo)
library(arrayQualityMetrics)
library(limma)
library(topGO)
library(ReactomePA)
library(clusterProfiler)
library(gplots)
library(ggplot2)
library(geneplotter)
library(RColorBrewer)
library(pheatmap)
library(enrichplot)
library(stringr)
library(matrixStats)
library(genefilter)
library(openxlsx)
library(mikelaffr)
# OUTPUT ###############################################################################################################
dir.pdfs <- here("doc/microarray/pdfs/")
dir.create(dir.pdfs, recursive = TRUE, showWarnings = FALSE)
dir.quality.metrics <- here("doc/microarray/quality_metrics/")
dir.create(dir.quality.metrics, recursive = TRUE, showWarnings = FALSE)
# background removed, normalized, and filtered expression set
output.expressionSet.rds <- here("results/rdata_files/20220829_es_HNP_Differential_Microarray_SST-RMA.rds")
# INPUT ################################################################################################################
# Microarray ExpressionSet for HNP Differentiataion Experiment
expressionSet.rds <- here("results/rdata_files/20220829_es_HNP_Differentiation_Microarray.rds")
# yep
#tmp.txt <- here("results/microarray/Stein_Mike_Clariom_S_Human_24_08172022/log2.SST-RMA-GENE-FULL - Group 1.TXT")
# GLOBALS ##############################################################################################################
# Import ExpressionSet #################################################################################################
raw.es <- readRDS(expressionSet.rds)
# # yep
# temp.log2.expression <- read_tsv(tmp.txt)
#
# log2.expression %<>%
# select(1:25)
#
# samples <- colnames(log2.expression)[2:25]
# colnames(log2.expression)[2:25] <- paste0(sapply(strsplit(samples, "\\."), `[`, 1), ".CEL")
#
# df.expression <- as.data.frame(log2.expression)
# rownames(df.expression) <- df.expression[,1]
#
# df.expression <- df.expression[,2:25]
# QC Before Correction #################
# simple log2 of expression
log2.expr <- log2(exprs(raw.es))
pca <- prcomp(t(log2.expr), center = TRUE, scale. = FALSE)
# combine with phenotype data
df.pca <- as_tibble(data.frame(pca$x[,1:20], pData(raw.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# scree plot
ggplot(data.frame(perc_var=percentVar, pc=1:length(percentVar))[1:10,], aes(x=pc, y=perc_var)) +
geom_point() +
geom_line() +
labs(x="PC", y="Percent Total Variance",
title="PCA: Microarray Expression (log2 Norm.)") +
scale_x_continuous(breaks = seq(1:10)) +
plotTheme()
#colnames(df.pca)
df.pca %>%
ggplot(aes(PC1, PC2, color = Condition)) +
geom_point(size = 3) +
#geom_label(hjust="inward", vjust="inward") +
labs(x=paste("PC1 (", round(percentVar[1], 1), "%)", sep=""),
y=paste("PC2 (", round(percentVar[2], 1), "%)", sep="")) +
plotTheme() +
scale_color_manual(values=cbPalette) +
labs(title="Microarray Expression (log2 Norm.)")
#ggsave(paste0(dir.pdfs, "pca_log2_norm.pdf"), height = 6, width = 8)
#pdf(paste0(dir.pdfs, "pca_by_technical.pdf"))
# df.pca %>%
# ggplot(aes(`CRNA Yield (ug)`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Extraction Date`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Extraction Date`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(Expression, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(Replicate, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(Timepoint, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`ng/ul`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(A260, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(A280, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`260/280`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`260/230`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`340 raw`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Total RNA (ng)`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Core Sample ID`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(RIN, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`28S/18S (Area)`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`TapStation Conc. ng/ul`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(`Qubit Conc. ng/ul`, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#
# df.pca %>%
# ggplot(aes(Condition, PC1)) +
# geom_point(size=3) +
# labs(y=paste("PC1 (", round(percentVar[1], 1), "%)", sep="")) +
# plotTheme() +
# scale_color_manual(values=cbPalette) +
# labs(title="Gene Expression (log2 Norm.)")
#dev.off()
rm(df.pca, log2.expr, pca, percentVar)
# sort(abs(pca$rotation[,1]), decreasing = TRUE)[1:20]
#
# sort(abs(pca$rotation[,2]), decreasing = TRUE)[1:20]
# log2 intensities
oligo::boxplot(raw.es)
# quality metrics
# arrayQualityMetrics(expressionset = raw.es,
# outdir = dir.quality.metrics,
# force = TRUE,
# do.logtransform = TRUE,
# intgroup = c("Expression", "Timepoint"))
# RLE #################
# Relative Log Expression
# rle.es <- rma(raw.es, normalize = FALSE)
#
# row_medians_assayData <- Biobase::rowMedians(as.matrix(exprs(rle.es)))
#
# RLE_data <- sweep(exprs(rle.es), 1, row_medians_assayData)
#
# RLE_data <- as.data.frame(RLE_data)
# RLE_data_gathered <- gather(RLE_data, sample, log2_expression_deviation)
#
# ggplot(RLE_data_gathered, aes(sample,log2_expression_deviation)) +
# geom_boxplot(outlier.shape = NA) +
# ylim(c(-2, 2)) +
# theme(axis.text.x = element_text(colour = "aquamarine4",
# angle = 60, size = 6.5, hjust = 1 ,
# face = "bold"))
# rm(rle.es, RLE_data, RLE_data_gathered, row_medians_assayData)
# RMA Calibration ##################
norm.es <- rma(raw.es)
pca <- prcomp(t(exprs(norm.es)), center = TRUE, scale. = FALSE)
df.pca <- as_tibble(data.frame(pca$x[,1:20], pData(norm.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# scree plot
ggplot(data.frame(perc_var=percentVar, pc=1:length(percentVar))[1:10,], aes(x=pc, y=perc_var)) +
geom_point() +
geom_line() +
labs(x="PC", y="Percent Total Variance",
title="PCA: Microarray Expression (log2 Norm.)") +
scale_x_continuous(breaks = seq(1:10)) +
plotTheme()
#colnames(df.pca)
# Scatter of the first two PCs, colored by Expression/Timepoint condition.
df.pca %>%
ggplot(aes(PC1, PC2, color = Condition)) +
geom_point(size = 3) +
#geom_label(hjust="inward", vjust="inward") +
# Axis labels carry the percent of variance explained by each PC.
labs(x=paste("PC1 (", round(percentVar[1], 1), "%)", sep=""),
y=paste("PC2 (", round(percentVar[2], 1), "%)", sep="")) +
plotTheme() +
scale_color_manual(values=cbPalette) +
labs(title="Microarray Expression (log2 Norm.)")
# Drop PCA intermediates so they cannot be reused accidentally below.
rm(pca, df.pca, percentVar)
# Intensity Filtering ##############
# median expression per probe
# Per-probe median of the normalized log2 expression across all arrays.
norm.medians <- rowMedians(exprs(norm.es))
hist(norm.medians, 100, freq = FALSE)
# manual intensity threshold
# Value chosen by eye from the histogram above; shown as the red line.
man_threshold <- 4
abline(v = man_threshold, col = "red", lwd = 2)
# number of samples per group
samples_cutoff <- 6
# indexes of probes with greater than threshold in at least cutoff number of samples
idx_man_threshold <- apply(exprs(norm.es), 1,
function(x){
sum(x > man_threshold) >= samples_cutoff})
table(idx_man_threshold)
# filter for expressed probes
filtered.es <- subset(norm.es, idx_man_threshold)
# Free the raw/normalized sets and threshold helpers; only filtered.es is used below.
rm(raw.es, norm.es, idx_man_threshold, norm.medians, man_threshold, samples_cutoff)
# Cluster Annotation #############
# Map each transcript-cluster probe ID to gene identifiers
# (symbol, gene name, Ensembl and Entrez IDs).
annotations <- AnnotationDbi::select(clariomshumantranscriptcluster.db,
keys = featureNames(filtered.es),
columns = c("SYMBOL", "GENENAME", "ENSEMBL", "ENTREZID"),
keytype = "PROBEID")
# remove probes without a gene symbol or other ID
annotations %<>%
dplyr::filter(!(is.na(SYMBOL) & is.na(GENENAME) & is.na(ENSEMBL) & is.na(ENTREZID)))
# probes with more than one annotation, created duplicate probeids
# Flags every row of a duplicated PROBEID (including the first occurrence).
annotations$duplicate.annotation <- duplicated(annotations$PROBEID) | duplicated(annotations$PROBEID, fromLast = TRUE)
# remove all but one duplicate probeid
annotations %<>%
dplyr::filter(!(duplicated(PROBEID)))
# filter expression set for probes with an annotation
final.es <- subset(filtered.es, featureNames(filtered.es) %in% annotations$PROBEID)
# add annotation to expression set
fData(final.es)$PROBEID <- rownames(fData(final.es))
fData(final.es) <- left_join(fData(final.es), annotations, by = "PROBEID")
# Restore the feature rownames from the PROBEID column after the join.
rownames(fData(final.es)) <- fData(final.es)$PROBEID
# Sanity-check that the ExpressionSet is still internally consistent.
validObject(final.es)
rm(annotations, filtered.es)
# PCA of the annotated, intensity-filtered expression set.
pca <- prcomp(t(exprs(final.es)), center = TRUE, scale. = FALSE)
df.pca <- as_tibble(data.frame(pca$x[, 1:20], pData(final.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
  mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# Scree plot of the first 10 PCs.
# Idiom fix: 'breaks = 1:10' replaces the accidental 'seq(1:10)' (same values).
ggplot(data.frame(perc_var = percentVar, pc = seq_along(percentVar))[1:10, ],
       aes(x = pc, y = perc_var)) +
  geom_point() +
  geom_line() +
  labs(x = "PC", y = "Percent Total Variance",
       title = "PCA: Microarray Expression (log2 Norm.)") +
  scale_x_continuous(breaks = 1:10) +
  plotTheme()
#colnames(df.pca)
# PC1 vs PC2 colored by Expression/Timepoint condition.
df.pca %>%
  ggplot(aes(PC1, PC2, color = Condition)) +
  geom_point(size = 3) +
  #geom_label(hjust="inward", vjust="inward") +
  labs(x = paste("PC1 (", round(percentVar[1], 1), "%)", sep = ""),
       y = paste("PC2 (", round(percentVar[2], 1), "%)", sep = "")) +
  plotTheme() +
  scale_color_manual(values = cbPalette) +
  labs(title = "Microarray Expression (log2 Norm.)")
rm(pca, df.pca, percentVar)
# Remove batch effect #################
# Regress out cRNA yield (continuous covariate) from the expression values,
# protecting the Timepoint + Expression design from removal.
corrected.expression <- limma::removeBatchEffect(
  final.es,
  covariates = final.es$`CRNA Yield (ug)`,
  design = model.matrix(~final.es$Timepoint + final.es$Expression))
# PCA on the covariate-corrected matrix; phenotype data still comes from final.es.
pca <- prcomp(t(corrected.expression), center = TRUE, scale. = FALSE)
df.pca <- as_tibble(data.frame(pca$x[, 1:20], pData(final.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
  mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# Scree plot (idiom fix: 'breaks = 1:10' replaces the accidental 'seq(1:10)').
ggplot(data.frame(perc_var = percentVar, pc = seq_along(percentVar))[1:10, ],
       aes(x = pc, y = perc_var)) +
  geom_point() +
  geom_line() +
  labs(x = "PC", y = "Percent Total Variance",
       title = "PCA: Microarray Expression (log2 Norm.)") +
  scale_x_continuous(breaks = 1:10) +
  plotTheme()
#colnames(df.pca)
# PC1 vs PC2 after covariate correction, colored by condition.
df.pca %>%
  ggplot(aes(PC1, PC2, color = Condition)) +
  geom_point(size = 3) +
  #geom_label(hjust="inward", vjust="inward") +
  labs(x = paste("PC1 (", round(percentVar[1], 1), "%)", sep = ""),
       y = paste("PC2 (", round(percentVar[2], 1), "%)", sep = "")) +
  plotTheme() +
  scale_color_manual(values = cbPalette) +
  labs(title = "Microarray Expression (log2 Norm.)")
rm(pca, df.pca, percentVar)
# possible outliers
# Flag possible outlier arrays by their CEL file names.
pData(final.es)$outlier <- FALSE
pData(final.es)$outlier[pData(final.es)$CEL %in% c("MA13.CEL", "MA21.CEL")] <- TRUE
# Drop the flagged samples for the diagnostic PCA below.
filt.es <- final.es[, !pData(final.es)$outlier]
# Same covariate correction as above, now on the outlier-filtered set.
corrected.expression <- limma::removeBatchEffect(
  filt.es,
  covariates = filt.es$`CRNA Yield (ug)`,
  design = model.matrix(~filt.es$Timepoint + filt.es$Expression))
pca <- prcomp(t(corrected.expression), center = TRUE, scale. = FALSE)
df.pca <- as_tibble(data.frame(pca$x[, 1:20], pData(filt.es), stringsAsFactors = FALSE))
percentVar <- pca$sdev^2 / sum(pca$sdev^2) * 100
df.pca %<>%
  mutate(Condition = paste(Expression, Timepoint, sep = "_"))
# Scree plot (idiom fix: 'breaks = 1:10' replaces the accidental 'seq(1:10)').
ggplot(data.frame(perc_var = percentVar, pc = seq_along(percentVar))[1:10, ],
       aes(x = pc, y = perc_var)) +
  geom_point() +
  geom_line() +
  labs(x = "PC", y = "Percent Total Variance",
       title = "PCA: Microarray Expression (log2 Norm.)") +
  scale_x_continuous(breaks = 1:10) +
  plotTheme()
#colnames(df.pca)
# PC1 vs PC2 colored by Expression/Timepoint condition.
df.pca %>%
  ggplot(aes(PC1, PC2, color = Condition)) +
  geom_point(size = 3) +
  #geom_label(hjust="inward", vjust="inward") +
  labs(x = paste("PC1 (", round(percentVar[1], 1), "%)", sep = ""),
       y = paste("PC2 (", round(percentVar[2], 1), "%)", sep = "")) +
  plotTheme() +
  scale_color_manual(values = cbPalette) +
  labs(title = "Microarray Expression (log2 Norm.)")
# Same scatter, colored by Qubit concentration (continuous scale, so the
# manual discrete palette stays commented out).
df.pca %>%
  ggplot(aes(PC1, PC2, color = Qubit.Conc..ng.ul)) +
  geom_point(size = 3) +
  #geom_label(hjust="inward", vjust="inward") +
  labs(x = paste("PC1 (", round(percentVar[1], 1), "%)", sep = ""),
       y = paste("PC2 (", round(percentVar[2], 1), "%)", sep = "")) +
  plotTheme() +
  #scale_color_manual(values=cbPalette) +
  labs(title = "Microarray Expression (log2 Norm.)")
# Output ##########
# final es
# NOTE(review): final.es (which still contains the flagged outlier samples)
# is what gets saved, not filt.es — confirm this is intentional.
saveRDS(final.es, output.expressionSet.rds)
|
## Creates a special "matrix" object that can cache its inverse.
## It exposes four functions: set the value of the matrix, get the value of
## the matrix, set the value of the inverse, and get the value of the inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cache holder for a matrix and its lazily computed inverse.
  # Returns a list of four accessor closures that all share this
  # function's environment, so the matrix and cached inverse persist
  # between calls.
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## This function calculates the inverse of the special "matrix" returned by
## makeCacheMatrix above. It first checks whether the inverse has already been
## calculated; if so, it retrieves the inverse from the cache and skips the
## computation. Otherwise it calculates the inverse of the matrix, stores it in
## the cache via the setinverse function, and returns the inverse of 'x'.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', where 'x' is a cache
  ## object created by makeCacheMatrix(). If the inverse is already
  ## cached it is returned directly; otherwise it is computed with
  ## solve(), stored in the cache, and returned. Extra arguments in
  ## '...' are forwarded to solve().
  inv <- x$getinverse()  # BUG FIX: was x$getInverse(); makeCacheMatrix exposes 'getinverse'
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinverse(inv)      # BUG FIX: was x$setInverse(); the accessor is 'setinverse'
  inv
}
| /programming assignment2.R | no_license | Jyoti4/ProgrammingAssignment2 | R | false | false | 1,195 | r | ## It computes a special "matrix".
## It exposes four functions: set the value of the matrix, get the value of
## the matrix, set the value of the inverse, and get the value of the inverse.
# Cache holder for a matrix and its lazily computed inverse. Returns a
# list of four accessor closures sharing this function's environment.
makeCacheMatrix <- function(x = matrix()) {
# 'i' holds the cached inverse; NULL means "not computed yet".
i <- NULL
set <- function(y) {
# Replacing the matrix invalidates any previously cached inverse.
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(
set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function calculates the inverse of the special "matrix" returned by
## makeCacheMatrix above. It first checks whether the inverse has already been
## calculated; if so, it retrieves the inverse from the cache and skips the
## computation. Otherwise it calculates the inverse of the matrix, stores it in
## the cache via the setinverse function, and returns the inverse of 'x'.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', where 'x' is a cache
  ## object created by makeCacheMatrix(). If the inverse is already
  ## cached it is returned directly; otherwise it is computed with
  ## solve(), stored in the cache, and returned. Extra arguments in
  ## '...' are forwarded to solve().
  inv <- x$getinverse()  # BUG FIX: was x$getInverse(); makeCacheMatrix exposes 'getinverse'
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinverse(inv)      # BUG FIX: was x$setInverse(); the accessor is 'setinverse'
  inv
}
|
#' ---
#' title: "Gráficos com ggplot2"
#' output: html_notebook
#' ---
## ----setup, include=FALSE, message=FALSE, warning=FALSE------------------
# Chunk defaults for the rendered notebook: show code, hide warnings and
# messages, and fix the rendered figure size.
knitr::opts_chunk$set(echo = TRUE, warning = FALSE, message = FALSE, out.width = "600px", out.height="400px")
# Brazilian Portuguese locale for locale-dependent output (e.g. month labels).
Sys.setlocale("LC_ALL", "pt_BR")
#'
## ------------------------------------------------------------------------
# bibliotecas utilizadas
# Install missing packages on first use only; library() below attaches them.
if (!"Hmisc" %in% installed.packages()) install.packages("Hmisc")
if (!"ggcorrplot" %in% installed.packages()) install.packages("ggcorrplot")
library(tidyverse)
library(lubridate)
library(magrittr)
library(Hmisc)
#'
#' ## Por que visualizar os dados?
#'
#' - [Quarteto de Anscombe e a importância da visualização](https://pt.wikipedia.org/wiki/Quarteto_de_Anscombe)
#' - [Datasaurus Dozen](https://www.autodeskresearch.com/publications/samestats)
#'
#' ## O que é análise exploratória com gráficos?
#'
#' [Wikipedia](https://pt.wikipedia.org/wiki/An%C3%A1lise_explorat%C3%B3ria_de_dados)
#'
#' - Forma de responder visualmente a questões levantadas sobre propriedades e relacionamentos entre variáveis de um _dataset_.
#' - Complemento visual para as estatísticas descritivas.
#' - Fase de descoberta de problemas, padrões e relacionamentos.
#' - Fundamental para modelagem e para a comunicação.
#'
#'
#' ## ggplot2 (1)
#'
#' Biblioteca gráfica baseada na [gramática dos gráficos composta em camadas](http://vita.had.co.nz/papers/layered-grammar.html).
#'
#' - Conecta variáveis de Data Frames com elementos gráficos através de abstrações que tornam a visualização independente dos dados tabulares.
#'
#' - Resolve de forma simples muitas das complexidades da criação de gráficos, como posicionamento de legendas, escalas de cores e formatação de textos em gráficos.
#'
#' - Expõe um modelo de composição em camadas que facilita o enriquecimento visual através da adição de camadas.
#'
#' ## ggplot2 (2)
#'
#' Cumpre um papel dual:
#'
#' - Fornece resultados gráficos de alta qualidade, utilizado em publicações e sites de notícias
#' + FiveThirtyEight, New York Times são exemplos
#' - Possibilita a criação de visualizações rápidas para análise exploratória com poucas linhas
#'
#' O modelo de adição de camadas permite enriquecer um gráfico exploratório simples para ter qualidade de publicação.
#'
## ----echo=FALSE, message=FALSE, warning=FALSE----------------------------
### Carga dos dados de exemplo
# Load the example TED Talks data set.
# FIX: the original read the CSV twice — the first read_csv() result was
# immediately overwritten by the piped second read — so only one read is kept.
ted_talks <- read_csv("aula-05/data/ted_main.csv.gz") %>%
  # Convert raw epoch/second columns to proper duration and date types.
  mutate( duration = duration(duration, units = "seconds")
        , film_date = as_datetime(film_date) %>% as_date()
        , published_date = as_datetime(published_date)) %>%
  mutate( event = factor(event)
        , speaker_occupation = factor(speaker_occupation)) %>%
  select(title, views, comments, duration:main_speaker, num_speaker:published_date, speaker_occupation)
ted_talks
#'
#' ## Componentes mínimos necessários
#'
#' O `ggplot`, assim como o `dplyr`, utiliza uma DSL (Domain Specific Language, Linguagem Específica de Domínio) que descreve seus componente.
#'
#' Os principais conceitos do `ggplot` são:
#'
#' - **canvas**, espaço visual onde formas geométricas serão exibidas
#' - **estéticas**, que são propriedades visuais dos elementos gráficos
#' - **mapeamento de estéticas**, que conecta as propriedades visuais com variáveis dos data frames
#' - **geometrias**, formas geométricas exibidas no canvas
#' - **escalas**, que são controles visuais das variáveis mapeadas para estéticas
#' - **tema**, que define atributos visuais do canvas
#'
#' Tomando como exemplo um gráfico de pontos, vamos visualizar a quantidade de linguagens por vídeo ao longo dos anos. Para este gráfico escolhi utilizar a data de filmagem.
#'
#' - Iniciamos com a criação da variável ano a partir da variável film_date. O data frame resultante desta transformação é conectado às propriedades visuais por meio do mapeamento de estéticas, que conecta a variável year com o eixo **x** e a variável languages com o eixo **y**. Estas variáveis serão utilizadas como _default_ em todas as formas geométricas deste gráfico.
#'
#' - A seguir, utilizamos a forma geométrica do ponto. Neste exemplo modificamos a propriedade _alpha_ para adicionar transparência ao preencimento do ponto. Esta transparência facilita a identificação visual da concentração de pontos em um mesmo par (x, y)
#'
#' - A escala _x_ é modificada para que os rótulos exibam os anos de 5 em 5.
#'
#' - Escolho o tema theme_bw, que utiliza padrões de preto e branco.
#'
## ------------------------------------------------------------------------
# Languages per talk by filming year; alpha reveals point density.
ted_talks %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages )) +
geom_point( alpha = .3 ) +
scale_x_continuous( breaks = seq( from = 1970, to = 2020, by = 5 )) +
theme_bw()
#'
#' ### Análise do gráfico
#'
#' O que identificamos em relação aos mínimos e máximos?
#' Onde temos maior ocorrência de apresentações, nos eixos x e y?
#' Que padrões a transparência destaca?
#'
#'
#' ## Rótulos
#'
#' No ggplot2 os rótulos podem ser inseridos de diferentes formas. A forma mais consistente é através da função **labs**, que possibilita informar o rótulo de cada estética. No exemplo abaixo atualizei os rótulos dos eixos **x** e **y**, e aproveitei para inserir títulos no gráfico.
#'
#' Seguindo com o exemplo de Quantidade de Línguas por ano, vamos reduzir o período para considerar somente apresentações de 2005 em diante. Vídeos que estavam sem quantidade de línguas foram modificados para ter 1 língua.
#'
#' Pelo padrão abaixo, a decisão de inserir artificialmente 1 língua pareceu acertada?
#'
## ------------------------------------------------------------------------
# Restrict to talks filmed from 2005 onward, and floor 'languages' at 1
# (talks recorded with 0 languages are assumed to have at least their own).
ted_talks_recentes <- ted_talks %>%
filter(film_date >= ymd(20050101)) %>%
mutate(languages = if_else(languages == 0, 1L, languages))
# Same scatter as before, now on the filtered data and with full labels.
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages )) +
geom_point( alpha = .3 ) +
scale_x_continuous( breaks = 2005:2017) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, subtitle = "Período considerado somente a partir de 2005. Dados ajustados para mínimo de 1 língua por apresentação."
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#'
#' ## Estatísticas
#'
#' Estatísticas são combinações de formas geométricas que apresentam visualmente o resultado de estatísticas aplicadas sobre grupos. No exemplo abaixo substituimos a forma de pontos pela forma de resumo `stat_summary`. Esta forma requer uma função que será aplicada sobre a estética **y** para dela derivar um novo **y** central, e mais as estéticas **ymin** e **ymax**. A função escolhida neste caso foi a função `mean_sdl` que retorna o **y** central como a média, o **ymin** como 2 desvios padrão abaixo da média e **ymax** como 2 desvios padrão acima da média.
#'
#' - Incluí uma escala para o eixo y
#' - Reparem que é possível acrescentar quebras nos textos. Neste caso inseri uma quebra de linha no subtítulo
#'
## ------------------------------------------------------------------------
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages )) +
stat_summary(fun.data = mean_sdl) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = -10, to = 60, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, subtitle = "Período considerado somente a partir de 2005. Dados ajustados para mínimo de 1 língua por apresentação.\n O ponto é a média no ano e a barra vertical representa o intervalo de 2 desvios acima e abaixo da média."
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' > ATIVIDADE
#'
#' Repetir os gráficos de pontos e de sumário utilizando o ano de publicação no eixo x e a duração no eixo y. Cuidado com a escala do eixo y!
#'
#' > FIM ATIVIDADE
#'
#' ## Gráficos de barras
#'
#' Diferentes formas geométricas do `ggplot` resultam em barras:
#'
#' - `geom_col`, quando uma variável do data frame representa o tamanho da barra. Requer as estéticas **x** e **y**.
#' - `geom_bar`, quando o tamanho da barra for a contagem de observações. Requer a estética **x**.
#'
#' #### Exemplo com geom_col
#'
#' Neste exemplo vemos que é possível determinar a cor de preenchimento da barra através da estética **fill**. Vemos também que é possível formatar um eixo em milhares utilizando a função `format_format` do pacote `scales`.
#'
## ------------------------------------------------------------------------
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
group_by(year) %>%
summarise(sum_views = sum(views)) %>%
ungroup() %>%
ggplot( aes( x = year, y = sum_views )) +
geom_col(fill="blue", alpha=0.6) +
scale_x_continuous(breaks = 2005:2017) +
scale_y_continuous(labels = scales::format_format(big.mark = ".", decimal.mark=",", scientific = FALSE)) +
labs( x = "Ano de filmagem"
, y = "Total de visualizações de apresentações"
, title = "Exemplo com geom_col"
, subtitle = "Exibição do total de visualizações de apresentações de um mesmo ano de filmagem") +
theme_bw()
#'
#' #### Exemplo com geom_bar
#'
## ------------------------------------------------------------------------
ggplot(ted_talks_recentes, aes( x = year( film_date ))) +
geom_bar( fill="blue", color = "blue", alpha=0.6 ) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq( from = 50, to = 300, by = 50 )) +
labs( x = "Ano de filmagem"
, y = "Total de apresentações publicadas"
, title = "Exemplo com geom_bar" ) +
theme_bw()
#'
#' #### Cores e grupos
#'
#' No ggplot podemos determinar a cor de uma forma geométrica a partir do mapeamento de uma estética. As cores podem ser em escala contínua quando a variável for numérica e em paletas de cores quando a variável for categórica.
#'
#' No exemplo abaixo, distiguimos a quantidade de apresentações publicadas por mês utilizando diferentes cores.
#'
## ------------------------------------------------------------------------
ted_talks_recentes %>%
mutate( ano = year( published_date ), mes = month( published_date, label = TRUE )) %>%
ggplot(aes( x = ano, fill = mes )) +
geom_bar( alpha=0.6, color="black" ) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq( from = 50, to = 300, by = 50 )) +
labs( x = "Ano de filmagem"
, y = "Total de apresentações"
, fill = "Mês do ano"
, title = "Publicações por mês em cada ano" ) +
theme_bw()
#'
#' ## Facetas
#'
#' Facetas são um recurso que possibilita a divisão de um canvas em vários gráficos homogêneos, separados por uma variável do data frame.
#'
#' Neste exemplo repetimos o gráfico de barras original utilizando a função **facet_wrap**. Por meio desta função temos agora um gráfico por mês, organizados em um grid 3 x 4.
#'
#' Ainda, o tema foi modificado para que o texto do eixo **x** seja exibido em um ângulo de 45 graus, viabilizando a exibição de todos os anos do intervalo.
#'
## ------------------------------------------------------------------------
ted_talks_recentes %>%
mutate( ano = year( published_date ), mes = month( published_date, label = TRUE )) %>%
ggplot(aes( x = ano )) +
geom_bar( alpha=0.6 ) +
scale_x_continuous( breaks = 2005:2017 ) +
facet_wrap (~ mes, ncol = 3 ) +
labs( x = "Ano de filmagem"
, y = "Total de apresentações"
, fill = "Mês do ano"
, title = "Publicações por mês em cada ano" ) +
theme_bw() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
#'
#' ## Boxplot
#'
#' Boxplot é um tipo de gráfico que apresenta as relações de quartis de forma estruturada e contextualizada, além de indicar as faixas de valores.
#'
#' Como o ano é uma variável numérica, é necessário um mapeamento de estética para que a forma geométrica de boxplot compreenda como um grupo.
#'
#' O boxplot apresenta as seguintes estatísticas:
#'
#' - **Mediana**, a linha horizontal localizada dentro do retângulo.
#' - **Primeiro e terceiro quartis**, limites inferior e superior do retângulo.
#' - Relação Interquartil (**IQR**), onde:
#' + a linha vertical superior alcança o maior valor menor ou igual a 1.5 * o IQR somado ao terceiro quartil
#' + a linha vertical inferior alcança o menor valor maior ou igual a 1.5 * o IQR subtraído do primeiro quartil
#' - **Outliers**, que são quaisquer medidas que excedem as linhas de IQR
#'
## ------------------------------------------------------------------------
# Boxplot of languages per year; 'group = year' is needed because the
# numeric year would otherwise be treated as one continuous group.
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages, group = year )) +
geom_boxplot() +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = 0, to = 100, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' A forma geométrica de boxplot do ggplot2 possibilita customizar alguns dos componentes. No exemplo abaixo modifiquei a cor e a transparência dos pontos de outlier.
#'
## ------------------------------------------------------------------------
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages, group = year )) +
geom_boxplot(outlier.color = "red", outlier.alpha = 0.8) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = 0, to = 100, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' ### Composição com múltiplos gráficos
#'
#' Assim como compomos gráficos combinando escalas, mapeamentos de estéticas e formas geométricas, podemos também combinar múltiplasa formas geométricas. O exemplo abaixo combina o boxplot com gráfico de pontos para ilustrar como as observações estão distribuídas.
#'
#' - **Jitter** (tremor) é uma variação de gráfico de pontos onde a posição é deslocada aleatoriamente em uma fração de altura e de largura. O exemplo abaixo aplica **jitter** para distribuir horizontalmente de forma que todas as observações estejam visíveis dentro de cada ano. Foi necessário aplicar uma transparência para melhor identificar a concentração
#' - As formas geométricas são sobrepostas na ordem em que são adicionadas ao canvas. No exemplo abaixo apliquei transparência no boxplot para que seja possível visualizar os pontos.
#'
## ------------------------------------------------------------------------
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages, group = year )) +
geom_jitter(alpha = .2, height = 0, width = 0.3) +
geom_boxplot(outlier.color = "red", outlier.alpha = 0.8, alpha = 0.2) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = 0, to = 100, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' Combinando **jitter** com **stat_summary**
#'
## ------------------------------------------------------------------------
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages )) +
geom_jitter(alpha = .2, height = 0, width = 0.3) +
stat_summary(fun.data = mean_sdl, color="red") +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = -10, to = 80, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, subtitle = "Período considerado somente a partir de 2005. Dados ajustados para mínimo de 1 língua por apresentação.\n O ponto é a média no ano e a barra vertical representa o intervalo de 2 desvios acima e abaixo da média."
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#'
#' ## Faixas de banda
#'
#' A forma geométrica de banda é outra maneira de demarcar visualmente os limites superior e inferior através de estatísticas descritivas. Esta forma requer as estéticas **ymin** e **ymax**, que foram previamente calculadas no Data Frame.
#'
## ------------------------------------------------------------------------
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
group_by(year) %>%
mutate(low = mean(languages) - 2 * sd(languages), hi = mean(languages) + 2 * sd(languages)) %>%
ungroup() %>%
ggplot( aes( x = year, y = languages, ymin = low, ymax = hi )) +
geom_ribbon(fill = "lightgray", alpha = 0.5) +
geom_jitter(alpha = .2, height = 0, width = 0.3) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = -10, to = 80, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da quantidade de línguas por vídeo ao longo dos anos"
, subtitle = "Período considerado somente a partir de 2005. Dados ajustados para mínimo de 1 língua por apresentação.\n A faixa cinza correponde ao intervalo de 2 desvios padrão acima e abaixo da média, calculados ano a ano."
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' ## Correlograma
#'
#' **ggcorrplot** possibilita visualizar as correlações entre variáveis numéricas. A matriz de correlações deve ser previamente calculada utilizando a função `cor`.
#'
## ------------------------------------------------------------------------
library(ggcorrplot)
# Correlation matrix of the numeric columns, rounded for label display.
# NOTE(review): select_if() is superseded and purrr's is_numeric is
# deprecated; a rewrite to select(where(is.numeric)) would change which
# columns pass the filter (the later select(-event, -speaker_occupation)
# implies factor columns currently survive it), so the original is kept —
# verify before modernizing.
corr <-
ted_talks_recentes %>%
select_if(is_numeric) %>%
mutate( duration = as.numeric(duration)
, published_date = as.numeric(published_date)
, film_date = as.numeric(film_date)) %>%
select(-event, -speaker_occupation) %>%
cor() %>% round(2)
# Lower-triangle correlogram, hierarchically ordered, with value labels.
ggcorrplot(corr, hc.order = TRUE, type = "lower", lab = TRUE)
#'
#'
#' ## Histograma
#'
#' > ATIVIDADE
#'
#' 1. Estude o material abaixo que explica a construção de histogramas
#'
#' - [http://flowingdata.com/2017/06/07/how-histograms-work/](http://flowingdata.com/2017/06/07/how-histograms-work/)
#' - [http://tinlizzie.org/histograms/](http://tinlizzie.org/histograms/)
#'
#' 2. Estude o help da função `geom_histogram`
# NOTE(review): the following lines appear to be a stray paste from the
# earlier geom_col example — with no ggplot() object in front of them they
# error when evaluated at the top level, so they are commented out to keep
# the script runnable. Delete them once confirmed.
# scale_y_continuous(labels = scales::format_format(big.mark = ".", decimal.mark=",", scientific = FALSE)) +
# labs( x = "Ano de filmagem"
# , y = "Total de visualizações de apresentações"
# , title = "Exemplo com geom_col"
# , subtitle = "Exibição do total de visualizações de apresentações de um mesmo ano de filmagem") +
# theme_bw()
#' 3. Crie um histograma da quantidade de visualizações multifacetado por ano de publicação, restrito aos anos entre 2012 e 2017.
# Student answer to exercise 3 (histogram of views by year).
# NOTE(review): right-assignment '->' is discouraged style; prefer '<-'.
# NOTE(review): the exercise asks for facets by publication year
# (facet_wrap), but none of the plots below facet — and the filter uses
# film_date rather than the published year; confirm against the exercise.
ted_talks_recentes %>% filter(year(film_date) >= 2012) -> TED_FINAL
# Count of talks per year (printed, not stored).
TED_FINAL %>% group_by(year(film_date)) %>%
summarise(ano = n()) %>%
ungroup()
TED_FINAL %>% mutate(ano = year(film_date)) -> TED_FINALL
# Baseline histogram with default bins.
TED_FINAL %>% ggplot(aes(x = views)) +
geom_histogram()
# Same histogram with explicit colors and 100 bins.
TED_FINALL %>% ggplot(aes(x = views)) +
geom_histogram(color="darkblue", fill="lightblue", bins = 100)
# Adds non-scientific axis labels.
TED_FINALL %>% ggplot(aes(x = views)) +
geom_histogram(color="darkblue", fill="lightblue", bins = 100) +
scale_x_continuous(labels = scales::format_format(scientific = FALSE))
# Adds a title and black-and-white theme.
TED_FINALL %>% ggplot(aes(x = views)) +
geom_histogram(color="darkblue", fill="lightblue", bins = 100) +
scale_x_continuous(labels = scales::format_format(scientific = FALSE)) +
labs(title = "Histograma") +
theme_bw()
# Same plot with the classic theme.
TED_FINALL %>% ggplot(aes(x = views)) +
geom_histogram(color="darkblue", fill="lightblue", bins = 100) +
scale_x_continuous(labels = scales::format_format(scientific = FALSE)) +
labs(title = "Histograma") +
theme_classic()
# Final version: coarser bins plus a dashed red line at the mean.
TED_FINALL %>% ggplot(aes(x = views)) +
geom_histogram(color="darkblue", fill="lightblue", bins = 50) +
scale_x_continuous(labels = scales::format_format(scientific = FALSE)) +
geom_vline(aes(xintercept=mean(views)), color="red",linetype="dashed") +
labs(title = "Histograma") +
theme_classic()
| /aula-05/02-graficos-ggplot2.R | no_license | bernicker/data-analysis_with_R-201801 | R | false | false | 21,454 | r | #' ---
#' title: "Gráficos com ggplot2"
#' output: html_notebook
#' ---
## ----setup, include=FALSE, message=FALSE, warning=FALSE------------------
knitr::opts_chunk$set(echo = TRUE, warning = FALSE, message = FALSE, out.width = "600px", out.height="400px")
Sys.setlocale("LC_ALL", "pt_BR")
#'
## ------------------------------------------------------------------------
# bibliotecas utilizadas
if (!"Hmisc" %in% installed.packages()) install.packages("Hmisc")
if (!"ggcorrplot" %in% installed.packages()) install.packages("ggcorrplot")
library(tidyverse)
library(lubridate)
library(magrittr)
library(Hmisc)
#'
#' ## Por que visualizar os dados?
#'
#' - [Quarteto de Anscombe e a importância da visualização](https://pt.wikipedia.org/wiki/Quarteto_de_Anscombe)
#' - [Datasaurus Dozen](https://www.autodeskresearch.com/publications/samestats)
#'
#' ## O que é análise exploratória com gráficos?
#'
#' [Wikipedia](https://pt.wikipedia.org/wiki/An%C3%A1lise_explorat%C3%B3ria_de_dados)
#'
#' - Forma de responder visualmente a questões levantadas sobre propriedades e relacionamentos entre variáveis de um _dataset_.
#' - Complemento visual para as estatísticas descritivas.
#' - Fase de descoberta de problemas, padrões e relacionamentos.
#' - Fundamental para modelagem e para a comunicação.
#'
#'
#' ## ggplot2 (1)
#'
#' Biblioteca gráfica baseada na [gramática dos gráficos composta em camadas](http://vita.had.co.nz/papers/layered-grammar.html).
#'
#' - Conecta variáveis de Data Frames com elementos gráficos através de abstrações que tornam a visualização independente dos dados tabulares.
#'
#' - Resolve de forma simples muitas das complexidades da criação de gráficos, como posicionamento de legendas, escalas de cores e formatação de textos em gráficos.
#'
#' - Expõe um modelo de composição em camadas que facilita o enriquecimento visual através da adição de camadas.
#'
#' ## ggplot2 (2)
#'
#' Cumpre um papel dual:
#'
#' - Fornece resultados gráficos de alta qualidade, utilizado em publicações e sites de notícias
#' + FiveThirtyEight, New York Times são exemplos
#' - Possibilita a criação de visualizações rápidas para análise exploratória com poucas linhas
#'
#' O modelo de adição de camadas permite enriquecer um gráfico exploratório simples para ter qualidade de publicação.
#'
## ----echo=FALSE, message=FALSE, warning=FALSE----------------------------
### Carga dos dados de exemplo
# Load the example TED Talks data set.
# FIX: the original read the CSV twice — the first read_csv() result was
# immediately overwritten by the piped second read — so only one read is kept.
ted_talks <- read_csv("aula-05/data/ted_main.csv.gz") %>%
  # Convert raw epoch/second columns to proper duration and date types.
  mutate( duration = duration(duration, units = "seconds")
        , film_date = as_datetime(film_date) %>% as_date()
        , published_date = as_datetime(published_date)) %>%
  mutate( event = factor(event)
        , speaker_occupation = factor(speaker_occupation)) %>%
  select(title, views, comments, duration:main_speaker, num_speaker:published_date, speaker_occupation)
ted_talks
#'
#' ## Componentes mínimos necessários
#'
#' O `ggplot`, assim como o `dplyr`, utiliza uma DSL (Domain Specific Language, Linguagem Específica de Domínio) que descreve seus componente.
#'
#' Os principais conceitos do `ggplot` são:
#'
#' - **canvas**, espaço visual onde formas geométricas serão exibidas
#' - **estéticas**, que são propriedades visuais dos elementos gráficos
#' - **mapeamento de estéticas**, que conecta as propriedades visuais com variáveis dos data frames
#' - **geometrias**, formas geométricas exibidas no canvas
#' - **escalas**, que são controles visuais das variáveis mapeadas para estéticas
#' - **tema**, que define atributos visuais do canvas
#'
#' Tomando como exemplo um gráfico de pontos, vamos visualizar a quantidade de linguagens por vídeo ao longo dos anos. Para este gráfico escolhi utilizar a data de filmagem.
#'
#' - Iniciamos com a criação da variável ano a partir da variável film_date. O data frame resultante desta transformação é conectado às propriedades visuais por meio do mapeamento de estéticas, que conecta a variável year com o eixo **x** e a variável languages com o eixo **y**. Estas variáveis serão utilizadas como _default_ em todas as formas geométricas deste gráfico.
#'
#' - A seguir, utilizamos a forma geométrica do ponto. Neste exemplo modificamos a propriedade _alpha_ para adicionar transparência ao preenchimento do ponto. Esta transparência facilita a identificação visual da concentração de pontos em um mesmo par (x, y).
#'
#' - A escala _x_ é modificada para que os rótulos exibam os anos de 5 em 5.
#'
#' - Escolho o tema theme_bw, que utiliza padrões de preto e branco.
#'
## ------------------------------------------------------------------------
# Scatter plot: number of languages per talk against filming year.
ted_talks %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages )) +
geom_point( alpha = .3 ) +   # transparency reveals overplotting density
scale_x_continuous( breaks = seq( from = 1970, to = 2020, by = 5 )) +
theme_bw()
#'
#' ### Análise do gráfico
#'
#' O que identificamos em relação aos mínimos e máximos?
#' Onde temos maior ocorrência de apresentações, nos eixos x e y?
#' Que padrões a transparência destaca?
#'
#'
#' ## Rótulos
#'
#' No ggplot2 os rótulos podem ser inseridos de diferentes formas. A forma mais consistente é através da função **labs**, que possibilita informar o rótulo de cada estética. No exemplo abaixo atualizei os rótulos dos eixos **x** e **y**, e aproveitei para inserir títulos no gráfico.
#'
#' Seguindo com o exemplo de Quantidade de Línguas por ano, vamos reduzir o período para considerar somente apresentações de 2005 em diante. Vídeos que estavam sem quantidade de línguas foram modificados para ter 1 língua.
#'
#' Pelo padrão abaixo, a decisão de inserir artificialmente 1 língua pareceu acertada?
#'
## ------------------------------------------------------------------------
# Restrict to talks filmed from 2005 on; talks recorded with 0 languages are
# bumped to 1 (data adjustment described in the prose above).
ted_talks_recentes <- ted_talks %>%
filter(film_date >= ymd(20050101)) %>%
mutate(languages = if_else(languages == 0, 1L, languages))
# Same scatter plot as before, now with axis labels, title, subtitle and
# caption supplied through labs().
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages )) +
geom_point( alpha = .3 ) +
scale_x_continuous( breaks = 2005:2017) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, subtitle = "Período considerado somente a partir de 2005. Dados ajustados para mínimo de 1 língua por apresentação."
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#'
#' ## Estatísticas
#'
#' Estatísticas são combinações de formas geométricas que apresentam visualmente o resultado de estatísticas aplicadas sobre grupos. No exemplo abaixo substituimos a forma de pontos pela forma de resumo `stat_summary`. Esta forma requer uma função que será aplicada sobre a estética **y** para dela derivar um novo **y** central, e mais as estéticas **ymin** e **ymax**. A função escolhida neste caso foi a função `mean_sdl` que retorna o **y** central como a média, o **ymin** como 2 desvios padrão abaixo da média e **ymax** como 2 desvios padrão acima da média.
#'
#' - Incluí uma escala para o eixo y
#' - Reparem que é possível acrescentar quebras nos textos. Neste caso inseri uma quebra de linha no subtítulo
#'
## ------------------------------------------------------------------------
# Summary statistic instead of raw points: mean_sdl supplies y/ymin/ymax as
# mean +/- 2 standard deviations per year (see prose above).
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages )) +
stat_summary(fun.data = mean_sdl) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = -10, to = 60, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, subtitle = "Período considerado somente a partir de 2005. Dados ajustados para mínimo de 1 língua por apresentação.\n O ponto é a média no ano e a barra vertical representa o intervalo de 2 desvios acima e abaixo da média."
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' > ATIVIDADE
#'
#' Repetir os gráficos de pontos e de sumário utilizando o ano de publicação no eixo x e a duração no eixo y. Cuidado com a escala do eixo y!
#'
#' > FIM ATIVIDADE
#'
#' ## Gráficos de barras
#'
#' Diferentes formas geométricas do `ggplot` resultam em barras:
#'
#' - `geom_col`, quando uma variável do data frame representa o tamanho da barra. Requer as estéticas **x** e **y**.
#' - `geom_bar`, quando o tamanho da barra for a contagem de observações. Requer a estética **x**.
#'
#' #### Exemplo com geom_col
#'
#' Neste exemplo vemos que é possível determinar a cor de preenchimento da barra através da estética **fill**. Vemos também que é possível formatar um eixo em milhares utilizando a função `format_format` do pacote `scales`.
#'
## ------------------------------------------------------------------------
# geom_col: bar height comes from an existing column (total views per year).
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
group_by(year) %>%
summarise(sum_views = sum(views)) %>%
ungroup() %>%
ggplot( aes( x = year, y = sum_views )) +
geom_col(fill="blue", alpha=0.6) +
scale_x_continuous(breaks = 2005:2017) +
scale_y_continuous(labels = scales::format_format(big.mark = ".", decimal.mark=",", scientific = FALSE)) +   # thousands separator, no scientific notation
labs( x = "Ano de filmagem"
, y = "Total de visualizações de apresentações"
, title = "Exemplo com geom_col"
, subtitle = "Exibição do total de visualizações de apresentações de um mesmo ano de filmagem") +
theme_bw()
#'
#' #### Exemplo com geom_bar
#'
## ------------------------------------------------------------------------
# geom_bar: bar height is the row count per x value (talks per filming year).
ggplot(ted_talks_recentes, aes( x = year( film_date ))) +
geom_bar( fill="blue", color = "blue", alpha=0.6 ) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq( from = 50, to = 300, by = 50 )) +
labs( x = "Ano de filmagem"
, y = "Total de apresentações publicadas"
, title = "Exemplo com geom_bar" ) +
theme_bw()
#'
#' #### Cores e grupos
#'
#' No ggplot podemos determinar a cor de uma forma geométrica a partir do mapeamento de uma estética. As cores podem ser em escala contínua quando a variável for numérica e em paletas de cores quando a variável for categórica.
#'
#' No exemplo abaixo, distinguimos a quantidade de apresentações publicadas por mês utilizando diferentes cores.
#'
## ------------------------------------------------------------------------
# Stacked bars: mapping fill to the month gives one colour segment per month
# within each year's bar.
ted_talks_recentes %>%
mutate( ano = year( published_date ), mes = month( published_date, label = TRUE )) %>%
ggplot(aes( x = ano, fill = mes )) +
geom_bar( alpha=0.6, color="black" ) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq( from = 50, to = 300, by = 50 )) +
labs( x = "Ano de filmagem"
, y = "Total de apresentações"
, fill = "Mês do ano"
, title = "Publicações por mês em cada ano" ) +
theme_bw()
#'
#' ## Facetas
#'
#' Facetas são um recurso que possibilita a divisão de um canvas em vários gráficos homogêneos, separados por uma variável do data frame.
#'
#' Neste exemplo repetimos o gráfico de barras original utilizando a função **facet_wrap**. Por meio desta função temos agora um gráfico por mês, organizados em um grid 3 x 4.
#'
#' Ainda, o tema foi modificado para que o texto do eixo **x** seja exibido em um ângulo de 45 graus, viabilizando a exibição de todos os anos do intervalo.
#'
## ------------------------------------------------------------------------
# facet_wrap: one panel per month, laid out in 3 columns; x-axis labels are
# rotated 45 degrees via theme() so all years fit.
ted_talks_recentes %>%
mutate( ano = year( published_date ), mes = month( published_date, label = TRUE )) %>%
ggplot(aes( x = ano )) +
geom_bar( alpha=0.6 ) +
scale_x_continuous( breaks = 2005:2017 ) +
facet_wrap (~ mes, ncol = 3 ) +
labs( x = "Ano de filmagem"
, y = "Total de apresentações"
, fill = "Mês do ano"
, title = "Publicações por mês em cada ano" ) +
theme_bw() +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
#'
#' ## Boxplot
#'
#' Boxplot é um tipo de gráfico que apresenta as relações de quartis de forma estruturada e contextualizada, além de indicar as faixas de valores.
#'
#' Como o ano é uma variável numérica, é necessário um mapeamento de estética para que a forma geométrica de boxplot compreenda cada ano como um grupo.
#'
#' O boxplot apresenta as seguintes estatísticas:
#'
#' - **Mediana**, a linha horizontal localizada dentro do retângulo.
#' - **Primeiro e terceiro quartis**, limites inferior e superior do retângulo.
#' - Relação Interquartil (**IQR**), onde:
#' + a linha vertical superior alcança o maior valor menor ou igual a 1.5 * o IQR somado ao terceiro quartil
#' + a linha vertical inferior alcança o menor valor maior ou igual a 1.5 * o IQR subtraído do primeiro quartil
#' - **Outliers**, que são quaisquer medidas que excedem as linhas de IQR
#'
## ------------------------------------------------------------------------
# Boxplot per year; group = year is required because year is numeric --
# without it geom_boxplot would treat all years as a single group.
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages, group = year )) +
geom_boxplot() +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = 0, to = 100, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' A forma geométrica de boxplot do ggplot2 possibilita customizar alguns dos componentes. No exemplo abaixo modifiquei a cor e a transparência dos pontos de outlier.
#'
## ------------------------------------------------------------------------
# Same boxplot, customising the outlier points (red, slightly transparent).
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages, group = year )) +
geom_boxplot(outlier.color = "red", outlier.alpha = 0.8) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = 0, to = 100, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' ### Composição com múltiplos gráficos
#'
#' Assim como compomos gráficos combinando escalas, mapeamentos de estéticas e formas geométricas, podemos também combinar múltiplas formas geométricas. O exemplo abaixo combina o boxplot com gráfico de pontos para ilustrar como as observações estão distribuídas.
#'
#' - **Jitter** (tremor) é uma variação de gráfico de pontos onde a posição é deslocada aleatoriamente em uma fração de altura e de largura. O exemplo abaixo aplica **jitter** para distribuir horizontalmente de forma que todas as observações estejam visíveis dentro de cada ano. Foi necessário aplicar uma transparência para melhor identificar a concentração
#' - As formas geométricas são sobrepostas na ordem em que são adicionadas ao canvas. No exemplo abaixo apliquei transparência no boxplot para que seja possível visualizar os pontos.
#'
## ------------------------------------------------------------------------
# Layer order matters: jittered points are drawn first, then translucent
# boxplots on top, so both remain visible.
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages, group = year )) +
geom_jitter(alpha = .2, height = 0, width = 0.3) +
geom_boxplot(outlier.color = "red", outlier.alpha = 0.8, alpha = 0.2) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = 0, to = 100, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' Combinando **jitter** com **stat_summary**
#'
## ------------------------------------------------------------------------
# Jittered raw points combined with the mean_sdl summary (red) per year.
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
ggplot( aes( x = year, y = languages )) +
geom_jitter(alpha = .2, height = 0, width = 0.3) +
stat_summary(fun.data = mean_sdl, color="red") +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = -10, to = 80, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da Quantidade de Línguas por vídeo ao longo dos anos"
, subtitle = "Período considerado somente a partir de 2005. Dados ajustados para mínimo de 1 língua por apresentação.\n O ponto é a média no ano e a barra vertical representa o intervalo de 2 desvios acima e abaixo da média."
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#'
#' ## Faixas de banda
#'
#' A forma geométrica de banda é outra maneira de demarcar visualmente os limites superior e inferior através de estatísticas descritivas. Esta forma requer as estéticas **ymin** e **ymax**, que foram previamente calculadas no Data Frame.
#'
## ------------------------------------------------------------------------
# Ribbon band: ymin/ymax are mean +/- 2 standard deviations per year,
# precomputed on the grouped data frame before plotting.
ted_talks_recentes %>%
mutate( year = year( film_date )) %>%
group_by(year) %>%
mutate(low = mean(languages) - 2 * sd(languages), hi = mean(languages) + 2 * sd(languages)) %>%
ungroup() %>%
ggplot( aes( x = year, y = languages, ymin = low, ymax = hi )) +
geom_ribbon(fill = "lightgray", alpha = 0.5) +
geom_jitter(alpha = .2, height = 0, width = 0.3) +
scale_x_continuous( breaks = 2005:2017 ) +
scale_y_continuous( breaks = seq(from = -10, to = 80, by = 5 )) +
labs( x = "Ano de filmagem"
, y = "Quantidade de Línguas"
, title = "Evolução da quantidade de línguas por vídeo ao longo dos anos"
, subtitle = "Período considerado somente a partir de 2005. Dados ajustados para mínimo de 1 língua por apresentação.\n A faixa cinza correponde ao intervalo de 2 desvios padrão acima e abaixo da média, calculados ano a ano."
, caption = "Dados de TED Talks de https://www.kaggle.com/rounakbanik/ted-talks/data") +
theme_bw()
#'
#' ## Correlograma
#'
#' **ggcorrplot** possibilita visualizar as correlações entre variáveis numéricas. A matriz de correlações deve ser previamente calculada utilizando a função `cor`.
#'
## ------------------------------------------------------------------------
library(ggcorrplot)
# Build the correlation matrix of the numeric columns, rounded to 2 decimals.
corr <-
ted_talks_recentes %>%
select_if(is_numeric) %>%
# NOTE(review): `is_numeric` (deprecated, from purrr) is used above, and the
# factor columns event/speaker_occupation are dropped explicitly below --
# which suggests is_numeric kept them. Do not blindly replace it with base
# is.numeric without re-checking which columns survive the selection.
mutate( duration = as.numeric(duration)
, published_date = as.numeric(published_date)
, film_date = as.numeric(film_date)) %>%
select(-event, -speaker_occupation) %>%
cor() %>% round(2)
# Lower-triangle correlogram, hierarchically clustered, with value labels.
ggcorrplot(corr, hc.order = TRUE, type = "lower", lab = TRUE)
#'
#'
#' ## Histograma
#'
#' > ATIVIDADE
#'
#' 1. Estude o material abaixo que explica a construção de histogramas
#'
#' - [http://flowingdata.com/2017/06/07/how-histograms-work/](http://flowingdata.com/2017/06/07/how-histograms-work/)
#' - [http://tinlizzie.org/histograms/](http://tinlizzie.org/histograms/)
#'
#' 2. Estude o help da função `geom_histogram`
# NOTE(review): the lines below appear to be a stray leftover pasted from the
# geom_col example above -- they start mid-chain (there is no ggplot object to
# the left of the first `+`), so they do not form a runnable plot on their
# own. Confirm and remove.
scale_y_continuous(labels = scales::format_format(big.mark = ".", decimal.mark=",", scientific = FALSE)) +
labs( x = "Ano de filmagem"
, y = "Total de visualizações de apresentações"
, title = "Exemplo com geom_col"
, subtitle = "Exibição do total de visualizações de apresentações de um mesmo ano de filmagem") +
theme_bw()
#' 3. Crie um histograma da quantidade de visualizações multifacetado por ano de publicação, restrito aos anos entre 2012 e 2017.
# Activity solution: histogram of views faceted by publication year,
# restricted to 2012-2017 (as the task above requires).
# Fixes relative to the draft this replaces:
#   - filter on published_date (the task asks for *publication* year), with
#     both a lower and an upper bound;
#   - a single faceted plot (facet_wrap) instead of several unfaceted drafts.
ted_talks_recentes %>%
  filter(between(year(published_date), 2012, 2017)) -> TED_FINAL

# Sanity check: number of talks per publication year (one facet group each).
TED_FINAL %>%
  count(ano = year(published_date))

# Keep the year as a column so it can drive the facets.
TED_FINAL %>% mutate(ano = year(published_date)) -> TED_FINALL

# Faceted histogram of views; the dashed red line marks the mean of views.
TED_FINALL %>% ggplot(aes(x = views)) +
  geom_histogram(color = "darkblue", fill = "lightblue", bins = 50) +
  scale_x_continuous(labels = scales::format_format(scientific = FALSE)) +
  geom_vline(aes(xintercept = mean(views)), color = "red", linetype = "dashed") +
  facet_wrap(~ ano) +
  labs(title = "Histograma de visualizações por ano de publicação (2012-2017)") +
  theme_classic()
|
# Rd example extracted from the BioCircos package.
library(BioCircos)
### Name: BioCircosLineTrack
### Title: Create a track with lines to be added to a BioCircos tracklist
### Aliases: BioCircosLineTrack
### ** Examples
# Draw a line track on top of a background track.
# NOTE(review): the chromosome argument rep(1,30) has length 30 while the
# positions/values vectors have length 100 -- confirm the intended recycling
# against the BioCircosLineTrack documentation.
BioCircos(BioCircosLineTrack('LnId', rep(1,30), 2e+6*(1:100), log(1:100))
+ BioCircosBackgroundTrack('BGId'))
| /data/genthat_extracted_code/BioCircos/examples/BioCircosLineTrack.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 290 | r | library(BioCircos)
### Name: BioCircosLineTrack
### Title: Create a track with lines to be added to a BioCircos tracklist
### Aliases: BioCircosLineTrack
### ** Examples
BioCircos(BioCircosLineTrack('LnId', rep(1,30), 2e+6*(1:100), log(1:100))
+ BioCircosBackgroundTrack('BGId'))
|
# Getting & Cleaning Data course project: build a tidy data set from the
# raw UCI HAR (Human Activity Recognition) files under root.dir.
library(plyr)
library(reshape2)
## Goals
## 1. each variable should be in one column
## 2. each observation of that variable should be in a different row
## 3. include ids to link tables together
## Merges the training and the test sets to create one data set.
root.dir <- "UCI HAR Dataset"
data.set <- list()
message("loading features.txt")
# Feature id/name pairs; names stay character for later regex matching.
data.set$features <- read.table(paste(root.dir, "features.txt", sep="/"), col.names=c('id', 'name'), stringsAsFactors=FALSE)
message("loading activity_features.txt")
data.set$activity_labels <- read.table(paste(root.dir, "activity_labels.txt", sep="/"), col.names=c('id', 'Activity'))
message("loading test set")
# Each set is subject id + activity id + the feature columns, column-bound.
data.set$test <- cbind(subject=read.table(paste(root.dir, "test", "subject_test.txt", sep="/"), col.names="Subject"),
y=read.table(paste(root.dir, "test", "y_test.txt", sep="/"), col.names="Activity.ID"),
x=read.table(paste(root.dir, "test", "x_test.txt", sep="/")))
message("loading train set")
data.set$train <- cbind(subject=read.table(paste(root.dir, "train", "subject_train.txt", sep="/"), col.names="Subject"),
y=read.table(paste(root.dir, "train", "y_train.txt", sep="/"), col.names="Activity.ID"),
x=read.table(paste(root.dir, "train", "X_train.txt", sep="/")))
rename.features <- function(col) {
  # Map raw UCI HAR feature-name fragments to readable equivalents.
  # Order matters: the "-mean()-"/"-std()-" patterns must be applied before
  # their suffix-only counterparts "-mean()"/"-std()".
  substitutions <- c(
    "tBody"            = "Time.Body",
    "tGravity"         = "Time.Gravity",
    "fBody"            = "FFT.Body",
    "fGravity"         = "FFT.Gravity",
    "\\-mean\\(\\)\\-" = ".Mean.",
    "\\-std\\(\\)\\-"  = ".Std.",
    "\\-mean\\(\\)"    = ".Mean",
    "\\-std\\(\\)"     = ".Std"
  )
  for (pattern in names(substitutions)) {
    col <- gsub(pattern, substitutions[[pattern]], col)
  }
  col
}
## Extracts only the measurements on the mean and standard deviation for each measurement.
# Columns 1-2 are Subject and Activity.ID; feature columns are offset by 2,
# hence the "+ 2" on the matched feature indices.
tidy <- rbind(data.set$test, data.set$train)[,c(1, 2, grep("mean\\(|std\\(", data.set$features$name) + 2)]
## Uses descriptive activity names to name the activities in the data set
names(tidy) <- c("Subject", "Activity.ID", rename.features(data.set$features$name[grep("mean\\(|std\\(", data.set$features$name)]))
## Appropriately labels the data set with descriptive activity names.
tidy <- merge(tidy, data.set$activity_labels, by.x="Activity.ID", by.y="id")
tidy <- tidy[,!(names(tidy) %in% c("Activity.ID"))]
## Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy.mean <- ddply(melt(tidy, id.vars=c("Subject", "Activity")), .(Subject, Activity), summarise, MeanSamples=mean(value))
write.csv(tidy.mean, file = "tidy.mean.txt",row.names = FALSE)
write.csv(tidy, file = "tidy.txt",row.names = FALSE) | /run_analysis.R | no_license | ctkhushboos/GettingAndCleaningData | R | false | false | 2,714 | r | library(plyr)
library(reshape2)
## Goals
## 1. each variable should be in one column
## 2. each observation of that variable should be in a diferent row
## 3. include ids to link tables together
## Merges the training and the test sets to create one data set.
root.dir <- "UCI HAR Dataset"
data.set <- list()
message("loading features.txt")
data.set$features <- read.table(paste(root.dir, "features.txt", sep="/"), col.names=c('id', 'name'), stringsAsFactors=FALSE)
message("loading activity_features.txt")
data.set$activity_labels <- read.table(paste(root.dir, "activity_labels.txt", sep="/"), col.names=c('id', 'Activity'))
message("loading test set")
data.set$test <- cbind(subject=read.table(paste(root.dir, "test", "subject_test.txt", sep="/"), col.names="Subject"),
y=read.table(paste(root.dir, "test", "y_test.txt", sep="/"), col.names="Activity.ID"),
x=read.table(paste(root.dir, "test", "x_test.txt", sep="/")))
message("loading train set")
data.set$train <- cbind(subject=read.table(paste(root.dir, "train", "subject_train.txt", sep="/"), col.names="Subject"),
y=read.table(paste(root.dir, "train", "y_train.txt", sep="/"), col.names="Activity.ID"),
x=read.table(paste(root.dir, "train", "X_train.txt", sep="/")))
rename.features <- function(col) {
col <- gsub("tBody", "Time.Body", col)
col <- gsub("tGravity", "Time.Gravity", col)
col <- gsub("fBody", "FFT.Body", col)
col <- gsub("fGravity", "FFT.Gravity", col)
col <- gsub("\\-mean\\(\\)\\-", ".Mean.", col)
col <- gsub("\\-std\\(\\)\\-", ".Std.", col)
col <- gsub("\\-mean\\(\\)", ".Mean", col)
col <- gsub("\\-std\\(\\)", ".Std", col)
return(col)
}
## Extracts only the measurements on the mean and standard deviation for each measurement.
tidy <- rbind(data.set$test, data.set$train)[,c(1, 2, grep("mean\\(|std\\(", data.set$features$name) + 2)]
## Uses descriptive activity names to name the activities in the data set
names(tidy) <- c("Subject", "Activity.ID", rename.features(data.set$features$name[grep("mean\\(|std\\(", data.set$features$name)]))
## Appropriately labels the data set with descriptive activity names.
tidy <- merge(tidy, data.set$activity_labels, by.x="Activity.ID", by.y="id")
tidy <- tidy[,!(names(tidy) %in% c("Activity.ID"))]
## Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy.mean <- ddply(melt(tidy, id.vars=c("Subject", "Activity")), .(Subject, Activity), summarise, MeanSamples=mean(value))
write.csv(tidy.mean, file = "tidy.mean.txt",row.names = FALSE)
write.csv(tidy, file = "tidy.txt",row.names = FALSE) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-doc-waterpolygons.R
\docType{data}
\name{waterpolygons}
\alias{waterpolygons}
\title{Polygons of shoreline coordinates delineating 54 geographic subblocks in the Kitimat Fjord System}
\format{
A data frame with 4458 rows and 5 columns:
}
\source{
Written and compiled by Eric Keen
}
\usage{
waterpolygons
}
\description{
A data frame of coordinates for mapping the 54 geographic subblocks used to partition the
Kitimat Fjord System in Bangarang analyses.
}
\keyword{datasets}
| /man/waterpolygons.Rd | no_license | ericmkeen/bangarang | R | false | true | 560 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-doc-waterpolygons.R
\docType{data}
\name{waterpolygons}
\alias{waterpolygons}
\title{Polygons of shoreline coordinates delineating 54 geographic subblocks in the Kitimat Fjord System}
\format{
A data frame with 4458 rows and 5 columns:
}
\source{
Written and compiled by Eric Keen
}
\usage{
waterpolygons
}
\description{
A dataframe of coordinates for the mapping 54 geographic miniblocks used to partition the Kitimat
Fjord System in Bangarang analyses.
}
\keyword{datasets}
|
# Refresh the kernel matrices cached on a GP model after the kernel (or its
# hyperparameters) change.
#
# Args:
#   model: GP model list; fields read/written include kern, approx, beta, d,
#          blockEnd and the cached K_uu / K_uf / K / diagK / invK_uu /
#          logDetK_uu entries.
#   X:     input data matrix.
#   X_u:   inducing-point inputs (used only by the sparse approximations).
#
# Returns the model with refreshed kernel caches; finishes by calling
# gpUpdateAD(model, X) to update quantities derived from these caches.
gpUpdateKernels <- function (model, X, X_u) {
# Small diagonal regulariser used when the kernel has no white-noise term.
jitter = 1e-6
if (model$approx == "ftc") {
## (dev note) In the long term, we should allow different kernels in each dimension here.
# browser()
# Full training conditional: kernel over all inputs X.
model$K_uu = kernCompute(model$kern, X)
if ((!"isSpherical" %in% names(model)) || model$isSpherical) {
## Add inverse beta to diagonal if it exists.
# The seq(...) below indexes the diagonal of K_uu through the flattened
# matrix (stride = nrow + 1). beta is presumably a noise precision, so
# 1/beta acts as a noise variance -- TODO confirm against model docs.
if ("beta" %in% names(model) && length(model$beta)>0) {
model$K_uu[seq(1,length(model$K_uu),by= dim(model$K_uu)[1]+1)] =
model$K_uu[seq(1,length(model$K_uu),by= dim(model$K_uu)[1]+1)] + 1/model$beta
}
# browser()
invK = .jitCholInv(model$K_uu, silent=TRUE) ## pdinv + jitChol combined
model$invK_uu = invK$invM
# log|K_uu| from the Cholesky factor: 2 * sum(log(diag(chol))).
model$logDetK_uu = 2* sum( log ( diag(invK$chol) ) )
} else {
# Non-spherical case: one inverse and log-determinant per output
# dimension, restricted to that dimension's data indices.
model$invK_uu=list(); model$logDetK_uu=matrix(0,1,model$d)
for (i in 1:model$d) {
if ("beta" %in% names(model) && length(model$beta)>0) {
# beta may be shared or per-dimension (one column per output).
if (dim(as.matrix(model$beta))[2] == model$d)
betaAdd = model$beta[, i]
else
betaAdd = model$beta
model$K_uu[seq(1,length(model$K_uu),by= dim(model$K_uu)[1]+1)] =
model$K_uu[seq(1,length(model$K_uu),by= dim(model$K_uu)[1]+1)] + 1/betaAdd
}
ind = gpDataIndices(model, i)
invK = .jitCholInv(model$K_uu[ind,ind], silent=TRUE) ## pdinv + jitChol combined
model$invK_uu[[i]] = invK$invM
model$logDetK_uu[i] = 2* sum( log ( diag(invK$chol) ) )
}
}
} else if (model$approx %in% c("dtc", "dtcvar", "fitc", "pitc")) {
# Sparse approximations: kernel over the inducing inputs X_u, plus the
# cross-covariance K_uf between inducing inputs and data.
model$K_uu = kernCompute(model$kern, X_u)
if ((!"whiteVariance" %in% names(model$kern)) || model$kern$whiteVariance == 0) {
## There is no white noise term so add some jitter.
model$K_uu = model$K_uu + diag.spam(jitter, dim(model$K_uu)[1]) ## need 'spam'
#sparseDiag(matrix(jitter, dim(model$K_uu)[1], 1))
}
model$K_uf = kernCompute(model$kern, X_u, X)
invK = .jitCholInv(model$K_uu, silent=TRUE) ## pdinv + jitChol combined
model$invK_uu = invK$invM
model$logDetK_uu = 2* sum( log ( diag(invK$chol) ) )
}
# FITC/DTCVAR additionally cache the diagonal of the data kernel; PITC
# caches one kernel block per data block (and per output dimension in the
# non-spherical case).
if (model$approx %in% c("dtcvar", "fitc"))
model$diagK = kernDiagCompute(model$kern, X)
else if (model$approx == "pitc") {
if ((!"isSpherical" %in% names(model)) || model$isSpherical) {
model$K=list()
for (i in 1:length(model$blockEnd)) {
ind = gpBlockIndices(model, i)
model$K[[i]] = kernCompute(model$kern, X[ind, ,drop=FALSE])
}
} else {
model$K = matrix(0, length(model$blockEnd), model$d)
# Convert the placeholder matrix into a nested list: blocks x dimensions.
model$K = lapply(split(model$K,row(model$K)), split, 1:model$d)
for (j in 1:model$d) {
for (i in 1:length(model$blockEnd)) {
ind = gpDataIndices(model, j, i)
model$K[[i]][[j]] = kernCompute(model$kern, X[ind, ,drop=FALSE])
}
}
}
}
# Refresh quantities that depend on the kernel caches updated above.
model = gpUpdateAD(model, X)
return (model)
}
| /R/gpUpdateKernels.R | no_license | SheffieldML/gptk | R | false | false | 2,789 | r | gpUpdateKernels <- function (model, X, X_u) {
jitter = 1e-6
if (model$approx == "ftc") {
## (dev note) In the long term, we should allow different kernels in each dimension here.
# browser()
model$K_uu = kernCompute(model$kern, X)
if ((!"isSpherical" %in% names(model)) || model$isSpherical) {
## Add inverse beta to diagonal if it exists.
if ("beta" %in% names(model) && length(model$beta)>0) {
model$K_uu[seq(1,length(model$K_uu),by= dim(model$K_uu)[1]+1)] =
model$K_uu[seq(1,length(model$K_uu),by= dim(model$K_uu)[1]+1)] + 1/model$beta
}
# browser()
invK = .jitCholInv(model$K_uu, silent=TRUE) ## pdinv + jitChol combined
model$invK_uu = invK$invM
model$logDetK_uu = 2* sum( log ( diag(invK$chol) ) )
} else {
model$invK_uu=list(); model$logDetK_uu=matrix(0,1,model$d)
for (i in 1:model$d) {
if ("beta" %in% names(model) && length(model$beta)>0) {
if (dim(as.matrix(model$beta))[2] == model$d)
betaAdd = model$beta[, i]
else
betaAdd = model$beta
model$K_uu[seq(1,length(model$K_uu),by= dim(model$K_uu)[1]+1)] =
model$K_uu[seq(1,length(model$K_uu),by= dim(model$K_uu)[1]+1)] + 1/betaAdd
}
ind = gpDataIndices(model, i)
invK = .jitCholInv(model$K_uu[ind,ind], silent=TRUE) ## pdinv + jitChol combined
model$invK_uu[[i]] = invK$invM
model$logDetK_uu[i] = 2* sum( log ( diag(invK$chol) ) )
}
}
} else if (model$approx %in% c("dtc", "dtcvar", "fitc", "pitc")) {
model$K_uu = kernCompute(model$kern, X_u)
if ((!"whiteVariance" %in% names(model$kern)) || model$kern$whiteVariance == 0) {
## There is no white noise term so add some jitter.
model$K_uu = model$K_uu + diag.spam(jitter, dim(model$K_uu)[1]) ## need 'spam'
#sparseDiag(matrix(jitter, dim(model$K_uu)[1], 1))
}
model$K_uf = kernCompute(model$kern, X_u, X)
invK = .jitCholInv(model$K_uu, silent=TRUE) ## pdinv + jitChol combined
model$invK_uu = invK$invM
model$logDetK_uu = 2* sum( log ( diag(invK$chol) ) )
}
if (model$approx %in% c("dtcvar", "fitc"))
model$diagK = kernDiagCompute(model$kern, X)
else if (model$approx == "pitc") {
if ((!"isSpherical" %in% names(model)) || model$isSpherical) {
model$K=list()
for (i in 1:length(model$blockEnd)) {
ind = gpBlockIndices(model, i)
model$K[[i]] = kernCompute(model$kern, X[ind, ,drop=FALSE])
}
} else {
model$K = matrix(0, length(model$blockEnd), model$d)
model$K = lapply(split(model$K,row(model$K)), split, 1:model$d)
for (j in 1:model$d) {
for (i in 1:length(model$blockEnd)) {
ind = gpDataIndices(model, j, i)
model$K[[i]][[j]] = kernCompute(model$kern, X[ind, ,drop=FALSE])
}
}
}
}
model = gpUpdateAD(model, X)
return (model)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/envelopes.r
\name{plot.combined_global_envelope}
\alias{plot.combined_global_envelope}
\title{Plot method for the class 'combined_global_envelope'}
\usage{
\method{plot}{combined_global_envelope}(
x,
labels,
scales,
sign.col = "red",
ncol = 2 + 1 * (length(x) == 3),
digits = 3,
level = 1,
...
)
}
\arguments{
\item{x}{A 'combined_global_envelope' object}
\item{labels}{A character vector of suitable length.
If \code{dotplot = TRUE} (for the level 2 test), then labels for the tests at x-axis.
Otherwise labels for the separate plots.}
\item{scales}{See \code{\link[ggplot2]{facet_wrap}}.
Use \code{scales = "free"} when the scales of the functions in the global envelope
vary. \code{scales = "fixed"} is a good choice, when you want the same y-axis for all components.
A sensible default based on r-values exists.}
\item{sign.col}{The color for the observed curve when outside the global envelope
(significant regions). Default to "red". Setting the color to \code{NULL} corresponds
to no coloring. If the object contains several envelopes, the coloring is done for
the widest one.}
\item{ncol}{The maximum number of columns for the figures.
Default 2 or 3, if the length of x equals 3.
(Relates to the number of curve_sets that have been combined.)}
\item{digits}{The number of digits used for printing the p-value or p-interval
in the default main.}
\item{level}{1 or 2. In the case of two-step combined tests (with several test functions),
two different plots are available:
1 for plotting the combined global envelopes (default and most often wanted) or
2 for plotting the second level test result.}
\item{...}{Ignored.}
}
\description{
This function provides plots for combined global envelopes.
}
\details{
Plotting method for the class 'combined_global_envelope', i.e. combined envelopes for
1d functions.
}
\seealso{
\code{\link{central_region}}
}
| /man/plot.combined_global_envelope.Rd | no_license | myllym/GET | R | false | true | 1,960 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/envelopes.r
\name{plot.combined_global_envelope}
\alias{plot.combined_global_envelope}
\title{Plot method for the class 'combined_global_envelope'}
\usage{
\method{plot}{combined_global_envelope}(
x,
labels,
scales,
sign.col = "red",
ncol = 2 + 1 * (length(x) == 3),
digits = 3,
level = 1,
...
)
}
\arguments{
\item{x}{A 'combined_global_envelope' object}
\item{labels}{A character vector of suitable length.
If \code{dotplot = TRUE} (for the level 2 test), then labels for the tests at x-axis.
Otherwise labels for the separate plots.}
\item{scales}{See \code{\link[ggplot2]{facet_wrap}}.
Use \code{scales = "free"} when the scales of the functions in the global envelope
vary. \code{scales = "fixed"} is a good choice, when you want the same y-axis for all components.
A sensible default based on r-values exists.}
\item{sign.col}{The color for the observed curve when outside the global envelope
(significant regions). Default to "red". Setting the color to \code{NULL} corresponds
to no coloring. If the object contains several envelopes, the coloring is done for
the widest one.}
\item{ncol}{The maximum number of columns for the figures.
Default 2 or 3, if the length of x equals 3.
(Relates to the number of curve_sets that have been combined.)}
\item{digits}{The number of digits used for printing the p-value or p-interval
in the default main.}
\item{level}{1 or 2. In the case of two-step combined tests (with several test functions),
two different plots are available:
1 for plotting the combined global envelopes (default and most often wanted) or
2 for plotting the second level test result.}
\item{...}{Ignored.}
}
\description{
This function provides plots for combined global envelopes.
}
\details{
Plotting method for the class 'combined_global_envelope', i.e. combined envelopes for
1d functions.
}
\seealso{
\code{\link{central_region}}
}
|
##' open KEGG pathway with web browser
##'
##'
##' @title browseKEGG
##' @param x an instance of enrichResult or gseaResult
##' @param pathID pathway ID
##' @return the URL that was opened, invisibly
##' @importFrom utils browseURL
##' @export
##' @author Guangchuang Yu
browseKEGG <- function(x, pathID) {
    ## Pull the genes that the result object associates with this pathway
    ## so the KEGG viewer can highlight them.
    genes <- x[pathID, "geneID"]
    url <- paste0("https://www.kegg.jp/kegg-bin/show_pathway?", pathID, '/', genes)
    ## Open in the default browser; return the URL invisibly so callers can
    ## capture it without it being printed.
    browseURL(url)
    invisible(url)
}
##' search kegg organism, listed in https://www.genome.jp/kegg/catalog/org_list.html
##'
##'
##' @title search_kegg_organism
##' @param str string to match against the chosen column
##' @param by one of 'kegg_code', 'scientific_name' and 'common_name'
##' @param ignore.case TRUE or FALSE
##' @param use_internal_data logical, use kegg_species.rda or latest online KEGG data
##' @return data.frame of matching organisms
##' @export
##' @author Guangchuang Yu
search_kegg_organism <- function(str, by="scientific_name", ignore.case=FALSE,
                                 use_internal_data = TRUE) {
    ## Validate `by` up front.  Previously this check only ran when the
    ## internal data set was used, so an invalid `by` together with
    ## use_internal_data = FALSE failed obscurely inside grep().
    by <- match.arg(by, c("kegg_code", "scientific_name", "common_name"))
    if (use_internal_data) {
        kegg_species <- kegg_species_data()
    } else {
        ## Fetch the current organism list from the KEGG REST service.
        kegg_species <- get_kegg_species()
    }
    ## Return the rows whose chosen column matches `str`.
    idx <- grep(str, kegg_species[, by], ignore.case = ignore.case)
    kegg_species[idx, ]
}
kegg_species_data <- function() {
    ## Load the `kegg_species` table bundled with clusterProfiler.  data()
    ## places the object into the global environment, from which it is
    ## fetched explicitly and returned.
    dataset <- "kegg_species"
    utils::data(list = dataset, package = "clusterProfiler")
    get(dataset, envir = .GlobalEnv)
}
get_kegg_species <- function(save = FALSE) {
    ## Fetch the current organism list from the KEGG REST API.
    url <- "https://rest.kegg.jp/list/organism"
    raw <- read.table(url, fill = TRUE, sep = "\t", header = FALSE, quote = "")
    ## Drop the first column (internal T-number id).
    raw <- raw[, -1]
    ## The name column looks like "Scientific name (common name)"; split it
    ## into the scientific name and the parenthesised common name.
    sci_name <- gsub(" \\(.*", "", raw[, 2])
    com_name <- gsub("\\)", "", gsub(".*\\(", "", raw[, 2]))
    kegg_species <- data.frame(kegg_code = raw[, 1],
                               scientific_name = sci_name,
                               common_name = com_name)
    ## Optionally cache the table as an .rda for use as internal data.
    if (save) save(kegg_species, file = "kegg_species.rda")
    invisible(kegg_species)
}
## get_kegg_species <- function() {
## pkg <- "XML"
## requireNamespace(pkg)
## readHTMLTable <- eval(parse(text="XML::readHTMLTable"))
## x <- readHTMLTable("https://www.genome.jp/kegg/catalog/org_list.html")
## y <- get_species_name(x[[2]], "Eukaryotes")
## y2 <- get_species_name(x[[3]], 'Prokaryotes')
## sci_name <- gsub(" \\(.*$", '', y[,2])
## com_name <- gsub("[^\\(]+ \\(([^\\)]+)\\)$", '\\1', y[,2])
## eu <- data.frame(kegg_code=unlist(y[,1]),
## scientific_name = sci_name,
## common_name = com_name,
## stringsAsFactors = FALSE)
## pr <- data.frame(kegg_code=unlist(y2[,1]),
## scientific_name = unlist(y2[,2]),
## common_name = NA,
## stringsAsFactors = FALSE)
## kegg_species <- rbind(eu, pr)
## save(kegg_species, file="kegg_species.rda")
## invisible(kegg_species)
## }
## get_species_name <- function(y, table) {
## idx <- get_species_name_idx(y, table)
## t(sapply(1:nrow(idx), function(i) {
## y[] = lapply(y, as.character)
## y[i, idx[i,]]
## }))
## }
## get_species_name_idx <- function(y, table='Eukaryotes') {
## table <- match.arg(table, c("Eukaryotes", "Prokaryotes"))
## t(apply(y, 1, function(x) {
## ii <- which(!is.na(x))
## n <- length(ii)
## if (table == "Eukaryotes") {
## return(ii[(n-2):(n-1)])
## } else {
## return(ii[(n-3):(n-2)])
## }
## }))
## }
##' Download a KEGG REST endpoint and parse it into a from/to table.
##'
##' @param rest_url full URL of a KEGG REST query
##' @return a data.frame with columns `from` and `to`, or NULL when the
##'     download fails
##' @importFrom downloader download
kegg_rest <- function(rest_url) {
    message('Reading KEGG annotation online: "', rest_url, '"...')
    f <- tempfile()
    ## Remove the temporary download file on exit; previously it was left
    ## behind for the lifetime of the session's tempdir.
    on.exit(unlink(f), add = TRUE)
    dl <- mydownload(rest_url, destfile = f)
    if (is.null(dl)) {
        message("fail to download KEGG data...")
        return(NULL)
    }
    ## Each response line is "<from>\t<to>"; split and stack into a matrix.
    content <- readLines(f)
    content %<>% strsplit(., "\t") %>% do.call('rbind', .)
    res <- data.frame(from=content[,1],
                      to=content[,2])
    return(res)
}
## KEGG REST API: https://www.genome.jp/kegg/rest/keggapi.html
kegg_link <- function(target_db, source_db) {
    ## Query the "link" endpoint, which maps entries of `source_db` onto
    ## `target_db`, e.g. kegg_link('hsa', 'pathway').
    kegg_rest(paste0("https://rest.kegg.jp/link/", target_db, "/", source_db,
                     collapse = ""))
}
kegg_list <- function(db) {
    ## Query the KEGG "list" endpoint for all entries of database `db`.
    kegg_rest(paste0("https://rest.kegg.jp/list/", db, collapse = ""))
}
##' convert ko ID to descriptive name
##'
##'
##' @title ko2name
##' @param ko vector of KEGG Orthology pathway ids (e.g. "ko00010")
##' @return data.frame with columns `ko` and `name`
##' @export
##' @author guangchuang yu
ko2name <- function(ko) {
    ## The KEGG pathway listing uses "path:map" identifiers, so rewrite the
    ## "ko..." ids into that form before joining.
    pathways <- kegg_list('pathway')
    lookup <- data.frame(ko = ko, from = gsub("^ko", "path:map", ko))
    ## Left join so unknown ids are kept with an NA name.
    merged <- merge(lookup, pathways, by = 'from', all.x = TRUE)
    merged <- merged[, c("ko", "to")]
    colnames(merged) <- c("ko", "name")
    return(merged)
}
| /R/kegg-utilities.R | no_license | altairwei/clusterProfiler | R | false | false | 5,201 | r |
##' open KEGG pathway with web browser
##'
##'
##' @title browseKEGG
##' @param x an instance of enrichResult or gseaResult
##' @param pathID pathway ID
##' @return url
##' @importFrom utils browseURL
##' @export
##' @author Guangchuang Yu
browseKEGG <- function(x, pathID) {
url <- paste0("https://www.kegg.jp/kegg-bin/show_pathway?", pathID, '/', x[pathID, "geneID"])
browseURL(url)
invisible(url)
}
##' search kegg organism, listed in https://www.genome.jp/kegg/catalog/org_list.html
##'
##'
##' @title search_kegg_organism
##' @param str string to match against the chosen column
##' @param by one of 'kegg_code', 'scientific_name' and 'common_name'
##' @param ignore.case TRUE or FALSE
##' @param use_internal_data logical, use kegg_species.rda or latest online KEGG data
##' @return data.frame of matching organisms
##' @export
##' @author Guangchuang Yu
search_kegg_organism <- function(str, by="scientific_name", ignore.case=FALSE,
                                 use_internal_data = TRUE) {
    ## Validate `by` up front.  Previously this check only ran when the
    ## internal data set was used, so an invalid `by` together with
    ## use_internal_data = FALSE failed obscurely inside grep().
    by <- match.arg(by, c("kegg_code", "scientific_name", "common_name"))
    if (use_internal_data) {
        kegg_species <- kegg_species_data()
    } else {
        ## Fetch the current organism list from the KEGG REST service.
        kegg_species <- get_kegg_species()
    }
    ## Return the rows whose chosen column matches `str`.
    idx <- grep(str, kegg_species[, by], ignore.case = ignore.case)
    kegg_species[idx, ]
}
kegg_species_data <- function() {
utils::data(list="kegg_species", package="clusterProfiler")
get("kegg_species", envir = .GlobalEnv)
}
get_kegg_species <- function(save = FALSE) {
url <- "https://rest.kegg.jp/list/organism"
species <- read.table(url, fill = TRUE, sep = "\t", header = F, quote = "")
species <- species[, -1]
scientific_name <- gsub(" \\(.*", "", species[,2])
common_name <- gsub(".*\\(", "", species[,2])
common_name <- gsub("\\)", "", common_name)
kegg_species <- data.frame(kegg_code = species[, 1],
scientific_name = scientific_name,
common_name = common_name)
if (save) save(kegg_species, file="kegg_species.rda")
invisible(kegg_species)
}
## get_kegg_species <- function() {
## pkg <- "XML"
## requireNamespace(pkg)
## readHTMLTable <- eval(parse(text="XML::readHTMLTable"))
## x <- readHTMLTable("https://www.genome.jp/kegg/catalog/org_list.html")
## y <- get_species_name(x[[2]], "Eukaryotes")
## y2 <- get_species_name(x[[3]], 'Prokaryotes')
## sci_name <- gsub(" \\(.*$", '', y[,2])
## com_name <- gsub("[^\\(]+ \\(([^\\)]+)\\)$", '\\1', y[,2])
## eu <- data.frame(kegg_code=unlist(y[,1]),
## scientific_name = sci_name,
## common_name = com_name,
## stringsAsFactors = FALSE)
## pr <- data.frame(kegg_code=unlist(y2[,1]),
## scientific_name = unlist(y2[,2]),
## common_name = NA,
## stringsAsFactors = FALSE)
## kegg_species <- rbind(eu, pr)
## save(kegg_species, file="kegg_species.rda")
## invisible(kegg_species)
## }
## get_species_name <- function(y, table) {
## idx <- get_species_name_idx(y, table)
## t(sapply(1:nrow(idx), function(i) {
## y[] = lapply(y, as.character)
## y[i, idx[i,]]
## }))
## }
## get_species_name_idx <- function(y, table='Eukaryotes') {
## table <- match.arg(table, c("Eukaryotes", "Prokaryotes"))
## t(apply(y, 1, function(x) {
## ii <- which(!is.na(x))
## n <- length(ii)
## if (table == "Eukaryotes") {
## return(ii[(n-2):(n-1)])
## } else {
## return(ii[(n-3):(n-2)])
## }
## }))
## }
##' Download a KEGG REST endpoint and parse it into a from/to table.
##'
##' @param rest_url full URL of a KEGG REST query
##' @return a data.frame with columns `from` and `to`, or NULL when the
##'     download fails
##' @importFrom downloader download
kegg_rest <- function(rest_url) {
    message('Reading KEGG annotation online: "', rest_url, '"...')
    f <- tempfile()
    ## Remove the temporary download file on exit; previously it was left
    ## behind for the lifetime of the session's tempdir.
    on.exit(unlink(f), add = TRUE)
    dl <- mydownload(rest_url, destfile = f)
    if (is.null(dl)) {
        message("fail to download KEGG data...")
        return(NULL)
    }
    ## Each response line is "<from>\t<to>"; split and stack into a matrix.
    content <- readLines(f)
    content %<>% strsplit(., "\t") %>% do.call('rbind', .)
    res <- data.frame(from=content[,1],
                      to=content[,2])
    return(res)
}
## https://www.genome.jp/kegg/rest/keggapi.html
## kegg_link('hsa', 'pathway')
kegg_link <- function(target_db, source_db) {
url <- paste0("https://rest.kegg.jp/link/", target_db, "/", source_db, collapse="")
kegg_rest(url)
}
kegg_list <- function(db) {
url <- paste0("https://rest.kegg.jp/list/", db, collapse="")
kegg_rest(url)
}
##' convert ko ID to descriptive name
##'
##'
##' @title ko2name
##' @param ko ko ID
##' @return data.frame
##' @export
##' @author guangchuang yu
ko2name <- function(ko) {
p <- kegg_list('pathway')
ko2 <- gsub("^ko", "path:map", ko)
ko.df <- data.frame(ko=ko, from=ko2)
res <- merge(ko.df, p, by = 'from', all.x=TRUE)
res <- res[, c("ko", "to")]
colnames(res) <- c("ko", "name")
return(res)
}
|
#!/usr/bin/env Rscript
main <- function(input, output, log, threads) {
    ## Redirect stdout and stderr to the Snakemake log files.  Restore the
    ## sinks and close both connections on exit — previously they were
    ## leaked, and stayed diverted if any step below errored.
    out <- file(log$out, open = "wt")
    err <- file(log$err, open = "wt")
    sink(out, type = "output")
    sink(err, type = "message")
    on.exit({
        sink(type = "output")
        sink(type = "message")
        close(out)
        close(err)
    }, add = TRUE)
    # Script function
    library(BiocParallel)
    library(scuttle)
    library(velociraptor)
    ## Load the expression object and the velocity (spliced/unspliced)
    ## object, and align them on their shared genes (rows) and cells (cols).
    sce <- readRDS(input$rds[1])
    vel <- readRDS(input$rds[2])
    row <- intersect(rownames(sce), rownames(vel))
    col <- intersect(colnames(sce), colnames(vel))
    sce <- sce[row, col]
    vel <- vel[row, col]
    ## Copy over the assays scVelo requires.
    assay(sce, "spliced") <- assay(vel, "spliced")
    assay(sce, "unspliced") <- assay(vel, "unspliced")
    ## Run scVelo on the highly variable genes, reusing the stored PCA.
    hvg <- rowSubset(sce, "HVG")
    par <- MulticoreParam(workers = threads)
    sce <- scvelo(x = sce, subset.row = hvg, use.dimred = "PCA", BPPARAM = par)
    saveRDS(sce, file = output$rds)
}
main(snakemake@input, snakemake@output, snakemake@log, snakemake@threads)
| /workflow/scripts/trajectory/scvelo.R | permissive | jma1991/DiasTailbudData | R | false | false | 942 | r | #!/usr/bin/env Rscript
main <- function(input, output, log, threads) {
    ## Redirect stdout and stderr to the Snakemake log files.  Restore the
    ## sinks and close both connections on exit — previously they were
    ## leaked, and stayed diverted if any step below errored.
    out <- file(log$out, open = "wt")
    err <- file(log$err, open = "wt")
    sink(out, type = "output")
    sink(err, type = "message")
    on.exit({
        sink(type = "output")
        sink(type = "message")
        close(out)
        close(err)
    }, add = TRUE)
    # Script function
    library(BiocParallel)
    library(scuttle)
    library(velociraptor)
    ## Load the expression object and the velocity (spliced/unspliced)
    ## object, and align them on their shared genes (rows) and cells (cols).
    sce <- readRDS(input$rds[1])
    vel <- readRDS(input$rds[2])
    row <- intersect(rownames(sce), rownames(vel))
    col <- intersect(colnames(sce), colnames(vel))
    sce <- sce[row, col]
    vel <- vel[row, col]
    ## Copy over the assays scVelo requires.
    assay(sce, "spliced") <- assay(vel, "spliced")
    assay(sce, "unspliced") <- assay(vel, "unspliced")
    ## Run scVelo on the highly variable genes, reusing the stored PCA.
    hvg <- rowSubset(sce, "HVG")
    par <- MulticoreParam(workers = threads)
    sce <- scvelo(x = sce, subset.row = hvg, use.dimred = "PCA", BPPARAM = par)
    saveRDS(sce, file = output$rds)
}
main(snakemake@input, snakemake@output, snakemake@log, snakemake@threads)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/path_paint.R
\name{getHumanPathIds}
\alias{getHumanPathIds}
\title{Pull out human path ids from the reactome.db annotation package}
\usage{
getHumanPathIds()
}
\value{
\code{list}, names are path names, values are db ids.
}
\description{
Get human path ids from the reactome.db annotation package
}
| /man/getHumanPathIds.Rd | no_license | biodev/packageDir | R | false | true | 377 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/path_paint.R
\name{getHumanPathIds}
\alias{getHumanPathIds}
\title{Pull out human path ids from the reactome.db annotation package}
\usage{
getHumanPathIds()
}
\value{
\code{list}, names are path names, values are db ids.
}
\description{
Get human path ids from the reactome.db annotation package
}
|
#### read me ####
# the purpose of this script is to test and estimate a linear June temperature trend for the East Fork Jemez Hidden Valley site, using GLS models that account for temporal autocorrelation.
#### load libraries ####
library(lubridate)
library(tidyverse)
library(forecast)
library(tsibble)
library(dplyr)
library(nlme)
library(lme4)
library(beepr)
library(bbmle)
library(MuMIn)
library(gridExtra)
library(car)
library(visreg)
#### load data ####
LJWB_file_list = list.files(path="C:/Users/Brionna/OneDrive - University of New Mexico/Classes/EPS545_BIO502/VCNP/VCNP_Repo/processed data/monthly",
recursive=F,
full.names=TRUE)
LJWB_file_list_short = list.files(path="C:/Users/Brionna/OneDrive - University of New Mexico/Classes/EPS545_BIO502/VCNP/VCNP_Repo/processed data/monthly",
recursive=F,
full.names=FALSE) %>%
str_replace("monthly_temp.csv", "")
#### manipulate_data ####
View(LJWB_file_list_short[[3]])
p<-tibble(path = LJWB_file_list) %>%
mutate(file_contents = map(path, read_csv)) %>%
unnest(file_contents)
HiddenValley_June<-filter(p,siteID=="EastForkJemezHiddenValley")
HiddenValley_June<-filter(HiddenValley_June,mo=="6")
#### linear trends ####
# add simple time steps to df
HiddenValley_June$t = c(1:nrow(HiddenValley_June))
mod = lm(Value.mn ~ t, HiddenValley_June)
summary(mod)
visreg(mod,"t")
confint(mod, 't', level=0.95)
## diagnostics ##
Acf(resid(mod))
forecast::checkresiduals(mod)
####test and calculate trends - nlme::gls####
# ask auto.arima what it thinks the autocorrelation structure is
auto.arima(HiddenValley_June$Value.mn)
# fit AR(1) regression model with time as a predictor
mod_Ar1 = gls(Value.mn ~ t, data=HiddenValley_June, correlation=corCAR1(), method="ML")
# fit some other candidate structures - not sure if CorARMA is appropriate for unevenly spaced data- but ultimately used the first model that uses CorCAR to account for the uneven spacing
mod_AMRAp1q1 = gls(Value.mn ~ t, data=HiddenValley_June, correlation=corARMA(p=1,q=1), method="ML")
mod_AMRAp2 = gls(Value.mn ~ t, data=HiddenValley_June, correlation=corARMA(p=2), method="ML")
# compare models with AICc (for small dataset) (looking for value less than 2)
bbmle::AICctab(mod_Ar1,mod_AMRAp1q1,mod_AMRAp2)
summary(mod_Ar1)
# intervals() for nlme is equivalent to confint() for lm
intervals(mod_Ar1)
visreg(mod_Ar1,"t")
Acf(resid(mod_Ar1))
# extract and assess residuals
par(mfrow=c(1,3))
Acf(resid(mod_Ar1, type = "normalized"), main="GLS AR(1) model residuals")
plot(resid(mod_Ar1, type = "normalized")~c(1:length(HiddenValley_June$t)), main="GLS AR(1) model residuals"); abline(h=0)
qqnorm(resid(mod_Ar1, type = "normalized"), main="GLS AR(1) model residuals", pch=16,
xlab=paste("shapiro test: ", round(shapiro.test(resid(mod_Ar1, type = "normalized"))$statistic,2))); qqline(resid(mod_Ar1, type = "normalized"))
# extract parameter estimates for comparison with MARSS
mod_Ar1.phi = coef(mod_Ar1$modelStruct[[1]], unconstrained=FALSE)
ests.gls = c(b=mod_Ar1.phi, alpha=coef(mod_Ar1)[1],
time=coef(mod_Ar1)[2],
logLik=logLik(mod_Ar1))
| /R_scripts/04_gls_HiddenValleyJune.R | no_license | UNM-Stakeholder-driven-Data-Analysis/VCNP_Repo | R | false | false | 3,145 | r | #### read me ####
# the purpose of this script is to test and estimate a linear June temperature trend for the East Fork Jemez Hidden Valley site, using GLS models that account for temporal autocorrelation.
#### load libraries ####
library(lubridate)
library(tidyverse)
library(forecast)
library(tsibble)
library(dplyr)
library(nlme)
library(lme4)
library(beepr)
library(bbmle)
library(MuMIn)
library(gridExtra)
library(car)
library(visreg)
#### load data ####
LJWB_file_list = list.files(path="C:/Users/Brionna/OneDrive - University of New Mexico/Classes/EPS545_BIO502/VCNP/VCNP_Repo/processed data/monthly",
recursive=F,
full.names=TRUE)
LJWB_file_list_short = list.files(path="C:/Users/Brionna/OneDrive - University of New Mexico/Classes/EPS545_BIO502/VCNP/VCNP_Repo/processed data/monthly",
recursive=F,
full.names=FALSE) %>%
str_replace("monthly_temp.csv", "")
#### manipulate_data ####
View(LJWB_file_list_short[[3]])
p<-tibble(path = LJWB_file_list) %>%
mutate(file_contents = map(path, read_csv)) %>%
unnest(file_contents)
HiddenValley_June<-filter(p,siteID=="EastForkJemezHiddenValley")
HiddenValley_June<-filter(HiddenValley_June,mo=="6")
#### linear trends ####
# add simple time steps to df
HiddenValley_June$t = c(1:nrow(HiddenValley_June))
mod = lm(Value.mn ~ t, HiddenValley_June)
summary(mod)
visreg(mod,"t")
confint(mod, 't', level=0.95)
## diagnostics ##
Acf(resid(mod))
forecast::checkresiduals(mod)
####test and calculate trends - nlme::gls####
# ask auto.arima what it thinks the autocorrelation structure is
auto.arima(HiddenValley_June$Value.mn)
# fit AR(1) regression model with time as a predictor
mod_Ar1 = gls(Value.mn ~ t, data=HiddenValley_June, correlation=corCAR1(), method="ML")
# fit some other candidate structures - not sure if CorARMA is appropriate for unevenly spaced data- but ultimately used the first model that uses CorCAR to account for the uneven spacing
mod_AMRAp1q1 = gls(Value.mn ~ t, data=HiddenValley_June, correlation=corARMA(p=1,q=1), method="ML")
mod_AMRAp2 = gls(Value.mn ~ t, data=HiddenValley_June, correlation=corARMA(p=2), method="ML")
# compare models with AICc (for small dataset) (looking for value less than 2)
bbmle::AICctab(mod_Ar1,mod_AMRAp1q1,mod_AMRAp2)
summary(mod_Ar1)
# intervals() for nlme is equivalent to confint() for lm
intervals(mod_Ar1)
visreg(mod_Ar1,"t")
Acf(resid(mod_Ar1))
# extract and assess residuals
par(mfrow=c(1,3))
Acf(resid(mod_Ar1, type = "normalized"), main="GLS AR(1) model residuals")
plot(resid(mod_Ar1, type = "normalized")~c(1:length(HiddenValley_June$t)), main="GLS AR(1) model residuals"); abline(h=0)
qqnorm(resid(mod_Ar1, type = "normalized"), main="GLS AR(1) model residuals", pch=16,
xlab=paste("shapiro test: ", round(shapiro.test(resid(mod_Ar1, type = "normalized"))$statistic,2))); qqline(resid(mod_Ar1, type = "normalized"))
# extract parameter estimates for comparison with MARSS
mod_Ar1.phi = coef(mod_Ar1$modelStruct[[1]], unconstrained=FALSE)
ests.gls = c(b=mod_Ar1.phi, alpha=coef(mod_Ar1)[1],
time=coef(mod_Ar1)[2],
logLik=logLik(mod_Ar1))
|
#' @export
SAVI <- function() {
appDir <- system.file("SAVI", package = "SAVI")
if (appDir == "") {
stop("Could not find SAVI directory. Try re-installing `SAVI`.", call. = FALSE)
}
shiny::runApp(appDir, display.mode = "normal", launch.browser = TRUE)
} | /R/SAVI.R | permissive | christophe-stevens/SAVI-package | R | false | false | 266 | r | #' @export
SAVI <- function() {
appDir <- system.file("SAVI", package = "SAVI")
if (appDir == "") {
stop("Could not find SAVI directory. Try re-installing `SAVI`.", call. = FALSE)
}
shiny::runApp(appDir, display.mode = "normal", launch.browser = TRUE)
} |
random_weight <- function(n_to, n_from) {
  ## Draw an (n_to x n_from) matrix of standard-normal initial weights.
  matrix(rnorm(n_to * n_from), n_to, n_from)
}
weight_list <- function(levels) {
  ## Build one random weight matrix per pair of adjacent layers: the
  ## matrix for layer i maps layer i-1 activations to layer i.
  weights <- list()
  for (i in seq(2, length(levels))) {
    weights[[i - 1]] <- random_weight(levels[i], levels[i - 1])
  }
  weights
}
bias_list <- function(levels) {
  ## One random column vector of biases for every non-input layer.
  biases <- list()
  for (i in seq(2, length(levels))) {
    biases[[i - 1]] <- random_weight(levels[i], 1)
  }
  biases
}
activate <- function(x) {
  ## Logistic sigmoid activation, applied elementwise.
  1 / (1 + exp(-x))
}
activate_prime <- function(x) {
  ## Derivative of the logistic sigmoid: sigma(x) * (1 - sigma(x)).
  s <- activate(x)
  s * (1 - s)
}
z <- function(w, a_prior, bias) {
  ## Pre-activation input to a layer: weighted previous activations plus bias.
  (w %*% a_prior) + bias
}
`%.%` <- function(X, Y) {
  ## Explicit Hadamard (elementwise) product, named to avoid confusion
  ## with the matrix product %*%.
  X * Y
}
## Forward pass through the network.
##
## x            column vector of inputs to the first layer.
## weights_list one weight matrix per layer (as built by weight_list()).
## bias_list    one bias vector per layer (as built by bias_list()).
##
## Returns a list with:
##   zs - pre-activation values z = W a + b for every layer
##   as - post-activation (sigmoid) outputs for every layer
##   x  - the network input, wrapped in a one-element list
forward_propagate <- function( x,weights_list,bias_list ){
zs <- list()
as <- list()
x_list <- list()
levels <- length( weights_list )
## The first layer is fed directly by the input x.
zs[[1]] <- z( weights_list[[1]], x, bias_list[[1]] )
as[[1]] <- activate( zs[[1]] )
x_list[[1]] <- x
## Remaining layers consume the previous layer's activations
## (as.matrix() keeps them in column-vector form for %*%).
if (levels != 1){
for( i in seq(2, levels ) ){
zs[[i]] <- z( weights_list[[i]], as.matrix( as[[i-1]] ), bias_list[[i]] )
as[[i]] <- activate( zs[[i]] )
}
}
return( list( zs=zs,as=as,x=x_list ) )
}
output_error <- function(z, r) {
  ## Output-layer delta for squared-error loss:
  ## (sigma(z) - r) elementwise times sigma'(z).
  residual <- activate(z) - r
  residual %.% activate_prime(z)
}
## Output-layer delta used for a single-layer network: the plain residual
## a - r, i.e. without the sigma'(z) factor applied in output_error().
output_error_one_layer <- function(z, r){
activate(z) - r
}
hidden_error <- function(w, delta, z) {
  ## Delta back-propagated through weight matrix w:
  ## (w^T delta) elementwise times sigma'(z).
  propagated <- t(w) %*% delta
  propagated %.% activate_prime(z)
}
## Backward pass: compute the per-layer error terms (deltas).
##
## res         output of forward_propagate() (uses its $zs).
## weight_list current weight matrices.
## bias_list   current bias vectors (unused here, kept for a uniform API).
## r           target output for this training instance.
##
## Returns a list of deltas, one per layer, ordered from layer 1 to L.
backward_propagate <- function( res,weight_list,bias_list,r ){
L <- length( weight_list )
deltas <- list()
## Single-layer networks use the plain residual as the output delta.
if (L == 1){
deltas[[1]] <- output_error_one_layer( res$zs[[1]],r )
return( deltas )
}
else{
## Start from the output layer, then walk the deltas back down
## through each hidden layer.
deltas[[L]] <- output_error( res$zs[[L]],r )
for( i in seq( L-1,1 ) ){
deltas[[i]] <- hidden_error( weight_list[[i+1]], deltas[[i+1]], res$zs[[i]] )
}
}
return( deltas )
}
## Apply one gradient-descent step to every layer's weights and biases.
##
## res          output of forward_propagate() (uses $as and $x).
## deltas       per-layer error terms from backward_propagate().
## weight_list  current weight matrices.
## bias_list    current bias vectors.
## eta          learning rate.
##
## Returns a list with the updated weight_list and bias_list.
gradient_descent <- function(res, deltas, weight_list, bias_list, eta) {
  L <- length(weight_list)
  if (L == 1) {
    weight_list[[1]] <- weight_list[[1]] - eta * deltas[[1]] %*% t(res$x[[1]])
    bias_list[[1]] <- bias_list[[1]] - eta * deltas[[1]]
  } else {
    ## Layers L..2: gradient of layer i's weights uses layer i-1's activations.
    for (i in seq(L, 2)) {
      weight_list[[i]] <- weight_list[[i]] - eta * deltas[[i]] %*% t(res$as[[i - 1]])
      bias_list[[i]] <- bias_list[[i]] - eta * deltas[[i]]
    }
    ## BUG FIX: the first layer was never updated in multi-layer networks
    ## (the loop stopped at i = 2), so its weights could not learn.  Its
    ## gradient uses the network input rather than a previous activation.
    weight_list[[1]] <- weight_list[[1]] - eta * deltas[[1]] %*% t(res$x[[1]])
    bias_list[[1]] <- bias_list[[1]] - eta * deltas[[1]]
  }
  list(weight_list = weight_list, bias_list = bias_list)
}
mini_batch <- function(X, batch_size, n_instances) {
  ## Randomly pick `batch_size` of the `n_instances` columns of X by
  ## shuffling a logical selection mask.
  keep <- sample(c(rep(TRUE, batch_size), rep(FALSE, n_instances - batch_size)))
  X[, keep, drop = FALSE]
}
## Train a multilayer perceptron by stochastic gradient descent.
##
## X              matrix of training inputs, one instance per column.
## R              matrix of targets, one instance per column.
## mlp_dimensions integer vector of layer sizes (input, hidden..., output).
## eta            learning rate.
##
## Returns a list with the trained weight matrices (W) and bias vectors (B).
stochastic_backward_propagation <- function(X, R, mlp_dimensions, eta = 1) {
  W <- weight_list(mlp_dimensions)
  B <- bias_list(mlp_dimensions)
  n_instances <- ncol(X)
  ## A tenth of the data, at least 1 (the original computed
  ## floor(n_instances) here, which was clearly a typo).  Only consumed by
  ## the disabled mini-batch sampling below.
  batch_size <- ifelse(floor(n_instances / 10) > 1, floor(n_instances / 10), 1)
  j <- 0
  while (j < 100000) {
    #batch <- mini_batch( X,batch_size,n_instances )
    ## One full pass over the training instances, updating the parameters
    ## after every single instance.
    for (i in seq_len(n_instances)) {
      r <- R[, i, drop = FALSE]
      forward <- forward_propagate(as.matrix(X[, i]), W, B)
      deltas <- backward_propagate(forward, W, B, r)
      backward <- gradient_descent(forward, deltas, W, B, eta)
      W <- backward$weight_list
      B <- backward$bias_list
    }
    j <- j + 1
  }
  list(W = W, B = B)
}
| /perceptron.R | no_license | wdonahoe/backpropagation | R | false | false | 3,354 | r | random_weight <- function(n_to,n_from){
matrix( rnorm( n_to * n_from ), n_to, n_from)
}
weight_list <- function(levels){
weight_list <- list()
for( i in seq( 2, length( levels) ) ){
r <- random_weight( levels[ i ], levels[i - 1] )
weight_list[[i-1]] <- r
}
return( weight_list )
}
bias_list <- function(levels){
bias_list <- list()
for( i in seq( 2, length( levels ) ) ){
b <- random_weight( levels[ i ], 1)
bias_list[[i-1]] <- b
}
return( bias_list )
}
activate <- function( x ){
1/( 1 + exp( -x ) )
}
activate_prime <- function( x ){
activate( x ) * (1 - activate( x ) )
}
z <- function(w,a_prior,bias){
( w %*% a_prior ) + bias
}
`%.%` <- function( X,Y ){ # Be explicit about Hadamard product.
X * Y
}
forward_propagate <- function( x,weights_list,bias_list ){
zs <- list()
as <- list()
x_list <- list()
levels <- length( weights_list )
zs[[1]] <- z( weights_list[[1]], x, bias_list[[1]] )
as[[1]] <- activate( zs[[1]] )
x_list[[1]] <- x
if (levels != 1){
for( i in seq(2, levels ) ){
zs[[i]] <- z( weights_list[[i]], as.matrix( as[[i-1]] ), bias_list[[i]] )
as[[i]] <- activate( zs[[i]] )
}
}
return( list( zs=zs,as=as,x=x_list ) )
}
output_error <- function( z,r ){
(activate(z) - r) %.% activate_prime( z )
}
output_error_one_layer <- function(z, r){
activate(z) - r
}
hidden_error <- function( w,delta,z ){
( t( w ) %*% delta ) %.% activate_prime( z )
}
backward_propagate <- function( res,weight_list,bias_list,r ){
L <- length( weight_list )
deltas <- list()
if (L == 1){
deltas[[1]] <- output_error_one_layer( res$zs[[1]],r )
return( deltas )
}
else{
deltas[[L]] <- output_error( res$zs[[L]],r )
for( i in seq( L-1,1 ) ){
deltas[[i]] <- hidden_error( weight_list[[i+1]], deltas[[i+1]], res$zs[[i]] )
}
}
return( deltas )
}
## Apply one gradient-descent step to every layer's weights and biases.
##
## res          output of forward_propagate() (uses $as and $x).
## deltas       per-layer error terms from backward_propagate().
## weight_list  current weight matrices.
## bias_list    current bias vectors.
## eta          learning rate.
##
## Returns a list with the updated weight_list and bias_list.
gradient_descent <- function(res, deltas, weight_list, bias_list, eta) {
  L <- length(weight_list)
  if (L == 1) {
    weight_list[[1]] <- weight_list[[1]] - eta * deltas[[1]] %*% t(res$x[[1]])
    bias_list[[1]] <- bias_list[[1]] - eta * deltas[[1]]
  } else {
    ## Layers L..2: gradient of layer i's weights uses layer i-1's activations.
    for (i in seq(L, 2)) {
      weight_list[[i]] <- weight_list[[i]] - eta * deltas[[i]] %*% t(res$as[[i - 1]])
      bias_list[[i]] <- bias_list[[i]] - eta * deltas[[i]]
    }
    ## BUG FIX: the first layer was never updated in multi-layer networks
    ## (the loop stopped at i = 2), so its weights could not learn.  Its
    ## gradient uses the network input rather than a previous activation.
    weight_list[[1]] <- weight_list[[1]] - eta * deltas[[1]] %*% t(res$x[[1]])
    bias_list[[1]] <- bias_list[[1]] - eta * deltas[[1]]
  }
  list(weight_list = weight_list, bias_list = bias_list)
}
mini_batch <- function( X,batch_size,n_instances ){
take <- sample( c( rep( TRUE, batch_size ), rep(FALSE, n_instances - batch_size ) ) )
return( X[,take,drop=FALSE] )
}
## Train a multilayer perceptron by stochastic gradient descent.
##
## X              matrix of training inputs, one instance per column.
## R              matrix of targets, one instance per column.
## mlp_dimensions integer vector of layer sizes (input, hidden..., output).
## eta            learning rate.
##
## Returns a list with the trained weight matrices (W) and bias vectors (B).
stochastic_backward_propagation <- function(X, R, mlp_dimensions, eta = 1) {
  W <- weight_list(mlp_dimensions)
  B <- bias_list(mlp_dimensions)
  n_instances <- ncol(X)
  ## A tenth of the data, at least 1 (the original computed
  ## floor(n_instances) here, which was clearly a typo).  Only consumed by
  ## the disabled mini-batch sampling below.
  batch_size <- ifelse(floor(n_instances / 10) > 1, floor(n_instances / 10), 1)
  j <- 0
  while (j < 100000) {
    #batch <- mini_batch( X,batch_size,n_instances )
    ## One full pass over the training instances, updating the parameters
    ## after every single instance.
    for (i in seq_len(n_instances)) {
      r <- R[, i, drop = FALSE]
      forward <- forward_propagate(as.matrix(X[, i]), W, B)
      deltas <- backward_propagate(forward, W, B, r)
      backward <- gradient_descent(forward, deltas, W, B, eta)
      W <- backward$weight_list
      B <- backward$bias_list
    }
    j <- j + 1
  }
  list(W = W, B = B)
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/gaussAlgebra.R
\name{poly}
\alias{poly}
\title{Transforms polynomial coefficients into a 1D gAlg function}
\usage{
poly(...)
}
\arguments{
\item{...}{coefficients of a polynomial}
}
\description{
Transforms polynomial coefficients into a 1D gAlg function
}
| /man/poly.Rd | no_license | kmarchlewski/gaussAlgebra | R | false | false | 344 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/gaussAlgebra.R
\name{poly}
\alias{poly}
\title{Transforms polynomial coefficients into a 1D gAlg function}
\usage{
poly(...)
}
\arguments{
\item{...}{coefficients of a polynomial}
}
\description{
Transforms polynomial coefficients into a 1D gAlg function
}
|
### Getting and Cleaning Data
### Course Project 1
### Create Tidy data from a raw dataset
# Download & unzip raw data file
dataURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(dataURL, "UCI HAR Dataset.zip", quiet = TRUE)
unzip("UCI HAR Dataset.zip")
# Get indexes of columns which contain mean/stdev info
features <- read.table("./UCI HAR Dataset/features.txt", colClasses = c("NULL", "character"))
rawIndx <- sapply(features[[1]], function(x) grepl("mean()", x, fixed = TRUE), USE.NAMES = FALSE) |
sapply(features[[1]], function(x) grepl("std()" , x, fixed = TRUE), USE.NAMES = FALSE)
# Get main headers of mean/stdev info & format out "-" & "()"
features <- t(features[rawIndx,]) # headers
features <- sub("-", "_", features, fixed = TRUE)
features <- sub("-", "_", features, fixed = TRUE)
features <- sub("()", "", features, fixed = TRUE)
# Get main data of mean/stdev info
rawIndx <- ifelse(rawIndx, "numeric", "NULL")
trainingSet <- read.table("./UCI HAR Dataset/train/X_train.txt", colClasses = rawIndx)
testSet <- read.table("./UCI HAR Dataset/test/X_test.txt", colClasses = rawIndx)
data <- rbind(trainingSet, testSet)
names(data) <- features
# Get activity labels column
trainingLabel <- read.table("./UCI HAR Dataset/train/y_train.txt")
testLabel <- read.table("./UCI HAR Dataset/test/y_test.txt")
activityLabels <- read.table("./UCI HAR Dataset/activity_labels.txt",
colClasses = c("NULL", "character")) # factor map
activity <- rbind(trainingLabel, testLabel)
activity <- as.factor(activity[[1]])
levels(activity) <- activityLabels[[1]]
# Get subjects column
trainingSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
testSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
subject <- rbind(trainingSubject, testSubject)
subject <- as.factor(subject[[1]])
# Merge activity labels with rest of data
data <- cbind("activity" = activity, "subject" = subject, data)
# Create a summarised version of data for coursera submission
# consists of averaged values for each activity and subject
shortData <- aggregate(data[,-(1:2)], by=list("activity"=data$activity, "subject"=data$subject), mean)
write.table(shortData, file = "tidy_data.txt", row.names = FALSE) | /run_analysis.R | no_license | MAUMITA/GettingAndCleaningDataProject | R | false | false | 2,298 | r | ### Getting and Cleaning Data
### Course Project 1
### Create Tidy data from a raw dataset
# Download & unzip raw data file
dataURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(dataURL, "UCI HAR Dataset.zip", quiet = TRUE)
unzip("UCI HAR Dataset.zip")
# Get indexes of columns which contain mean/stdev info
features <- read.table("./UCI HAR Dataset/features.txt", colClasses = c("NULL", "character"))
rawIndx <- sapply(features[[1]], function(x) grepl("mean()", x, fixed = TRUE), USE.NAMES = FALSE) |
sapply(features[[1]], function(x) grepl("std()" , x, fixed = TRUE), USE.NAMES = FALSE)
# Get main headers of mean/stdev info & format out "-" & "()"
features <- t(features[rawIndx,]) # headers
features <- sub("-", "_", features, fixed = TRUE)
features <- sub("-", "_", features, fixed = TRUE)
features <- sub("()", "", features, fixed = TRUE)
# Get main data of mean/stdev info
rawIndx <- ifelse(rawIndx, "numeric", "NULL")
trainingSet <- read.table("./UCI HAR Dataset/train/X_train.txt", colClasses = rawIndx)
testSet <- read.table("./UCI HAR Dataset/test/X_test.txt", colClasses = rawIndx)
data <- rbind(trainingSet, testSet)
names(data) <- features
# Get activity labels column
trainingLabel <- read.table("./UCI HAR Dataset/train/y_train.txt")
testLabel <- read.table("./UCI HAR Dataset/test/y_test.txt")
activityLabels <- read.table("./UCI HAR Dataset/activity_labels.txt",
colClasses = c("NULL", "character")) # factor map
activity <- rbind(trainingLabel, testLabel)
activity <- as.factor(activity[[1]])
levels(activity) <- activityLabels[[1]]
# Get subjects column
trainingSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
testSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
subject <- rbind(trainingSubject, testSubject)
subject <- as.factor(subject[[1]])
# Merge activity labels with rest of data
data <- cbind("activity" = activity, "subject" = subject, data)
# Create a summarised version of data for coursera submission
# consists of averaged values for each activity and subject
shortData <- aggregate(data[,-(1:2)], by=list("activity"=data$activity, "subject"=data$subject), mean)
write.table(shortData, file = "tidy_data.txt", row.names = FALSE) |
#' @export
## Register the FNN-based regression learner with mlr: declares its id,
## backing package, tunable hyperparameters and capabilities.
makeRLearner.regr.fnn = function() {
makeRLearnerRegr(
cl = "regr.fnn",
package = "FNN",
# l is for reject option. cannot be done with mlr atm
par.set = makeParamSet(
## number of neighbours
makeIntegerLearnerParam(id = "k", default = 1L, lower = 1L),
## use.all is only meaningful for the brute-force "VR" algorithm
makeLogicalLearnerParam(id = "use.all", default = TRUE, requires = expression(algorithm == "VR")),
makeDiscreteLearnerParam(id = "algorithm", default = "cover_tree", values = list("cover_tree", "kd_tree", "VR"))
),
properties = c("numerics"),
name = "Fast k-Nearest Neighbor",
short.name = "fnn",
note = ""
)
}
#' @export
trainLearner.regr.fnn = function(.learner, .task, .subset, .weights = NULL, ...) {
  # k-NN is lazy: "training" just stashes the subsetted task data plus the
  # hyperparameters from ... for use at prediction time.
  task_data = getTaskData(.task, .subset, target.extra = TRUE)
  list(train = task_data, parset = list(...))
}
#' @export
predictLearner.regr.fnn = function(.learner, .model, .newdata, ...) {
  # Recover the stored training set/parameters and delegate the actual
  # regression to FNN::knn.reg, returning only its predictions.
  stored = .model$learner.model
  call_args = c(
    list(train = stored$train$data, test = .newdata, y = stored$train$target),
    stored$parset,
    list(...)
  )
  do.call(FNN::knn.reg, call_args)$pred
}
| /R/RLearner_regr_fnn.R | no_license | dickoa/mlr | R | false | false | 1,042 | r | #' @export
makeRLearner.regr.fnn = function() {
  # Register a fast k-nearest-neighbour regression learner (FNN package)
  # with mlr; returns the RLearnerRegr description object.
  makeRLearnerRegr(
    cl = "regr.fnn",
    package = "FNN",
    # l is for reject option. cannot be done with mlr atm
    par.set = makeParamSet(
      # k: number of neighbours used per prediction
      makeIntegerLearnerParam(id = "k", default = 1L, lower = 1L),
      # use.all is only meaningful for the brute-force "VR" algorithm
      makeLogicalLearnerParam(id = "use.all", default = TRUE, requires = expression(algorithm == "VR")),
      makeDiscreteLearnerParam(id = "algorithm", default = "cover_tree", values = list("cover_tree", "kd_tree", "VR"))
    ),
    # handles numeric features only
    properties = c("numerics"),
    name = "Fast k-Nearest Neighbor",
    short.name = "fnn",
    note = ""
  )
}
#' @export
trainLearner.regr.fnn = function(.learner, .task, .subset, .weights = NULL, ...) {
  # k-NN is a lazy learner: "training" just stores the (subsetted) task data
  # and the hyperparameters passed via ... for use at prediction time.
  d = getTaskData(.task, .subset, target.extra = TRUE)
  list(train = d, parset = list(...))
}
#' @export
predictLearner.regr.fnn = function(.learner, .model, .newdata, ...) {
  # Retrieve the stored training data/parameters and delegate the actual
  # k-NN regression to FNN::knn.reg, returning only the predictions.
  m = .model$learner.model
  pars = c(list(train = m$train$data, test = .newdata, y = m$train$target), m$parset, list(...))
  do.call(FNN::knn.reg, pars)$pred
}
|
library(quantmod) | /ch1.R | no_license | gurudk/fts3 | R | false | false | 17 | r | library(quantmod) |
# July 8 2020. R script processing model validation against observational data of ch4, doc, and ace ELM_SPRUCE
getwd()
setwd("/Users/xxuadmin/BUSINESS/PUBLICATIONS/Published/2020/2020_Ricciuto_Xu_ELM_SPRUCE/20200708/r")
# Each comparison below: regress modelled values on observations, echo the
# lm() summary to the console, and append a labelled copy of the summary to
# statistic_output.txt.  sep = "\n" keeps the captured summary's line
# structure in the file; the original sep = " " joined every line of each
# summary with spaces, collapsing it into one unreadable line.
# CH4: ELM_SPRUCE vs. Hanson observations
dailych4 <- read.table("dailych4flux", header=TRUE)
summary(lm(dailych4$ModeledCH4~dailych4$ObservedCH4Hanson))
out1 <- capture.output(summary(lm(dailych4$ModeledCH4~dailych4$ObservedCH4Hanson)))
cat("comparision of Hanson CH4 with ELM_SPRUCE", out1, file="statistic_output.txt", sep="\n", append=TRUE)
# CH4: ELM_SPRUCE vs. Bridgham observations
dailych4_s <- read.table("dailych4flux_bridgham", header=TRUE)
summary(lm(dailych4_s$ModeledCH4~dailych4_s$ObservedCH4Bridgham))
out2 <- capture.output(summary(lm(dailych4_s$ModeledCH4~dailych4_s$ObservedCH4Bridgham)))
cat("comparision of Bridgham CH4 with ELM_SPRUCE", out2, file="statistic_output.txt", sep="\n", append=TRUE)
# CH4: CLM4Me model vs. Hanson observations
dailych4_clm4me <- read.table("dailych4flux_clm4me", header=TRUE)
summary(lm(dailych4_clm4me$CLM4Me~dailych4_clm4me$ObservedCH4Hanson))
out3 <- capture.output(summary(lm(dailych4_clm4me$CLM4Me~dailych4_clm4me$ObservedCH4Hanson)))
cat("comparision of Hanson CH4 with CLM4Me", out3, file="statistic_output.txt", sep="\n", append=TRUE)
# Dissolved organic carbon
DOC <- read.table("DOC", header=TRUE)
summary(lm(DOC$ModeledDOC~DOC$ObservedDOC))
out4 <- capture.output(summary(lm(DOC$ModeledDOC~DOC$ObservedDOC)))
cat("comparision of DOC with ELM_SPRUCE", out4, file="statistic_output.txt", sep="\n", append=TRUE)
# Acetate
ACE <- read.table("ACE", header=TRUE)
summary(lm(ACE$ModeledACE~ACE$ObservedACE))
out5 <- capture.output(summary(lm(ACE$ModeledACE~ACE$ObservedACE)))
cat("comparision of Acetate with ELM_SPRUCE", out5, file="statistic_output.txt", sep="\n", append=TRUE)
| /2020July_ELM_SPRUCE/script.R | no_license | email-clm/R_stat_visualization | R | false | false | 1,692 | r | #July 8 2020. R script processing model validation against observational data of ch4, doc, and ace ELM_SPRUCE
getwd()
setwd("/Users/xxuadmin/BUSINESS/PUBLICATIONS/Published/2020/2020_Ricciuto_Xu_ELM_SPRUCE/20200708/r")
# Pattern repeated below: fit lm(modelled ~ observed), echo the summary to the
# console, and append a labelled copy of the captured summary to
# statistic_output.txt.
# NOTE(review): cat(..., sep = " ") joins the captured summary lines with
# spaces, so each summary lands in the file as one long line; sep = "\n" was
# probably intended.
# CH4: ELM_SPRUCE vs. Hanson observations
dailych4 <- read.table("dailych4flux", header=TRUE)
summary(lm(dailych4$ModeledCH4~dailych4$ObservedCH4Hanson))
out1 <- capture.output(summary(lm(dailych4$ModeledCH4~dailych4$ObservedCH4Hanson)))
cat("comparision of Hanson CH4 with ELM_SPRUCE", out1, file="statistic_output.txt", sep=" ", append=TRUE)
# CH4: ELM_SPRUCE vs. Bridgham observations
dailych4_s <- read.table("dailych4flux_bridgham", header=TRUE)
summary(lm(dailych4_s$ModeledCH4~dailych4_s$ObservedCH4Bridgham))
out2 <- capture.output(summary(lm(dailych4_s$ModeledCH4~dailych4_s$ObservedCH4Bridgham)))
cat("comparision of Bridgham CH4 with ELM_SPRUCE", out2, file="statistic_output.txt", sep=" ", append=TRUE)
# CH4: CLM4Me model vs. Hanson observations
dailych4_clm4me <- read.table("dailych4flux_clm4me", header=TRUE)
summary(lm(dailych4_clm4me$CLM4Me~dailych4_clm4me$ObservedCH4Hanson))
out3 <- capture.output(summary(lm(dailych4_clm4me$CLM4Me~dailych4_clm4me$ObservedCH4Hanson)))
cat("comparision of Hanson CH4 with CLM4Me", out3, file="statistic_output.txt", sep=" ", append=TRUE)
# Dissolved organic carbon
DOC <- read.table("DOC", header=TRUE)
summary(lm(DOC$ModeledDOC~DOC$ObservedDOC))
out4 <- capture.output(summary(lm(DOC$ModeledDOC~DOC$ObservedDOC)))
cat("comparision of DOC with ELM_SPRUCE", out4, file="statistic_output.txt", sep=" ", append=TRUE)
# Acetate
ACE <- read.table("ACE", header=TRUE)
summary(lm(ACE$ModeledACE~ACE$ObservedACE))
out5 <- capture.output(summary(lm(ACE$ModeledACE~ACE$ObservedACE)))
cat("comparision of Acetate with ELM_SPRUCE", out5, file="statistic_output.txt", sep=" ", append=TRUE)
|
# load relevant libraries
library(R.matlab)
library(ggplot2)
library(grid)
library(gridExtra)
library(cowplot)
# Change working directory to where file is (only works in R studio)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# import data
# NOTE(review): R.matlab::readMat() has no documented `header` argument;
# confirm the extra argument is harmless and drop it if so.
data_frommat <- readMat("multiplecycles_out.mat", header=TRUE) # read in as list
df_frommat <- as.data.frame(data_frommat) # convert list to data frame
names(df_frommat) <- c('t','bal','plasmaT','tumorM','tumorT')
# All four panels share the same recipe; build them with one helper instead of
# repeating the ggplot chain four times.
#   times  - x values (time in months)
#   values - the quantity to plot
#   y_lab  - y-axis label
make_panel <- function(times, values, y_lab) {
  ggplot(data.frame(t = times, v = values)) +
    geom_line(aes(x=t, y=v, color="Regimen 1")) +
    scale_x_continuous("Time (Months)", limits=c(0,6)) +
    scale_y_continuous(y_lab) +
    scale_color_manual(name='', values=c('Regimen 1'='#CC0033')) +
    theme_bw()
}
g1 <- make_panel(df_frommat$t, df_frommat$plasmaT, "TMZ in Plasma (M)")
g2 <- make_panel(df_frommat$t, df_frommat$tumorT,  "TMZ in Tumor (M)")
g3 <- make_panel(df_frommat$t, df_frommat$tumorM,  "MTIC in Tumor (M)")
g4 <- make_panel(df_frommat$t, df_frommat$bal,     "Mass Balance")
# Arrange the four panels (legends removed) in a 2x2 grid under a shared title.
p1 <- plot_grid(g1 + theme(legend.position="none"),
                g2 + theme(legend.position="none"),
                nrow=1)
p2 <- plot_grid(g3 + theme(legend.position="none"),
                g4 + theme(legend.position="none"),
                nrow=1)
title = textGrob("Concentration Profiles for Multiple Cycles",gp=gpar(fontsize=15,font=3))
out = grid.arrange(p1,p2,nrow=2,top=title)
ggsave(file="MultipleCycles.pdf",plot=out,width=10,height=6) # Save the figure | /Dosing/multiplecycles.R | no_license | vayyappan/TemozolomideModel | R | false | false | 1,949 | r | # load relevant libraries
library(R.matlab)
library(ggplot2)
library(grid)
library(gridExtra)
library(cowplot)
# Change working directory to where file is (only works in R studio)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# import data
# NOTE(review): R.matlab::readMat() has no documented `header` argument -
# confirm the extra argument is harmless.
data_frommat <- readMat("multiplecycles_out.mat",header=T) # read in as list
df_frommat <- as.data.frame(data_frommat) # convert list to data frame
names(df_frommat) <- c('t','bal','plasmaT','tumorM','tumorT')
# Four panels with identical styling over the first six months:
# plasma TMZ, tumour TMZ, tumour MTIC, and the mass balance.
g1 <- ggplot(df_frommat) +
  geom_line(aes(x=t,y=plasmaT,color="Regimen 1")) +
  scale_x_continuous("Time (Months)",limits=c(0,6)) +
  scale_y_continuous("TMZ in Plasma (M)") +
  scale_color_manual(name='',values=c('Regimen 1'='#CC0033')) +
  theme_bw()
g2 <- ggplot(df_frommat) +
  geom_line(aes(x=t,y=tumorT,color="Regimen 1")) +
  scale_x_continuous("Time (Months)",limits=c(0,6)) +
  scale_y_continuous("TMZ in Tumor (M)") +
  scale_color_manual(name='',values=c('Regimen 1'='#CC0033')) +
  theme_bw()
g3 <- ggplot(df_frommat) +
  geom_line(aes(x=t,y=tumorM,color="Regimen 1")) +
  scale_x_continuous("Time (Months)",limits=c(0,6)) +
  scale_y_continuous("MTIC in Tumor (M)") +
  scale_color_manual(name='',values=c('Regimen 1'='#CC0033')) +
  theme_bw()
g4 <- ggplot(df_frommat) +
  geom_line(aes(x=t,y=bal,color="Regimen 1")) +
  scale_x_continuous("Time (Months)",limits=c(0,6)) +
  scale_y_continuous("Mass Balance") +
  scale_color_manual(name='',values=c('Regimen 1'='#CC0033')) +
  theme_bw()
# Arrange the panels (legends removed) in a 2x2 grid under a shared title.
p1 <- plot_grid(g1 + theme(legend.position="none"),
                g2 + theme(legend.position="none"),
                nrow=1)
p2 <- plot_grid(g3 + theme(legend.position="none"),
                g4 + theme(legend.position="none"),
                nrow=1)
title = textGrob("Concentration Profiles for Multiple Cycles",gp=gpar(fontsize=15,font=3))
out = grid.arrange(p1,p2,nrow=2,top=title)
ggsave(file="MultipleCycles.pdf",plot=out,width=10,height=6) # Save the figure |
setwd("C:/saeed/WorkingDirectory")
library(ggplot2)
FR = read.csv("C:/saeed/FR_Interactions.csv")
# Density of `cont`, split by the `inter` flag.
# Columns are referenced by bare name inside aes(): with FR passed as the data
# argument this is equivalent to FR$..., but it keeps the mapping tied to the
# data layer (the FR$ form bypasses the data argument and breaks faceting).
# NOTE(review): assumes FR has columns `cont` and `inter` - confirm against the CSV.
ggplot(FR, aes(cont, fill = factor(inter))) +
  geom_density()
# library() errors immediately if gridExtra is missing (require() only warns).
library(gridExtra)
# Interaction count vs. constituent count; colour and size both encode revenue
# (their legends are suppressed), with a linear fit overlaid.
p1 = ggplot(FR, aes(x = INT_InPerson, y = CONSTITUENT_COUNT, color = REVENUE_AMOUNT)) +
  geom_point() +
  scale_colour_continuous(guide = FALSE) + # colour legend off
  aes(size = REVENUE_AMOUNT) +
  guides(size = FALSE) +                   # size legend off
  geom_smooth(method = lm) +
  # ylim(0, 2000000) +
  xlab("InPerson Intraction #") +
  ylab("Constituent #")
p2 = ggplot(FR, aes(x = INT_Email, y = CONSTITUENT_COUNT, color = REVENUE_AMOUNT)) +
  geom_point() +
  scale_colour_continuous(guide = FALSE) +
  aes(size = REVENUE_AMOUNT) +
  guides(size = FALSE) +
  geom_smooth(method = lm) +
  xlab("Email Intraction #") +
  ylab("Constituent #")
p3 = ggplot(FR, aes(x = INT_Phone, y = CONSTITUENT_COUNT, color = REVENUE_AMOUNT)) +
  geom_point() +
  aes(size = REVENUE_AMOUNT) +
  scale_colour_continuous(guide = FALSE) +
  guides(size = FALSE) +
  geom_smooth(method = lm) +
  xlab("Phone Intraction #") +
  ylab("Constituent #")
# Show the three scatter panels side by side.
grid.arrange(p1, p2, p3, ncol = 3)
| /FR_INTRACTION.R | no_license | saeedaghabozorgi/SK_FRanalysis_R | R | false | false | 1,198 | r | setwd("C:/saeed/WorkingDirectory")
library(ggplot2)
FR = read.csv("C:/saeed/FR_Interactions.csv")
# Density of `cont`, split by the `inter` flag.
# NOTE(review): referencing columns as FR$... inside aes() is a ggplot2
# anti-pattern (it bypasses the data argument and breaks faceting); it works
# here but bare column names would be preferable.  Assumes FR has columns
# `cont` and `inter` - confirm against the CSV.
ggplot(FR, aes(FR$cont,fill=factor(inter))) +
  geom_density()
require(gridExtra)
# Interaction count vs. constituent count; colour and size both encode revenue
# (their legends are suppressed), with a linear fit overlaid.
p1=ggplot(FR, aes(x=FR$INT_InPerson, y=FR$CONSTITUENT_COUNT, color=FR$REVENUE_AMOUNT)) +
  geom_point() + # Use hollow circles
  scale_colour_continuous(guide = FALSE) + # to set color legend off
  aes(size=FR$REVENUE_AMOUNT)+
  guides(size=FALSE)+
  geom_smooth(method=lm) +
  xlab("InPerson Intraction #") +
  ylab("Constituent #")
p2=ggplot(FR, aes(x=FR$INT_Email, y=FR$CONSTITUENT_COUNT,color=FR$REVENUE_AMOUNT)) +
  geom_point() + # Use hollow circles
  scale_colour_continuous(guide = FALSE) +
  aes(size=FR$REVENUE_AMOUNT)+
  guides(size=FALSE)+
  geom_smooth(method=lm) +
  xlab("Email Intraction #") +
  ylab("Constituent #")
p3=ggplot(FR, aes(x=FR$INT_Phone, y=FR$CONSTITUENT_COUNT,color=FR$REVENUE_AMOUNT)) +
  geom_point() + # Use hollow circles
  aes(size=FR$REVENUE_AMOUNT)+
  scale_colour_continuous(guide = FALSE) +
  guides(size=FALSE)+
  geom_smooth(method=lm) +
  xlab("Phone Intraction #") +
  ylab("Constituent #")
# Show the three scatter panels side by side.
grid.arrange(p1, p2,p3, ncol=3)
|
#' Perform auto authentication
#'
#' This helper function lets you use environment variables to auto-authenticate on package load, intended for calling by \link{gar_attach_auto_auth}
#'
#' @param no_auto If TRUE, ignore auto-authentication settings
#' @param required_scopes Required scopes needed to authenticate - needs to match at least one
#' @param environment_var Name of environment var that contains auth file path
#' @param new_user Deprecated, not used
#'
#' @details
#' The authentication file can be a \code{.httr-oauth} file created via \link{gar_auth}
#' or a Google service JSON file downloaded from the Google API credential console,
#' with file extension \code{.json}.
#'
#' You can use this in your code to authenticate from a file location specified in file,
#' but it is mainly intended to be called on package load via \link{gar_attach_auto_auth}.
#'
#'
#' \code{environment_var} This is the name that will be called via \link{Sys.getenv} on library load. The environment variable will contain an absolute file path to the location of an authentication file.
#'
#' @seealso
#'
#' Help files for \link{.onAttach}
#'
#' @return an OAuth token object, specifically a
#' \code{\link[=Token-class]{Token2.0}}, invisibly
#'
#' @export
#' @family authentication functions
#' @import assertthat
#' @importFrom tools file_ext
gar_auto_auth <- function(required_scopes,
                          no_auto = NULL,
                          environment_var = "GAR_AUTH_FILE",
                          new_user = NULL) {
  # Resolve credentials from the environment: the environment variable may
  # hold an email address (use cached user credentials), a service-account
  # JSON path, or an .httr-oauth token file path.  Returns a token, or NULL
  # when nothing is configured.

  # Both arguments below are retained only for backwards compatibility.
  if(!is.null(new_user)){
    warning("Argument new_user is deprecated and will be removed next release.")
  }

  if(!is.null(no_auto)){
    warning("Argument no_auto is deprecated and will be removed next release.")
  }

  if(is.null(required_scopes)){
    myMessage("No scopes have been set, set them via
              options(googleAuthR.scopes.selected)
              - no authentication attempted.", level = 2)
    return(NULL)
  }

  assert_that(
    is.character(required_scopes),
    is.string(environment_var)
  )

  # Every configured scope must be one of the scopes this API supports.
  if(!all(getOption("googleAuthR.scopes.selected") %in% required_scopes)){
    stop("Cannot authenticate - options(googleAuthR.scopes.selected) needs to be set to include",
         paste(required_scopes, collapse = " or "), " but scopes set are: ",
         paste(getOption("googleAuthR.scopes.selected"), collapse = " "))
  }

  auth_file <- Sys.getenv(environment_var)

  if(auth_file == ""){
    ## Can't do anything, return.
    return(NULL)
  }

  # An email address selects cached user credentials for that identity.
  # Fixed: the previous class "[[:alnum:].-_]" parsed ".-_" as the character
  # range 0x2E-0x5F (digits, "/", "@", uppercase letters, ...), so many
  # non-email strings - including some file paths - slipped through.  With
  # "-" placed last it is literal: letters, digits, ".", "_" and "-" only.
  if(grepl("^[[:alnum:]._-]+@[[:alnum:].-]+$", auth_file)){
    myMessage("Auto-auth - email address", level = 2)
    make_app()
    token <- gargle::credentials_user_oauth2(
      scopes = getOption("googleAuthR.scopes.selected"),
      app = gar_oauth_app(),
      package = "googleAuthR",
      email = auth_file
    )
    .auth$set_cred(token)
    .auth$set_auth_active(TRUE)

    return(token)
  }

  if(!file.exists(auth_file)){
    ## auth_file specified but not present
    stop(environment_var, " specified in environment variables but file not found -
         looked for ", auth_file, " and called from ", getwd())
  }

  ## auth_file specified in environment_var
  ## Service JSON file
  if(file_ext(auth_file) == "json"){
    myMessage("Auto-auth - json", level = 2)
    out <- gar_auth_service(auth_file)
  } else {
    ## .httr-oauth file
    myMessage("Auto-auth - file path", level = 2)
    out <- gar_auth(token = auth_file)
  }

  out
}
#' Auto Authentication function for use within .onAttach
#'
#' To be placed within \link{.onAttach} to auto load an authentication file from an environment variable.
#'
#' @param required_scopes A character vector of minimum required scopes for this API library
#' @param environment_var The name of the environment variable where the file path to the authentication file is kept
#'
#' @details
#' This function works with \link{gar_auto_auth}. It is intended to be placed within the \link{.onAttach} hook so that it loads when you load your library.
#'
#' For auto-authentication to work, the environment variable needs to hold a file path to an existing auth file such as created via \link{gar_auth} or a JSON file file download from the Google API console.
#'
#' @examples
#'
#' \dontrun{
#'
#' .onAttach <- function(libname, pkgname){
#'
#' googleAuthR::gar_attach_auto_auth("https://www.googleapis.com/auth/urlshortener", "US_AUTH_FILE")
#'
#' }
#'
#' ## will only work if you have US_AUTH_FILE environment variable pointing to an auth file location
#' ## .Renviron example
#' US_AUTH_FILE="/home/mark/auth/urlshortnerauth.json"
#'
#' }
#'
#' @return Invisible, used for its side effects of calling auto-authentication.
#' @export
#' @family authentication functions
#' @import assertthat
gar_attach_auto_auth <- function(required_scopes,
                                 environment_var = "GAR_AUTH_FILE"){
  # Intended for a package's .onAttach(): quietly does nothing unless both
  # the required scopes and the environment variable are available.
  if(is.null(required_scopes)){
    myMessage("No scopes have been set, set them via
              options(googleAuthR.scopes.selected) -
              no authentication attempted.", level = 2)
    return(NULL)
  }

  if(Sys.getenv(environment_var) == ""){
    myMessage("No environment argument found, looked in ", environment_var,
              level = 2)
    return(NULL)
  }

  assert_that(
    is.character(required_scopes),
    is.string(environment_var)
  )

  # If none of the required scopes are configured yet, adopt the required
  # set; otherwise leave the user's existing configuration untouched.
  configured_scopes <- getOption("googleAuthR.scopes.selected")
  none_configured <- !any(required_scopes %in% configured_scopes)

  if(none_configured){
    packageStartupMessage("Setting scopes to ",
                          paste(required_scopes, collapse = " and "))
  }

  options(
    googleAuthR.scopes.selected =
      if(none_configured) required_scopes else configured_scopes
  )

  # Attempt the authentication and report the outcome on package startup,
  # without ever aborting package load.
  tryCatch({
    token <- gar_auto_auth(required_scopes = required_scopes,
                           environment_var = environment_var)

    got_token <- inherits(token, "Token")
    mess <- paste(
      if(got_token) "Successfully auto-authenticated via" else "No token in auto-auth via",
      Sys.getenv(environment_var)
    )
    packageStartupMessage(mess)
  }, error = function(ex){
    packageStartupMessage("Failed! Auto-authentication via ",
                          environment_var, "=",
                          Sys.getenv(environment_var),
                          " - error was: ", ex$error, ex$message)
  })

  invisible()
}
| /R/auto_auth.R | no_license | jeffwzhong1994/googleAuthR | R | false | false | 6,476 | r | #' Perform auto authentication
#'
#' This helper function lets you use environment variables to auto-authenticate on package load, intended for calling by \link{gar_attach_auto_auth}
#'
#' @param no_auto If TRUE, ignore auto-authentication settings
#' @param required_scopes Required scopes needed to authenticate - needs to match at least one
#' @param environment_var Name of environment var that contains auth file path
#' @param new_user Deprecated, not used
#'
#' @details
#' The authentication file can be a \code{.httr-oauth} file created via \link{gar_auth}
#' or a Google service JSON file downloaded from the Google API credential console,
#' with file extension \code{.json}.
#'
#' You can use this in your code to authenticate from a file location specified in file,
#' but it is mainly intended to be called on package load via \link{gar_attach_auto_auth}.
#'
#'
#' \code{environment_var} This is the name that will be called via \link{Sys.getenv} on library load. The environment variable will contain an absolute file path to the location of an authentication file.
#'
#' @seealso
#'
#' Help files for \link{.onAttach}
#'
#' @return an OAuth token object, specifically a
#' \code{\link[=Token-class]{Token2.0}}, invisibly
#'
#' @export
#' @family authentication functions
#' @import assertthat
#' @importFrom tools file_ext
gar_auto_auth <- function(required_scopes,
                          no_auto = NULL,
                          environment_var = "GAR_AUTH_FILE",
                          new_user = NULL) {
  # Resolve credentials from the environment: the environment variable may
  # hold an email address (cached user credentials), a service-account JSON
  # path, or an .httr-oauth token file path.  Returns a token, or NULL when
  # nothing is configured.

  # Both arguments below are retained only for backwards compatibility.
  if(!is.null(new_user)){
    warning("Argument new_user is deprecated and will be removed next release.")
  }

  if(!is.null(no_auto)){
    warning("Argument no_auto is deprecated and will be removed next release.")
  }

  if(is.null(required_scopes)){
    myMessage("No scopes have been set, set them via
              options(googleAuthR.scopes.selected)
              - no authentication attempted.", level = 2)
    return(NULL)
  }

  assert_that(
    is.character(required_scopes),
    is.string(environment_var)
  )

  # Every configured scope must be one of the scopes this API supports.
  if(!all(getOption("googleAuthR.scopes.selected") %in% required_scopes)){
    stop("Cannot authenticate - options(googleAuthR.scopes.selected) needs to be set to include",
         paste(required_scopes, collapse = " or "), " but scopes set are: ",
         paste(getOption("googleAuthR.scopes.selected"), collapse = " "))
  }

  auth_file <- Sys.getenv(environment_var)

  if(auth_file == ""){
    ## Can't do anything, return.
    return(NULL)
  }

  # An email address means "use cached user credentials for this identity".
  # NOTE(review): inside the bracket expression, ".-_" parses as the
  # character range 0x2E-0x5F (which includes "/", digits, "@" and uppercase
  # letters), so some non-email strings can match; "[[:alnum:]._-]" was
  # likely intended.
  if(grepl("^[[:alnum:].-_]+@[[:alnum:].-]+$", auth_file)){
    myMessage("Auto-auth - email address", level = 2)
    make_app()
    token <- gargle::credentials_user_oauth2(
      scopes = getOption("googleAuthR.scopes.selected"),
      app = gar_oauth_app(),
      package = "googleAuthR",
      email = auth_file
    )
    .auth$set_cred(token)
    .auth$set_auth_active(TRUE)

    return(token)
  }

  if(!file.exists(auth_file)){
    ## auth_file specified but not present
    stop(environment_var, " specified in environment variables but file not found -
         looked for ", auth_file, " and called from ", getwd())
  }

  ## auth_file specified in environment_var
  ## Service JSON file
  if(file_ext(auth_file) == "json"){
    myMessage("Auto-auth - json", level = 2)
    out <- gar_auth_service(auth_file)
  } else {
    ## .httr-oauth file
    myMessage("Auto-auth - file path", level = 2)
    out <- gar_auth(token = auth_file)
  }

  out
}
#' Auto Authentication function for use within .onAttach
#'
#' To be placed within \link{.onAttach} to auto load an authentication file from an environment variable.
#'
#' @param required_scopes A character vector of minimum required scopes for this API library
#' @param environment_var The name of the environment variable where the file path to the authentication file is kept
#'
#' @details
#' This function works with \link{gar_auto_auth}. It is intended to be placed within the \link{.onAttach} hook so that it loads when you load your library.
#'
#' For auto-authentication to work, the environment variable needs to hold a file path to an existing auth file such as created via \link{gar_auth} or a JSON file file download from the Google API console.
#'
#' @examples
#'
#' \dontrun{
#'
#' .onAttach <- function(libname, pkgname){
#'
#' googleAuthR::gar_attach_auto_auth("https://www.googleapis.com/auth/urlshortener", "US_AUTH_FILE")
#'
#' }
#'
#' ## will only work if you have US_AUTH_FILE environment variable pointing to an auth file location
#' ## .Renviron example
#' US_AUTH_FILE="/home/mark/auth/urlshortnerauth.json"
#'
#' }
#'
#' @return Invisible, used for its side effects of calling auto-authentication.
#' @export
#' @family authentication functions
#' @import assertthat
gar_attach_auto_auth <- function(required_scopes,
                                 environment_var = "GAR_AUTH_FILE"){
  # Intended for a package's .onAttach(): configures scopes and attempts
  # auto-authentication, reporting the outcome via packageStartupMessage()
  # without ever aborting package load.
  if(is.null(required_scopes)){
    myMessage("No scopes have been set, set them via
              options(googleAuthR.scopes.selected) -
              no authentication attempted.", level = 2)
    return(NULL)
  }

  if(Sys.getenv(environment_var) == ""){
    myMessage("No environment argument found, looked in ", environment_var,
              level = 2)
    return(NULL)
  }

  assert_that(
    is.character(required_scopes),
    is.string(environment_var)
  )

  # If none of the required scopes are configured yet, adopt the required
  # set; otherwise keep the user's existing scopes.
  scopes <- getOption("googleAuthR.scopes.selected")

  if(all(!(required_scopes %in% scopes))){
    packageStartupMessage("Setting scopes to ",
                          paste(required_scopes, collapse = " and "))
    new_scopes <- required_scopes
  } else {
    new_scopes <- scopes
  }

  options(googleAuthR.scopes.selected = new_scopes)

  # Attempt the authentication; any error is reduced to a startup message.
  tryCatch({
    token <- gar_auto_auth(required_scopes = required_scopes,
                           environment_var = environment_var)
    if(inherits(token, "Token")){
      mess <- paste("Successfully auto-authenticated via",
                    Sys.getenv(environment_var))
    } else {
      mess <- paste("No token in auto-auth via",
                    Sys.getenv(environment_var))
    }
    packageStartupMessage(mess)
  }, error = function(ex){
    packageStartupMessage("Failed! Auto-authentication via ",
                          environment_var, "=",
                          Sys.getenv(environment_var),
                          " - error was: ", ex$error, ex$message)
  })

  invisible()
}
|
# dplyr package version 0.8.0.1
library(dplyr)
# data.table package version 1.12.2
library(data.table)
# functions for getting initial datasets
trimmed_dataset <- function(X_text, Y_text, subject_text){
    # Read one split (train or test) of the UCI HAR dataset and return a
    # single data frame: Subject column, the mean()/std() measurement
    # columns, and a descriptive Activity column.
    # fread (data.table) gives much faster input; data.table = FALSE keeps
    # plain data frames.
    measurements <- fread(file = X_text, sep = " ", data.table = FALSE)
    raw_labels <- fread(file = Y_text, sep = " ", data.table = FALSE)
    subj <- read.table(subject_text)

    # Label all columns with descriptive names from the features file.
    feats <- read.table("UCI HAR Dataset/features.txt", sep = " ")
    names(measurements) <- as.character(feats$V2)
    names(raw_labels) <- "Activity"
    names(subj) <- "Subject"

    # Keep only the measurement columns whose names mention mean() or std().
    keep <- grepl("mean\\(\\)|std\\(\\)", names(measurements))
    measurements <- measurements[, keep]

    # Translate numeric activity codes into their descriptive names via a
    # rowname-indexed lookup table.
    act_map <- read.table("UCI HAR Dataset/activity_labels.txt")
    rownames(act_map) <- act_map$V1
    raw_labels <- mutate(raw_labels, Activity = act_map[Activity, "V2"])

    # Subject first, then the measurements, Activity last.
    cbind(subj, measurements, raw_labels)
}
##############################
# BEGINNING OF MAIN PROGRAM
##############################
# Check if the dataset folder(whose default name is "UCI HAR Dataset") exists in the current directory
if(!file.exists("UCI HAR Dataset")){
    stop("The dataset folder 'UCI HAR Dataset' does not exist. If the dataset folder exists, please
        rename it to 'UCI HAR Dataset'. If not, either copy the existing dataset into the current
        folder or run the provided script 'download_dataset.R' to download the dataset.")
}
# destinations to train and test datasets
train_X_text = "UCI HAR Dataset/train/X_train.txt"
train_Y_text = "UCI HAR Dataset/train/y_train.txt"
subject_train_text = "UCI HAR Dataset/train/subject_train.txt"
test_X_text = "UCI HAR Dataset/test/X_test.txt"
test_Y_text = "UCI HAR Dataset/test/y_test.txt"
subject_test_text = "UCI HAR Dataset/test/subject_test.txt"
##############################
# Merge the training and the test sets to create one data set.
##############################
# Each call returns Subject + mean()/std() columns + Activity for one split.
train_data = trimmed_dataset(train_X_text, train_Y_text, subject_train_text)
test_data = trimmed_dataset(test_X_text, test_Y_text, subject_test_text)
total_dataset = rbind(train_data, test_data)
##############################
# From the data set, create a second, independent tidy data set with the average
# of each variable for each activity and each subject.
##############################
# Summarising by taking the average of all variables, grouped by Subject and Activity
tidy_dataset = total_dataset %>% group_by(Subject, Activity) %>% summarise_all(list(mean)) %>% ungroup()
# adding "Avg-" to the beginning of every column - excluding Activity and Subject - to make
# descriptive variable names
# NOTE(review): this relies on tidyselect accepting the reversed range
# Activity:Subject (the columns appear as Subject, Activity after grouping) -
# confirm with the installed dplyr version.
new_names = c(names(select(tidy_dataset, Activity:Subject)), paste0("Avg-", names(select(tidy_dataset, -(Activity:Subject)))))
# fix small mistake in the column names - In some columns the word 'Body' is given twice - eg. fBodyBodyAccJerkMag
new_names = gsub("BodyBody","Body",new_names)
names(tidy_dataset) = new_names
# Writing tidy dataset to file 'tidy_dataset.txt'
write.table(tidy_dataset, "tidy_dataset.txt", row.names = FALSE)
| /run_analysis.R | no_license | lesasi/-Getting-and-Cleaning-Data-Assignment | R | false | false | 3,905 | r | # dplyr package version 0.8.0.1
library(dplyr)
# data.table package version 1.12.2
library(data.table)
# functions for getting initial datasets
trimmed_dataset <- function(X_text, Y_text, subject_text){
    # Read one split (train or test) of the UCI HAR dataset and return a data
    # frame of: Subject, the mean()/std() measurement columns, and a
    # descriptive Activity column.
    # uses fread from 'data.table' package to factilitate much faster input and formatting it as a dataframe
    X = fread(file = X_text, sep = " ", data.table = FALSE)
    Y = fread(file = Y_text, sep = " ", data.table = FALSE)
    subject = read.table(subject_text)
    ##############################
    # Appropriately label the data set with descriptive variable names.
    ##############################
    # getting feature names so we can give accurate names to columns
    feature_names = read.table("UCI HAR Dataset/features.txt", sep = " ")
    feature_names = as.character(feature_names$V2)
    names(X) = feature_names
    names(Y) = c("Activity")
    names(subject) = c("Subject")
    ##############################
    # Extract only the measurements on the mean and standard deviation for each measurement.
    ##############################
    # extracting all column names related to mean() and std() using grep
    names_std_mean = grep("mean\\(\\)|std\\(\\)", names(X), value = TRUE)
    X_polished = X[, names(X) %in% names_std_mean]
    ##############################
    # Use descriptive activity names to name the activities in the data set
    ##############################
    activity_labels = read.table("UCI HAR Dataset/activity_labels.txt")
    # replacing Y labels with the corresponding activity names
    # (rowname-indexed lookup: numeric code -> descriptive label in V2)
    rownames(activity_labels) = activity_labels$V1
    Y = mutate(Y, Activity = activity_labels[Activity, "V2"])
    # attaching them together: Subject first, measurements, Activity last
    polished_dataset = cbind(subject, X_polished, Y)
    return(polished_dataset)
}
##############################
# BEGINNING OF MAIN PROGRAM
##############################
# Check if the dataset folder(whose default name is "UCI HAR Dataset") exists in the current directory
if(!file.exists("UCI HAR Dataset")){
    stop("The dataset folder 'UCI HAR Dataset' does not exist. If the dataset folder exists, please
        rename it to 'UCI HAR Dataset'. If not, either copy the existing dataset into the current
        folder or run the provided script 'download_dataset.R' to download the dataset.")
}
# destinations to train and test datasets
train_X_text = "UCI HAR Dataset/train/X_train.txt"
train_Y_text = "UCI HAR Dataset/train/y_train.txt"
subject_train_text = "UCI HAR Dataset/train/subject_train.txt"
test_X_text = "UCI HAR Dataset/test/X_test.txt"
test_Y_text = "UCI HAR Dataset/test/y_test.txt"
subject_test_text = "UCI HAR Dataset/test/subject_test.txt"
##############################
# Merge the training and the test sets to create one data set.
##############################
# Each call returns Subject + mean()/std() columns + Activity for one split.
train_data = trimmed_dataset(train_X_text, train_Y_text, subject_train_text)
test_data = trimmed_dataset(test_X_text, test_Y_text, subject_test_text)
total_dataset = rbind(train_data, test_data)
##############################
# From the data set, create a second, independent tidy data set with the average
# of each variable for each activity and each subject.
##############################
# Summarising by taking the average of all variables, grouped by Subject and Activity
tidy_dataset = total_dataset %>% group_by(Subject, Activity) %>% summarise_all(list(mean)) %>% ungroup()
# adding "Avg-" to the beginning of every column - excluding Activity and Subject - to make
# descriptive variable names
# NOTE(review): relies on tidyselect accepting the reversed range
# Activity:Subject (columns appear as Subject, Activity after grouping) -
# confirm with the installed dplyr version.
new_names = c(names(select(tidy_dataset, Activity:Subject)), paste0("Avg-", names(select(tidy_dataset, -(Activity:Subject)))))
# fix small mistake in the column names - In some columns the word 'Body' is given twice - eg. fBodyBodyAccJerkMag
new_names = gsub("BodyBody","Body",new_names)
names(tidy_dataset) = new_names
# Writing tidy dataset to file 'tidy_dataset.txt'
write.table(tidy_dataset, "tidy_dataset.txt", row.names = FALSE)
|
#script to make slopes and dem in to a netCDF file
#load packages
library(abind)
library(ncdf4)
library(lattice)
library(fields)
setwd("/Users/katie/Dropbox/ParFlow Domain/slopes/")
fundir=("/Users/katie/Dropbox/ParFlow Domain/slopes/functions/")
outdir=("/Users/katie/Dropbox/netCDF stuff/netCDF_in_R/")
# Read whitespace-delimited ASCII grids; all three share a 246-column layout.
dem=matrix(scan("dem.txt"), ncol=246, byrow=T)
slopex=matrix(scan("slopex.txt"), ncol=246, byrow=T)
slopey=matrix(scan("slopey.txt"), ncol=246, byrow= T)
ny=nrow(dem)
nx=ncol(dem)
# Flip the row order and transpose so the first dimension is x.
demT=t(dem[ny:1,])
image.plot(demT)
# write to netCDF file
#create path and outfile name
outpath <- outdir
outname <- paste("elevslopes",".nc", sep="")
outfname <- paste(outpath, outname, "", sep="")
# Coordinate index vectors.
# NOTE(review): y has 178 entries, so this assumes ny == 178 - confirm
# against the input grids.
x=seq(from=0, to=245, by=1)
y=seq(from=0,to=177, by=1)
# create and write the netCDF file -- ncdf4 version
# define dimensions
xdim <- ncdim_def(name="lon", units='', longname = '', vals= x)
ydim <- ncdim_def(name="lat", units='', longname='', vals= y)
# define variables
fillvalue <- 1e32
dlname <- "elevation"
elev_def <- ncvar_def(name="Elev",units="m",dim=list(xdim,ydim),missval=fillvalue,longname=dlname, prec="double")
dlname <- "slopes in x-direction"
x_def <- ncvar_def(name="slopex",units="m/m",dim=list(xdim,ydim),missval=fillvalue,longname=dlname, prec="double")
dlname <- "slopes in y-direction"
y_def <- ncvar_def(name="slopey",units="m/m",dim=list(xdim,ydim),missval=fillvalue,longname=dlname, prec="double")
vars=list(elev_def, x_def, y_def)
# create netCDF file and put arrays
cdf=nc_create(outfname, vars, force_v4=TRUE, verbose=TRUE)
# put variables
# NOTE(review): elevation is written flipped+transposed (demT), but slopex
# and slopey are written as read; with all variables declared on (lon, lat)
# the slope grids may be mis-oriented relative to Elev - verify before use.
ncvar_put(cdf,elev_def,demT)
ncvar_put(cdf,x_def,slopex)
ncvar_put(cdf,y_def,slopey)
# Get a summary of the created file:
cdf
nc_close(cdf)
| /R_scripts/elev_slopes_netcdf.R | no_license | khm293/Sabino_model | R | false | false | 1,711 | r | #script to make slopes and dem in to a netCDF file
# Script (duplicate corpus copy): write a DEM and its x/y slope grids into a
# netCDF-4 file "elevslopes.nc" with three double variables
# (Elev, slopex, slopey), reading 246-column text grids from the slopes dir.
#load packages
library(abind)
library(ncdf4)
library(lattice)
library(fields)
setwd("/Users/katie/Dropbox/ParFlow Domain/slopes/")
fundir=("/Users/katie/Dropbox/ParFlow Domain/slopes/functions/")
outdir=("/Users/katie/Dropbox/netCDF stuff/netCDF_in_R/")
# byrow=T: each text line becomes one matrix row; grids are 246 columns wide.
dem=matrix(scan("dem.txt"), ncol=246, byrow=T)
slopex=matrix(scan("slopex.txt"), ncol=246, byrow=T)
slopey=matrix(scan("slopey.txt"), ncol=246, byrow= T)
ny=nrow(dem)
nx=ncol(dem)
# Flip north-south and transpose so x runs along the first dimension.
demT=t(dem[ny:1,])
image.plot(demT)
# write to netCDF file
#create path and outfile name
outpath <- outdir
outname <- paste("elevslopes",".nc", sep="")
outfname <- paste(outpath, outname, "", sep="")
# 0-based cell-index coordinates for the 246 x 178 grid.
x=seq(from=0, to=245, by=1)
y=seq(from=0,to=177, by=1)
# create and write the netCDF file -- ncdf4 version
# define dimensions
xdim <- ncdim_def(name="lon", units='', longname = '', vals= x)
ydim <- ncdim_def(name="lat", units='', longname='', vals= y)
# define variables
fillvalue <- 1e32
dlname <- "elevation"
elev_def <- ncvar_def(name="Elev",units="m",dim=list(xdim,ydim),missval=fillvalue,longname=dlname, prec="double")
dlname <- "slopes in x-direction"
x_def <- ncvar_def(name="slopex",units="m/m",dim=list(xdim,ydim),missval=fillvalue,longname=dlname, prec="double")
dlname <- "slopes in y-direction"
y_def <- ncvar_def(name="slopey",units="m/m",dim=list(xdim,ydim),missval=fillvalue,longname=dlname, prec="double")
vars=list(elev_def, x_def, y_def)
# create netCDF file and put arrays
cdf=nc_create(outfname, vars, force_v4=TRUE, verbose=TRUE)
# put variables
# NOTE(review): demT is transposed/flipped but slopex/slopey are written as
# read (178 x 246 vs the declared 246 x 178 dims) -- confirm orientation.
ncvar_put(cdf,elev_def,demT)
ncvar_put(cdf,x_def,slopex)
ncvar_put(cdf,y_def,slopey)
# Get a summary of the created file:
cdf
nc_close(cdf)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dotplot.R
\name{highlight_query}
\alias{highlight_query}
\alias{highlight_target}
\title{Highlight segments of a query or target genome in a dot plot}
\usage{
highlight_query(bed, fill = "yellow", colour = "black", alpha = 0.6)
highlight_target(bed, fill = "yellow", colour = "black", alpha = 0.6)
}
\arguments{
\item{bed}{\code{data.frame} or \code{tbl_df} containing a bed file, as returned by
\code{\link{read_bed}}. Should contain three columns named 'chrom', 'start'
and 'end'}
\item{fill}{character Fill colour for highlight segment}
\item{colour}{character Outline colour for highlight segment}
\item{alpha}{character Opacity ([0-1]) for highlight segment}
}
\description{
This plot is intended to be used in conjunction with \code{\link{dotplot}}.
Adding \code{highlight_query} or \code{highlight_target} to a dotplot function call
(see examples below) will add a rectangular 'highlight' corresponding to a
particular genomic interval in the corresponding genome.
}
\examples{
ali <- read_paf( system.file("extdata", "fungi.paf", package="pafr") )
cen <- read_bed(system.file("extdata", "Q_centro.bed", package="pafr"))
dotplot(ali) + highlight_query(cen)
interval <- data.frame(chrom="T_chr3", start=2000000, end=3000000)
dotplot(ali, label_seqs=TRUE) +
highlight_target(interval)
}
| /man/highlight_dotplot.Rd | no_license | cran/pafr | R | false | true | 1,381 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dotplot.R
\name{highlight_query}
\alias{highlight_query}
\alias{highlight_target}
\title{Highlight segments of a query or target genome in a dot plot}
\usage{
highlight_query(bed, fill = "yellow", colour = "black", alpha = 0.6)
highlight_target(bed, fill = "yellow", colour = "black", alpha = 0.6)
}
\arguments{
\item{bed}{\code{data.frame} or \code{tbl_df} containing a bed file, as returned by
\code{\link{read_bed}}. Should contain three columns named 'chrom', 'start'
and 'end'}
\item{fill}{character Fill colour for highlight segment}
\item{colour}{character Outline colour for highlight segment}
\item{alpha}{character Opacity ([0-1]) for highlight segment}
}
\description{
This plot is intended to be used in conjunction with \code{\link{dotplot}}.
Adding \code{highlight_query} or \code{highlight_target} to a dotplot function call
(see examples below) will add a rectangular 'highlight' corresponding to a
particular genomic interval in the corresponding genome.
}
\examples{
ali <- read_paf( system.file("extdata", "fungi.paf", package="pafr") )
cen <- read_bed(system.file("extdata", "Q_centro.bed", package="pafr"))
dotplot(ali) + highlight_query(cen)
interval <- data.frame(chrom="T_chr3", start=2000000, end=3000000)
dotplot(ali, label_seqs=TRUE) +
highlight_target(interval)
}
|
\name{register.fd}
\alias{register.fd}
\title{
Register Functional Data Objects Using a Continuous Criterion
}
\description{
A function is said to be aligned or registered with a target function
if its salient features, such as peaks, valleys and crossings of
fixed thresholds, occur at about the same argument values as those of
the target. Function \code{register.fd} aligns these features by
transforming or warping the argument domain of each function in a
nonlinear but strictly order-preserving fashion. Multivariate
functions may also be registered. If the domain is time, we say that
this transformation transforms clock time to system time. The
transformation itself is called a warping function.
}
\usage{
register.fd(y0fd=NULL, yfd=NULL, WfdParobj=NULL,
conv=1e-04, iterlim=20, dbglev=1, periodic=FALSE, crit=2)
}
\arguments{
\item{y0fd}{
a functional data object defining one or more target functions for
registering the functions in argument \code{yfd}. If the functions
to be registered are univariate, then \code{y0fd} may contain only a
single function, or it may contain as many functions as are in
\code{yfd}. If \code{yfd} contains multivariate functions, then
\code{y0fd} may either as many functions as there are variables in
\code{yfd}, or it may contain as many functions as are in \code{yfd}
and these functions must then be multivariate and be of the same
dimension as those in \code{yfd}.
If \code{yfd} is supplied as a named argument and \code{y0fd} is
not, then \code{y0fd} is computed inside the function to be the mean
of the functions in \code{yfd}.
If the function is called with a single unnamed argument, and there
is no other argument that is named as \code{y0fd} then this unnamed
argument is taken to be actually \code{yfd} rather than \code{y0fd},
and then also \code{y0fd} is computed to be the mean of the functions
supplied.
}
\item{yfd}{
a functional data object defining the functions to be
registered to target \code{y0fd}. The functions may be either
univariate or multivariate.
If \code{yfd} contains a single multivariate function is to be
registered, it essential that the coefficient array for \code{y0fd}
have class \code{array}, have three dimensions, and that its second
dimension be of length 1.
}
\item{WfdParobj}{
a functional parameter object containing either a single function or
the same number of functions as are contained in \code{yfd}. The
coefficients supply the initial values in the estimation of a
functions $W(t)$ that defines the warping functions $h(t)$ that
register the set of curves. \code{WfdParobj} also defines the
roughness penalty and smoothing parameter used to control the
roughness of $h(t)$.
The basis used for this object must be a B-spline type, and the
order of the B-spline basis must be at least 2 (piecewise linear).
If WFDPAROBJ is not supplied, it is constructed from a bspline basis
of order 2 with 2 basis functions; that is, a basis for piecewise
linear functions. The smoothing parameter lambda for this default
is 0.
}
\item{conv}{
a criterion for convergence of the iterations.
}
\item{iterlim}{
a limit on the number of iterations.
}
\item{dbglev}{
either 0, 1, or 2. This controls the amount information printed out
on each iteration, with 0 implying no output, 1 intermediate output
level, and 2 full output. R normally postpones displaying these
results until the entire computation is computed, an option that it
calls "output buffering." Since the total computation time may be
considerable, one may opt for turning this feature off by
un-checking this box in the "Misc" menu item in the R Console.
}
\item{periodic}{
a logical variable: if \code{TRUE}, the functions are considered to
be periodic, in which case a constant can be added to all argument
values after they are warped.
}
\item{crit}{
an integer that is either 1 or 2 that indicates the nature of the
continuous registration criterion that is used. If 1, the criterion
is least squares, and if 2, the criterion is the minimum eigenvalue
of a cross-product matrix. In general, criterion 2 is to be
preferred.
}
}
\value{
a named list of length 4 containing the following components:
\item{regfd}{
A functional data object containing the registered functions.
}
\item{warpfd}{
A functional data object containing the warping functions $h(t)$.
}
\item{Wfd}{
A functional data object containing the functions $h W(t)$
that define the warping functions $h(t)$.
}
\item{shift}{
If the functions are periodic, this is a vector of time shifts.
}
\item{y0fd}{
The target function object y0fd.
}
\item{yfd}{
The function object yfd containing the functions to be registered.
}
}
\details{
The warping function that smoothly and monotonely transforms the
argument is defined by \code{Wfd} is the same as that defines the
monotone smoothing function in for function \code{smooth.monotone.}
See the help file for that function for further details.
}
\source{
Ramsay, James O., and Silverman, Bernard W. (2005), \emph{Functional
Data Analysis, 2nd ed.}, Springer, New York.
Ramsay, James O., and Silverman, Bernard W. (2002), \emph{Applied
Functional Data Analysis}, Springer, New York, ch. 6 & 7.
}
\seealso{
\code{\link{smooth.monotone}},
\code{\link{smooth.morph}}
\code{\link{plotreg.fd}}
\code{\link{register.newfd}}
}
\examples{
#See the analyses of the growth data for examples.
##
## 1. smooth the growth data for the Berkeley boys
##
# Specify smoothing weight
lambda.gr2.3 <- .03
# Specify what to smooth, namely the rate of change of curvature
Lfdobj.growth <- 2
# Set up a B-spline basis for smoothing the discrete data
nage <- length(growth$age)
norder.growth <- 6
nbasis.growth <- nage + norder.growth - 2
rng.growth <- range(growth$age)
wbasis.growth <- create.bspline.basis(rangeval=rng.growth,
nbasis=nbasis.growth, norder=norder.growth,
breaks=growth$age)
# Smooth the data
# in afda-ch06.R, and register to individual smooths:
cvec0.growth <- matrix(0,nbasis.growth,1)
Wfd0.growth <- fd(cvec0.growth, wbasis.growth)
growfdPar2.3 <- fdPar(Wfd0.growth, Lfdobj.growth, lambda.gr2.3)
hgtmfd.all <- with(growth, smooth.basis(age, hgtm, growfdPar2.3)$fd)
# Register the growth velocity rather than the
# growth curves directly
smBv <- deriv(hgtmfd.all, 1)
##
## 2. Register the first 2 Berkeley boys using the default basis
## for the warping function
##
# register.fd takes time, so use only 2 curves as an illustration
# to minimize compute time in these examples
nBoys <- 2
# Define the target function as the mean of the first nBoys records
smBv0 = mean.fd(smBv[1:nBoys])
# Register these curves. The default choice for the functional
# parameter object WfdParObj is used.
smB.reg.0 <- register.fd(smBv0, smBv[1:nBoys])
# plot each curve. Click on the R Graphics window to show each plot.
# The left panel contains:
# -- the unregistered curve (dashed blue line)
# -- the target function (dashed red line)
# -- the registered curve (solid blue line)
# The right panel contains:
# -- the warping function h(t)
# -- the linear function corresponding to no warping
plotreg.fd(smB.reg.0)
# Notice that all the warping functions all have simple shapes
# due to the use of the simplest possible basis
##
## 3. Define a more flexible basis for the warping functions
##
Wnbasis <- 4
Wbasis <- create.bspline.basis(rng.growth, Wnbasis)
Wfd0 <- fd(matrix(0,Wnbasis,1),Wbasis)
# set up the functional parameter object using only
# a light amount of smoothing
WfdParobj <- fdPar(Wfd0, Lfdobj=2, lambda=0.01)
# register the curves
smB.reg.1 <- register.fd(smBv0, smBv[1:nBoys], WfdParobj)
plotreg.fd(smB.reg.1)
# Notice that now the warping functions can have more complex shapes
\dontrun{
##
## 4. Change the target to the mean of the registered functions ...
## this should provide a better target for registration
##
smBv1 <- mean.fd(smB.reg.1$regfd)
# plot the old and the new targets
par(mfrow=c(1,1),ask=FALSE)
plot(smBv1)
lines(smBv0, lty=2)
# Notice how the new target (solid line) has sharper features and
# a stronger pubertal growth spurt relative to the old target
# (dashed line). Now register to the new target
smB.reg.2 <- register.fd(smBv1, smBv[1:nBoys], WfdParobj)
plotreg.fd(smB.reg.2)
# Plot the mean of these curves as well as the first and second targets
par(mfrow=c(1,1),ask=FALSE)
plot(mean.fd(smB.reg.2$regfd))
lines(smBv0, lty=2)
lines(smBv1, lty=3)
# Notice that there is almost no improvement over the age of the
# pubertal growth spurt, but some further detail added in the
# pre-pubertal region. Now register the previously registered
# functions to the new target.
smB.reg.3 <- register.fd(smBv1, smB.reg.1$regfd, WfdParobj)
plotreg.fd(smB.reg.3)
# Notice that the warping functions only deviate from the straight line
# over the pre-pubertal region, and that there are some small adjustments
# to the registered curves as well over the pre-pubertal region.
}
##
## 5. register and plot the angular acceleration of the gait data
##
gaittime <- as.matrix(0:19)+0.5
gaitrange <- c(0,20)
# set up a fourier basis object
gaitbasis <- create.fourier.basis(gaitrange, nbasis=21)
# set up a functional parameter object penalizing harmonic acceleration
harmaccelLfd <- vec2Lfd(c(0, (2*pi/20)^2, 0), rangeval=gaitrange)
gaitfdPar <- fdPar(gaitbasis, harmaccelLfd, 1e-2)
# smooth the data
gaitfd <- smooth.basis(gaittime, gait, gaitfdPar)$fd
# compute the angular acceleration functional data object
D2gaitfd <- deriv.fd(gaitfd,2)
names(D2gaitfd$fdnames)[[3]] <- "Angular acceleration"
D2gaitfd$fdnames[[3]] <- c("Hip", "Knee")
# compute the mean angular acceleration functional data object
D2gaitmeanfd <- mean.fd(D2gaitfd)
names(D2gaitmeanfd$fdnames)[[3]] <- "Mean angular acceleration"
D2gaitmeanfd$fdnames[[3]] <- c("Hip", "Knee")
# register the functions for the first 2 boys
# argument periodic = TRUE causes register.fd to estimate a horizontal shift
# for each curve, which is a possibility when the data are periodic
# set up the basis for the warping functions
nwbasis <- 4
wbasis <- create.bspline.basis(gaitrange,nwbasis,3)
Warpfd <- fd(matrix(0,nwbasis,nBoys),wbasis)
WarpfdPar <- fdPar(Warpfd)
# register the functions
gaitreglist <- register.fd(D2gaitmeanfd, D2gaitfd[1:nBoys], WarpfdPar,
periodic=TRUE)
# plot the results
plotreg.fd(gaitreglist)
# display horizontal shift values
print(round(gaitreglist$shift,1))
}
\keyword{smooth}
| /fda/man/register.fd.Rd | no_license | sbgraves237/fda | R | false | false | 10,883 | rd | \name{register.fd}
\alias{register.fd}
\title{
Register Functional Data Objects Using a Continuous Criterion
}
\description{
A function is said to be aligned or registered with a target function
if its salient features, such as peaks, valleys and crossings of
fixed thresholds, occur at about the same argument values as those of
the target. Function \code{register.fd} aligns these features by
transforming or warping the argument domain of each function in a
nonlinear but strictly order-preserving fashion. Multivariate
functions may also be registered. If the domain is time, we say that
this transformation transforms clock time to system time. The
transformation itself is called a warping function.
}
\usage{
register.fd(y0fd=NULL, yfd=NULL, WfdParobj=NULL,
conv=1e-04, iterlim=20, dbglev=1, periodic=FALSE, crit=2)
}
\arguments{
\item{y0fd}{
a functional data object defining one or more target functions for
registering the functions in argument \code{yfd}. If the functions
to be registered are univariate, then \code{y0fd} may contain only a
single function, or it may contain as many functions as are in
\code{yfd}. If \code{yfd} contains multivariate functions, then
\code{y0fd} may either as many functions as there are variables in
\code{yfd}, or it may contain as many functions as are in \code{yfd}
and these functions must then be multivariate and be of the same
dimension as those in \code{yfd}.
If \code{yfd} is supplied as a named argument and \code{y0fd} is
not, then \code{y0fd} is computed inside the function to be the mean
of the functions in \code{yfd}.
If the function is called with a single unnamed argument, and there
is no other argument that is named as \code{y0fd} then this unnamed
argument is taken to be actually \code{yfd} rather than \code{y0fd},
and then also \code{y0fd} is computed to be the mean of the functions
supplied.
}
\item{yfd}{
a functional data object defining the functions to be
registered to target \code{y0fd}. The functions may be either
univariate or multivariate.
If \code{yfd} contains a single multivariate function is to be
registered, it essential that the coefficient array for \code{y0fd}
have class \code{array}, have three dimensions, and that its second
dimension be of length 1.
}
\item{WfdParobj}{
a functional parameter object containing either a single function or
the same number of functions as are contained in \code{yfd}. The
coefficients supply the initial values in the estimation of a
functions $W(t)$ that defines the warping functions $h(t)$ that
register the set of curves. \code{WfdParobj} also defines the
roughness penalty and smoothing parameter used to control the
roughness of $h(t)$.
The basis used for this object must be a B-spline type, and the
order of the B-spline basis must be at least 2 (piecewise linear).
If WFDPAROBJ is not supplied, it is constructed from a bspline basis
of order 2 with 2 basis functions; that is, a basis for piecewise
linear functions. The smoothing parameter lambda for this default
is 0.
}
\item{conv}{
a criterion for convergence of the iterations.
}
\item{iterlim}{
a limit on the number of iterations.
}
\item{dbglev}{
either 0, 1, or 2. This controls the amount information printed out
on each iteration, with 0 implying no output, 1 intermediate output
level, and 2 full output. R normally postpones displaying these
results until the entire computation is computed, an option that it
calls "output buffering." Since the total computation time may be
considerable, one may opt for turning this feature off by
un-checking this box in the "Misc" menu item in the R Console.
}
\item{periodic}{
a logical variable: if \code{TRUE}, the functions are considered to
be periodic, in which case a constant can be added to all argument
values after they are warped.
}
\item{crit}{
an integer that is either 1 or 2 that indicates the nature of the
continuous registration criterion that is used. If 1, the criterion
is least squares, and if 2, the criterion is the minimum eigenvalue
of a cross-product matrix. In general, criterion 2 is to be
preferred.
}
}
\value{
a named list of length 4 containing the following components:
\item{regfd}{
A functional data object containing the registered functions.
}
\item{warpfd}{
A functional data object containing the warping functions $h(t)$.
}
\item{Wfd}{
A functional data object containing the functions $h W(t)$
that define the warping functions $h(t)$.
}
\item{shift}{
If the functions are periodic, this is a vector of time shifts.
}
\item{y0fd}{
The target function object y0fd.
}
\item{yfd}{
The function object yfd containing the functions to be registered.
}
}
\details{
The warping function that smoothly and monotonely transforms the
argument is defined by \code{Wfd} is the same as that defines the
monotone smoothing function in for function \code{smooth.monotone.}
See the help file for that function for further details.
}
\source{
Ramsay, James O., and Silverman, Bernard W. (2005), \emph{Functional
Data Analysis, 2nd ed.}, Springer, New York.
Ramsay, James O., and Silverman, Bernard W. (2002), \emph{Applied
Functional Data Analysis}, Springer, New York, ch. 6 & 7.
}
\seealso{
\code{\link{smooth.monotone}},
\code{\link{smooth.morph}}
\code{\link{plotreg.fd}}
\code{\link{register.newfd}}
}
\examples{
#See the analyses of the growth data for examples.
##
## 1. smooth the growth data for the Berkeley boys
##
# Specify smoothing weight
lambda.gr2.3 <- .03
# Specify what to smooth, namely the rate of change of curvature
Lfdobj.growth <- 2
# Set up a B-spline basis for smoothing the discrete data
nage <- length(growth$age)
norder.growth <- 6
nbasis.growth <- nage + norder.growth - 2
rng.growth <- range(growth$age)
wbasis.growth <- create.bspline.basis(rangeval=rng.growth,
nbasis=nbasis.growth, norder=norder.growth,
breaks=growth$age)
# Smooth the data
# in afda-ch06.R, and register to individual smooths:
cvec0.growth <- matrix(0,nbasis.growth,1)
Wfd0.growth <- fd(cvec0.growth, wbasis.growth)
growfdPar2.3 <- fdPar(Wfd0.growth, Lfdobj.growth, lambda.gr2.3)
hgtmfd.all <- with(growth, smooth.basis(age, hgtm, growfdPar2.3)$fd)
# Register the growth velocity rather than the
# growth curves directly
smBv <- deriv(hgtmfd.all, 1)
##
## 2. Register the first 2 Berkeley boys using the default basis
## for the warping function
##
# register.fd takes time, so use only 2 curves as an illustration
# to minimize compute time in these examples
nBoys <- 2
# Define the target function as the mean of the first nBoys records
smBv0 = mean.fd(smBv[1:nBoys])
# Register these curves. The default choice for the functional
# parameter object WfdParObj is used.
smB.reg.0 <- register.fd(smBv0, smBv[1:nBoys])
# plot each curve. Click on the R Graphics window to show each plot.
# The left panel contains:
# -- the unregistered curve (dashed blue line)
# -- the target function (dashed red line)
# -- the registered curve (solid blue line)
# The right panel contains:
# -- the warping function h(t)
# -- the linear function corresponding to no warping
plotreg.fd(smB.reg.0)
# Notice that all the warping functions all have simple shapes
# due to the use of the simplest possible basis
##
## 3. Define a more flexible basis for the warping functions
##
Wnbasis <- 4
Wbasis <- create.bspline.basis(rng.growth, Wnbasis)
Wfd0 <- fd(matrix(0,Wnbasis,1),Wbasis)
# set up the functional parameter object using only
# a light amount of smoothing
WfdParobj <- fdPar(Wfd0, Lfdobj=2, lambda=0.01)
# register the curves
smB.reg.1 <- register.fd(smBv0, smBv[1:nBoys], WfdParobj)
plotreg.fd(smB.reg.1)
# Notice that now the warping functions can have more complex shapes
\dontrun{
##
## 4. Change the target to the mean of the registered functions ...
## this should provide a better target for registration
##
smBv1 <- mean.fd(smB.reg.1$regfd)
# plot the old and the new targets
par(mfrow=c(1,1),ask=FALSE)
plot(smBv1)
lines(smBv0, lty=2)
# Notice how the new target (solid line) has sharper features and
# a stronger pubertal growth spurt relative to the old target
# (dashed line). Now register to the new target
smB.reg.2 <- register.fd(smBv1, smBv[1:nBoys], WfdParobj)
plotreg.fd(smB.reg.2)
# Plot the mean of these curves as well as the first and second targets
par(mfrow=c(1,1),ask=FALSE)
plot(mean.fd(smB.reg.2$regfd))
lines(smBv0, lty=2)
lines(smBv1, lty=3)
# Notice that there is almost no improvement over the age of the
# pubertal growth spurt, but some further detail added in the
# pre-pubertal region. Now register the previously registered
# functions to the new target.
smB.reg.3 <- register.fd(smBv1, smB.reg.1$regfd, WfdParobj)
plotreg.fd(smB.reg.3)
# Notice that the warping functions only deviate from the straight line
# over the pre-pubertal region, and that there are some small adjustments
# to the registered curves as well over the pre-pubertal region.
}
##
## 5. register and plot the angular acceleration of the gait data
##
gaittime <- as.matrix(0:19)+0.5
gaitrange <- c(0,20)
# set up a fourier basis object
gaitbasis <- create.fourier.basis(gaitrange, nbasis=21)
# set up a functional parameter object penalizing harmonic acceleration
harmaccelLfd <- vec2Lfd(c(0, (2*pi/20)^2, 0), rangeval=gaitrange)
gaitfdPar <- fdPar(gaitbasis, harmaccelLfd, 1e-2)
# smooth the data
gaitfd <- smooth.basis(gaittime, gait, gaitfdPar)$fd
# compute the angular acceleration functional data object
D2gaitfd <- deriv.fd(gaitfd,2)
names(D2gaitfd$fdnames)[[3]] <- "Angular acceleration"
D2gaitfd$fdnames[[3]] <- c("Hip", "Knee")
# compute the mean angular acceleration functional data object
D2gaitmeanfd <- mean.fd(D2gaitfd)
names(D2gaitmeanfd$fdnames)[[3]] <- "Mean angular acceleration"
D2gaitmeanfd$fdnames[[3]] <- c("Hip", "Knee")
# register the functions for the first 2 boys
# argument periodic = TRUE causes register.fd to estimate a horizontal shift
# for each curve, which is a possibility when the data are periodic
# set up the basis for the warping functions
nwbasis <- 4
wbasis <- create.bspline.basis(gaitrange,nwbasis,3)
Warpfd <- fd(matrix(0,nwbasis,nBoys),wbasis)
WarpfdPar <- fdPar(Warpfd)
# register the functions
gaitreglist <- register.fd(D2gaitmeanfd, D2gaitfd[1:nBoys], WarpfdPar,
periodic=TRUE)
# plot the results
plotreg.fd(gaitreglist)
# display horizonal shift values
print(round(gaitreglist$shift,1))
}
\keyword{smooth}
|
# plot3.R
# "Plot 3" of the Exploratory Data Analysis course project: energy
# sub-metering over two days of household power-consumption data,
# written to plot3.png.
library(data.table)
## We will be using data from the dates 2007-02-01 and 2007-02-02.
# fread() executes the string as a shell command: grep pre-filters rows whose
# first field is 1/2/2007 or 2/2/2007 (d/m/Y format), so only those two days
# are parsed.  "?" marks missing values in this dataset.  NOTE(review):
# requires a Unix-like shell with grep -- not portable to plain Windows.
data <- fread("grep \"^[12]/2/2007\" exdata_data_household_power_consumption/household_power_consumption.txt",sep=";",na.strings=c("?","NA","")) # the "data.table" package
# Combine the Date (V1) and Time (V2) fields into POSIXlt timestamps.
dateandtime <- strptime(paste(data$V1,data$V2),format="%d/%m/%Y %H:%M:%S")
# grep cannot match the header row, so the columns arrive as V1..V9;
# restore the dataset's documented column names.
setnames(data,names(data),new=c("Date","Time","Global_active_power",
"Global_reactive_power","Voltage",
"Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"))
png("plot3.png")
# Sub_metering_1 in black (col 1), 2 in red (col 2), 3 in blue (col 4).
plot(dateandtime, data$Sub_metering_1, col=1, typ="l",
xlab="", ylab="Energy sub metering")
lines(dateandtime,data$Sub_metering_2, col=2)
lines(dateandtime,data$Sub_metering_3, col=4)
legend(x="topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1), col=c(1,2,4))
dev.off()
dev.off() | /plot3.R | no_license | iizuka-t/ExData_Plotting1 | R | false | false | 920 | r | # plot3.R
# Duplicate corpus copy of plot3.R: plots energy sub-metering for
# 2007-02-01/02 into plot3.png.
library(data.table)
## We will be using data from the dates 2007-02-01 and 2007-02-02.
# fread() runs grep in a shell to keep only rows dated 1/2/2007 or 2/2/2007
# (d/m/Y); "?" marks missing values.  Requires a Unix-like shell.
data <- fread("grep \"^[12]/2/2007\" exdata_data_household_power_consumption/household_power_consumption.txt",sep=";",na.strings=c("?","NA","")) # the "data.table" package
# Combine the Date (V1) and Time (V2) fields into POSIXlt timestamps.
dateandtime <- strptime(paste(data$V1,data$V2),format="%d/%m/%Y %H:%M:%S")
# grep drops the header row, so rename the default V1..V9 columns.
setnames(data,names(data),new=c("Date","Time","Global_active_power",
"Global_reactive_power","Voltage",
"Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"))
png("plot3.png")
# black/red/blue lines for sub-metering 1/2/3, plus a top-right legend.
plot(dateandtime, data$Sub_metering_1, col=1, typ="l",
xlab="", ylab="Energy sub metering")
lines(dateandtime,data$Sub_metering_2, col=2)
lines(dateandtime,data$Sub_metering_3, col=4)
legend(x="topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1), col=c(1,2,4))
dev.off()
# R-Data Types
# Tutorial script: demonstrates R's basic atomic types and container
# classes by assigning an example value of each kind and printing class().
# Logical
v <- TRUE
print(class(v))
# Numeric
v <- 2.5
print(class(v))
# Integer
# (the L suffix makes the literal an integer rather than a double)
v <- 2L
print(class(v))
# Complex
v <- 3 + 2i
print(class(v))
# Character
v <- "h"
print(class(v))
# Raw
# charToRaw() converts each character of "Hello" to its raw byte value.
v <- charToRaw("Hello")
print(class(v))
# Vectors
# c() builds an atomic vector; all elements share one type (character here).
apple <- c('red', 'green', 'yellow')
print(apple)
print(class(apple))
# List
# Lists are heterogeneous: a numeric vector, a string, a scalar, a function.
myList <- list(c(2,3,4), "animal",3.5,sin)
print(myList)
print(class(myList))
# Matrix
# byrow = TRUE fills the 2 x 3 character matrix row by row.
myMatrix <- matrix(c('a','b','c','d','e','f'), nrow = 2, byrow = TRUE, dimnames = NULL)
print(myMatrix)
print(class(myMatrix))
# Factors
# factor() stores the distinct values as levels; nlevels() counts them (3).
apple_colors <- c("green", "red", "yellow", "yellow", "red", "red", "green")
factor_apple <- factor(apple_colors)
print(factor_apple)
print(nlevels(factor_apple))
# Data Frames
# A data.frame holds equal-length columns of possibly different types.
BMI <- data.frame(
gender = c("male", "male", "female"),
height = c(152, 171.5, 165),
weight = c(81, 93, 78),
Age = c(42, 38, 26)
)
print(BMI)
print(class(BMI))
print(class(BMI))
| /tutorialspoint/dataTypes.R | no_license | AdCalzadilla/aprendiendo_R | R | false | false | 960 | r | # R-Data Types
# Logical
v <- TRUE
print(class(v))
# Numeric
v <- 2.5
print(class(v))
# Integer
v <- 2L
print(class(v))
# Complex
v <- 3 + 2i
print(class(v))
# Character
v <- "h"
print(class(v))
# Raw
v <- charToRaw("Hello")
print(class(v))
# Vectors
apple <- c('red', 'green', 'yellow')
print(apple)
print(class(apple))
# List
myList <- list(c(2,3,4), "animal",3.5,sin)
print(myList)
print(class(myList))
# Matrix
myMatrix <- matrix(c('a','b','c','d','e','f'), nrow = 2, byrow = TRUE, dimnames = NULL)
print(myMatrix)
print(class(myMatrix))
# Factors
apple_colors <- c("green", "red", "yellow", "yellow", "red", "red", "green")
factor_apple <- factor(apple_colors)
print(factor_apple)
print(nlevels(factor_apple))
# Data Frames
BMI <- data.frame(
gender = c("male", "male", "female"),
height = c(152, 171.5, 165),
weight = c(81, 93, 78),
Age = c(42, 38, 26)
)
print(BMI)
print(class(BMI))
|
source("GPS-helper.R")
# Convert GPS latitude and longitude into grid coordinate.
# GPSData: matrix/data.frame with column 1 = time stamp, column 2 = latitude,
#          column 3 = longitude (WGS84 degrees, per the conversion below).
# Returns an n x 5 matrix ("Time", "x", "y", "speed", "heading"); only the
# Time/x/y columns are filled here -- speed/heading stay NA for later passes.
calculateTM2 <- function(GPSData){
data <- GPSData[,2:3]
names(data) = c("lat","long")
# get TM2 xy-coord (m)
n <- nrow(data)
TM2 <- array(dim=c(n, 5))
TM2[, 1] <- GPSData[, 1]
# WGS84toTWD97() comes from GPS-helper.R (sourced at the top of this file);
# the 3:2 index order stores its first return value as y (col 3) and the
# second as x (col 2), matching the inline comment.
for(i in 1:n)
TM2[i,3:2] <- WGS84toTWD97(data[i,1], data[i,2]) # Latitude => y, longtitude => x
colnames(TM2) <- c("Time", "x", "y", "speed", "heading")
return (TM2)
}
# Write the TM2 coordinate matrix to "TM2.txt" in the working directory,
# keeping the column header but no row names.
recordTM2 <- function(TM2){
write.table(TM2, file = "TM2.txt", col.names = TRUE, row.names = FALSE)
}
########################################################################
# Compute a signed curvature value at each interior point of a track.
# curvature: matrix with columns (Time, x, y, curvature); x/y in metres.
# For each point i, a/b/c are the side lengths of the triangle formed with
# its neighbours, theta is cos(angle at i) by the law of cosines, and
# column 4 gets 1/radius of the circle through the three points.  The first
# and last rows are left NA.
calculateCurvature <- function(curvature){
n <- nrow(curvature)
for(i in 2:(n-1)){
a <- ((curvature[i - 1, 2] - curvature[i, 2])^2+(curvature[i - 1, 3] - curvature[i, 3])^2)^0.5
b <- ((curvature[i, 2] - curvature[i + 1, 2])^2+(curvature[i, 3] - curvature[i + 1, 3])^2)^0.5
c <- ((curvature[i - 1, 2] - curvature[i + 1, 2])^2+(curvature[i - 1, 3] - curvature[i + 1, 3])^2)^0.5
theta <- (a^2 + b^2 - c^2) / (2 * a * b)
# NOTE(review): ^ binds tighter than /, so the denominator is
# (2*(1-theta^2))^0.5 = sqrt(2)*sin(angle); the circumradius formula
# c/(2*sin(C)) would need 2*(1-theta^2)^0.5 instead -- confirm whether
# the sqrt(2) scaling is intended.
curvature[i, 4] <- 1 / (c / (2 * (1 - theta^2)) ^ 0.5)
# make cross to check direction whether it is left or right
# (theta outside [-1,1] or degenerate triangles yield NaN, hence the guard)
if(!is.na(curvature[i, 4])){
# z-component of the cross product of consecutive segment vectors;
# its sign tells the turn direction, and the value is negated when
# positive so the two directions get opposite curvature signs.
x1 <- curvature[i, 2] - curvature[i - 1, 2]
x2 <- curvature[i + 1, 2] - curvature[i, 2]
y1 <- curvature[i, 3] - curvature[i - 1, 3]
y2 <- curvature[i + 1, 3] - curvature[i, 3]
if(x1 * y2 - x2 * y1 > 0){
curvature[i, 4] <- curvature[i, 4] * -1
}
}
}
return (curvature)
}
# Convert the curvature matrix to a data frame, label its columns, and
# save it as "curvature.txt" in the working directory.
recordCurvature <- function(curvature){
curvature <- as.data.frame(curvature)
names(curvature) <- c("Time", "x", "y", "curvature")
write.table(curvature, file = "curvature.txt", col.names = TRUE, row.names = FALSE)
}
#########################################################################
# Fill column 4 of `velocity` with speed in km/h, estimated from the
# displacement between the two samples roughly gapN steps before and after
# each row (indices clamped to the track ends).
# velocity: matrix with columns (Time, x, y, speed); x/y in metres.
# gapN: window width in samples; the displacement is divided by gapN,
#       which assumes one sample per unit of time (seconds).
calculateVelocity <- function(velocity, gapN = 1){
row_count <- nrow(velocity)
half <- gapN / 2
for(idx in seq_len(row_count)){
# bracketing sample indices, truncated toward zero then clamped in range
lo <- max(1L, as.integer(idx - half))
hi <- min(row_count, as.integer(idx + half))
# displacement components in kilometres between the bracketing samples
dx_km <- (velocity[hi, 2] - velocity[lo, 2]) / 1000
dy_km <- (velocity[hi, 3] - velocity[lo, 3]) / 1000
# distance (km) covered over gapN seconds, scaled to km per hour
velocity[idx, 4] <- sqrt(dx_km^2 + dy_km^2) / gapN * 3600
}
velocity
}
# Persist velocity results to "velocity.txt" in the working directory.
# velocity: matrix with columns (Time, x, y, speed), as produced by
# calculateVelocity().
# Bug fix: the original wrote the global `curvature` object to
# "curvature.txt" instead of this function's `velocity` argument.
recordVelocity <- function(velocity){
velocity <- as.data.frame(velocity)
names(velocity) <- c("Time", "x", "y", "speed")
write.table(velocity, file = "velocity.txt", col.names = TRUE, row.names = FALSE)
}
################################################################################## | /Special topic about car/GPS.R | no_license | Sharknevercries/NCU-works | R | false | false | 2,793 | r | source("GPS-helper.R")
# Convert GPS latitude and longitude into grid coordinate.
# GPSData: column 1 = time stamp, column 2 = latitude, column 3 = longitude.
# Returns an n x 5 matrix ("Time","x","y","speed","heading"); only the
# Time/x/y columns are filled here, the remaining columns stay NA.
calculateTM2 <- function(GPSData){
data <- GPSData[,2:3]
names(data) = c("lat","long")
# get TM2 xy-coord (m)
n <- nrow(data)
TM2 <- array(dim=c(n, 5))
TM2[, 1] <- GPSData[, 1]
# WGS84toTWD97() is defined in GPS-helper.R (sourced above); the 3:2 index
# order stores its first return value as y (col 3) and second as x (col 2).
for(i in 1:n)
TM2[i,3:2] <- WGS84toTWD97(data[i,1], data[i,2]) # Latitude => y, longtitude => x
colnames(TM2) <- c("Time", "x", "y", "speed", "heading")
return (TM2)
}
# Write the TM2 coordinate matrix to "TM2.txt" in the working directory.
recordTM2 <- function(TM2){
write.table(TM2, file = "TM2.txt", col.names = TRUE, row.names = FALSE)
}
########################################################################
# Compute a signed curvature value at each interior point of a track
# (duplicate corpus copy).  a/b/c are triangle side lengths around point i,
# theta is cos(angle at i) by the law of cosines, and column 4 receives the
# reciprocal radius of the circle through the three points.  First and last
# rows stay NA.
calculateCurvature <- function(curvature){
n <- nrow(curvature)
for(i in 2:(n-1)){
a <- ((curvature[i - 1, 2] - curvature[i, 2])^2+(curvature[i - 1, 3] - curvature[i, 3])^2)^0.5
b <- ((curvature[i, 2] - curvature[i + 1, 2])^2+(curvature[i, 3] - curvature[i + 1, 3])^2)^0.5
c <- ((curvature[i - 1, 2] - curvature[i + 1, 2])^2+(curvature[i - 1, 3] - curvature[i + 1, 3])^2)^0.5
theta <- (a^2 + b^2 - c^2) / (2 * a * b)
# NOTE(review): ^ binds tighter than /, so the denominator is
# (2*(1-theta^2))^0.5 rather than 2*(1-theta^2)^0.5 -- confirm intended.
curvature[i, 4] <- 1 / (c / (2 * (1 - theta^2)) ^ 0.5)
# make cross to check direction whether it is left or right
if(!is.na(curvature[i, 4])){
# z-component of the cross product of consecutive segment vectors
# determines the turn direction; positive values get a negated sign.
x1 <- curvature[i, 2] - curvature[i - 1, 2]
x2 <- curvature[i + 1, 2] - curvature[i, 2]
y1 <- curvature[i, 3] - curvature[i - 1, 3]
y2 <- curvature[i + 1, 3] - curvature[i, 3]
if(x1 * y2 - x2 * y1 > 0){
curvature[i, 4] <- curvature[i, 4] * -1
}
}
}
return (curvature)
}
# Write the curvature table to "curvature.txt" (header row, no row names).
recordCurvature <- function(curvature) {
  df <- as.data.frame(curvature)
  names(df) <- c("Time", "x", "y", "curvature")
  write.table(df,
              file = "curvature.txt",
              col.names = TRUE,
              row.names = FALSE)
}
#########################################################################
# Estimate per-sample speed (km/h) from planar coordinates in metres, using
# a window of gapN samples centred on each row; column 4 receives the result.
# NOTE(review): scaling by 3600 assumes one sample per second — confirm.
calculateVelocity <- function(velocity, gapN = 1) {
  n <- nrow(velocity)
  for (i in 1:n) {
    left <- as.integer(i - gapN / 2)
    right <- as.integer(i + gapN / 2)
    # Clamp the window to the table bounds.
    if (left < 1) left <- 1
    if (right > n) right <- n
    # Metres -> kilometres over the window.
    dx_km <- (velocity[right, 2] - velocity[left, 2]) / 1000
    dy_km <- (velocity[right, 3] - velocity[left, 3]) / 1000
    velocity[i, 4] <- sqrt(dx_km^2 + dy_km^2) / gapN * 3600
  }
  velocity
}
# Persist velocity samples to "velocity.txt" as a whitespace-separated table.
#
# Fixes a copy/paste bug: the original wrote the `curvature` object (undefined
# in this scope) to "curvature.txt" instead of writing `velocity`, which would
# either error or silently clobber the curvature output file.
recordVelocity <- function(velocity) {
  velocity <- as.data.frame(velocity)
  names(velocity) <- c("Time", "x", "y", "speed")
  write.table(velocity, file = "velocity.txt", col.names = TRUE, row.names = FALSE)
}
################################################################################## |
# load and if needed install package(s)
#
# Author: lbeaulaton
###############################################################################
#' @title Load library
#' @description load and if needed install package(s)
#' @param library name of the library/ries to be loaded
load_library <- function(library) {
  # Install whatever is not yet present, with dependencies. Spells out
  # `dependencies = TRUE` (original used partial matching `dep = T`, and
  # `T` is a reassignable binding rather than the TRUE constant).
  missing_pkgs <- library[!library %in% installed.packages()[, "Package"]]
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs, dependencies = TRUE)
  }
  # Attach each requested package. Iterating the vector directly avoids the
  # original's 1:length(library) trap: for an empty vector that iterated
  # over c(1, 0) and errored on library[1].
  for (pkg in library) {
    require(pkg, character.only = TRUE)
  }
}
#' @title load_package function, same as above but individual, and not using installed.packages
#' @description load and if needed install package(s)
#' @param x name of the library/ries to be loaded
# Load a single package by name, installing it from CRAN first if needed.
load_package <- function(x)
{
# Guard: the package name must be a character string, not a bare symbol.
if (!is.character(x)) stop("Package should be a string")
# require() returns FALSE (with a warning) when the package is not installed.
if (!require(x,character.only = TRUE))
{
# Install into the first library path from the US CRAN mirror, then retry;
# a second failure means the package does not exist (or the install failed).
install.packages(x, lib=.libPaths()[1],repos="http://cran.us.r-project.org", dep=TRUE)
if(!require(x,character.only = TRUE)) stop("Package not found")
}
} | /R/Rmarkdown/utilities/load_library.R | no_license | ices-eg/wg_WGEEL | R | false | false | 1,032 | r | # load and if needed install package(s)
#
# Author: lbeaulaton
###############################################################################
#' @title Load library
#' @description load and if needed install package(s)
#' @param library name of the library/ries to be loaded
load_library <- function(library) {
  # Install whatever is not yet present, with dependencies. Spells out
  # `dependencies = TRUE` (original used partial matching `dep = T`, and
  # `T` is a reassignable binding rather than the TRUE constant).
  missing_pkgs <- library[!library %in% installed.packages()[, "Package"]]
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs, dependencies = TRUE)
  }
  # Attach each requested package. Iterating the vector directly avoids the
  # original's 1:length(library) trap: for an empty vector that iterated
  # over c(1, 0) and errored on library[1].
  for (pkg in library) {
    require(pkg, character.only = TRUE)
  }
}
#' @title load_package function, same as above but individual, and not using installed.packages
#' @description load and if needed install package(s)
#' @param x name of the library/ries to be loaded
# Load a package, installing it from the US CRAN mirror first when missing.
# Errors when the input is not a string or when the install does not yield
# a loadable package.
load_package <- function(x) {
  if (!is.character(x)) stop("Package should be a string")
  loaded <- require(x, character.only = TRUE)
  if (!loaded) {
    # Install into the first library path, then retry once.
    install.packages(x, lib = .libPaths()[1], repos = "http://cran.us.r-project.org", dep = TRUE)
    if (!require(x, character.only = TRUE)) stop("Package not found")
  }
}
0.15134928934065528 0.16923374403608815 0.29405604975036537 0.5453877916979596 0.740269764474385 0.7227189430779608 0.783288431508119 0.8596583566574633 0.4406391355546023 0.6278452086400551 0.35540091250302586 0.9779152850210026 0.3472763887137903 0.8293192957485583 0.5969423537395386 0.2923752614591918 0.8276761148186407 0.9738780243927885 0.2605199781183183 0.6058862957489728 0.43147579160962646 0.3280098937570334 0.6802026499770367 0.36947784340057366 0.6488391115524473 0.056959871961363984 0.006925052996174519 0.22163900199226605 0.7746216807696157 0.4141202938849511 0.6228095135757374 0.34375989561878717 0.847100029733151 0.6408872855004846 0.945862135014187 0.16277261404785792 0.873207895890835 0.4092884127153995 0.7202885058965115 0.22987915833275996 0.32663902146946544 0.45316423496561253 0.8027132823612538 0.6833567426018462 0.2108387710995443 0.6284126237230672 0.400877254453715 0.5637805951549757 0.8158215134329276 0.018781529072221193 0.6989245104391518 0.3916076771554198 0.6659144847877265 0.8609224644141709 0.8121810246467727 0.8399150121086763 0.054943251169296436 8.930768506568709E-4 0.05436572544486862 0.039188451745382546 0.9960374474686887 0.31402456702307446 0.5714635257283692 0.036409473601139664 0.3751498532593581 0.02104537729693745 0.49902910930316624 0.19728852077321268 0.5905462289341923 0.1155798339512859 0.9334794494977533 0.8883454815906879 0.9849365514598085 0.18161659544394737 0.17297200181510897 0.6046395569353348 0.932583201895332 0.7242683894992861 0.879754394566761 0.8193013945498483 0.5306352271916501 0.7631212888795492 0.04341085115422194 0.22262354424587338 0.4621020830146685 0.8446980832077167 0.8832192185794456 0.6913151642210303 0.7472941299341299 0.8190370014750399 0.11787212211381914 0.2826294322754068 0.7812215708635728 0.6951134563379917 0.35210421046260165 0.7834239040200454 0.26635705261552733 0.6408093471440188 0.6823067108854267 0.8608144128382796 0.39333170129654405 0.7253607307420218 0.007000117637488312 
0.6589362255227986 0.8965481765732677 0.3156912714038709 0.9430738377282262 0.7654264056269591 0.41735854608175305 0.13276561989240787 0.6949917555461664 0.8315937451317738 0.9443685043578994 0.8500731955163192 0.7017010035623465 0.7718437767730896 0.9685941062475726 0.7735797820931137 0.39477066072805045 0.39077809223840887 0.39061057724115067 0.5809601705635072 0.9413259892965111 0.49048861606341265 0.14749736126047042 0.31765269329023194 0.416632969285503 0.9959194260972141 0.9892974031531829 0.8482521129367111 0.4537271039432562 0.4027696638454282 0.8278726027877118 0.7439844391278878 0.05126885531420511 0.9002630833003733 0.6054518672261637 0.15048358387027216 0.8858483383350633 0.8502114592201261 0.6858316176572741 0.5248164644392163 0.17124962918074393 0.9141501171995184 0.3581032435732733 0.2566794154658476 0.8347251611080428 0.01192571860027758 0.1609457966143556 0.09591599588429311 0.5391190164876639 0.4679684629221297 0.38493637267510694 0.9969820125402167 0.869717546711288 0.1555811362520041 0.2909943833095945 0.46071063203257334 0.8259944270357804 0.1849306904949254 0.2949905774880075 0.7047071915352887 0.6109563669893062 0.6970281001858529 0.6117151561852162 0.02592921148248839 0.5945850102954855 0.6238346550386018 0.6258137559864184 0.5324843187683221 0.2429093859183581 0.8537344032563869 0.5629638694529346 0.016405281952401318 0.7505322694439404 0.10196326731660543 0.8714785965939216 0.11513718457790856 0.091973190209791 0.9271882267696819 0.09268854466414911 0.06669957150821626 0.21597969520375704 0.7582798202621702 0.8978666596888553 0.3019618599314675 0.14699721340480776 0.4994246925245748 0.4001250657261337 0.8371973601787457 0.829277809938255 0.26707472968785273 0.811626906560726 0.9310578751922616 0.6516461042173706 0.07883211902602605 0.009620116710693738 0.7523372499816805 0.7247305095441868 0.8622205047814067 0.5992146797736324 0.8683686919401926 0.147506753211876 0.9803944342586317 0.6164878780560873 0.20862762649203748 0.696289309663859 
0.5821905607230724 0.10241761945536587 0.7840880236112268 0.5968462956275233 0.4755257540987774 0.3963065577615863 0.8501068703916743 0.29910151041741007 0.8959987113798626 0.1467745865236313 0.3890881174107419 0.4639270393575652 0.49665358620405164 0.7190612998353457 0.7589037089892952 0.69281862540685 0.12021509597295144 0.8892937867954103 0.10402620308835375 0.8837849413691669 0.43170676531595753 0.45953933052812 0.658553863007083 0.48057140956759337 0.28721514670575155 0.7018767406856155 0.38378548179983973 0.29737766352844774 0.7936644680558324 0.3453839665698788 0.4777293860739634 0.04751749634295854 0.3748177610307165 0.15685379325215842 0.16990677868732618 0.18200848093914623 0.8003224206005933 0.034213412997089576 0.8720368149687336 0.15286816502188227 0.05746872674891079 0.9310378316453222 0.19366721005161758 0.7559323047274783 0.14943280378041202 0.8612109226336531 0.5515403268873841 0.7793001816600968 0.38912585419302603 0.6947023294016534 0.10246204090189959 0.7860502041403399 0.5243737678192608 0.4485922075603559 0.5116663719551353 0.49365946539301775 0.07800225856865783 0.3018898556490448 0.6321460614049496 0.8775983993167811 0.615505083612174 0.6183566671852478 0.5151457909930021 0.8228621435809774 0.26332330048078856 0.8484574741370255 0.29799062076618443 0.8747382064574487 0.6923660588270053 0.2997285989212879 0.4226499687157935 0.39889984445260185 0.9542545143393767 0.41426523721415665 0.3269127290210684 0.25095315262108664 0.011872044558847272 0.37269438238896135 0.21977680140336497 0.8768551706665761 0.5411557569995201 0.5559139239950674 0.803632179004692 0.6181593767943402 0.5976655341650258 0.6390424215484029 0.8380432750911875 0.8798336883675332 0.03931851144932097 0.8664383544988984 0.25341349385881795 0.7334357505307897 0.17577860469192708 0.362311425896793 0.11754048547611473 0.4747186657289567 0.9753421664967855 0.16836264506852516 0.13404821275995227 0.6322798720583661 0.3017622927396397 0.0680540461035627 0.5089075454061284 
0.932654783800123 0.07630593159740684 0.15652325256459088 0.9597344319864411 0.7672959722703001 0.9644398116378606 0.5458212142511744 0.36228065073419347 0.7781671952110039 0.1718921705208587 0.5570666867337812 0.02950935987731429 0.28122165390855747 0.2014394029447224 0.24427434561504568 0.9008733555242345 0.380821937485317 0.9696178949548353 0.11902752217853263 0.9828915420305544 0.7931987349451138 0.5116632299530938 0.5008310427230287 0.22684463286316914 0.5933342627801635 0.6133301495904028 0.004893313656125886 0.6553886568545436 0.32719540804266334 0.09984854580026314 0.29725075082285146 0.9571105711013322 0.19003086845069528 0.18512202233631503 0.3965625900267403 0.9565273813581746 0.2614852180681121 0.23837747681033294 0.48256660899496506 0.33675639353393794 0.10957238909257516 0.5608252421746578 0.38310219530229195 0.7430613390520862 0.42808961505503584 0.28149149720304256 0.654769744873527 0.9277046140930081 0.7558424399344792 0.27923561401753616 0.44671012976389013 0.9100137565063817 0.7410900241727074 0.11706701775473272 0.14265229446586636 0.22029590530370668 0.8609093710677268 0.33522776716723013 0.11329018195147844 0.1722106243704521 0.9827158846240687 0.091108201798073 0.7792153428790969 0.7654388412517965 0.6639670447370284 0.05691516126572849 0.5840447652638132 0.25896090744387434 0.6261147405912846 0.6901133044863957 0.408048059993644 0.6749846522319016 0.4101608920369869 0.47701184868780777 0.41449194449854376 0.4945399795836207 0.8237106796890606 0.5164708796772549 0.31408418963944884 0.5879833847912919 0.42320536007266496 0.8585521533304041 0.5573130437188033 0.9871325821673871 0.9400575671228109 0.3420419789977017 0.020142441986095938 0.4504776112894524 0.25569926669917453 0.2851243549705108 0.3354371709533642 0.8988914446453724 0.2580930059334092 0.27508456078602284 0.1406565528218574 0.7064900305340904 0.6710451217445385 0.47577502343209 0.16176865623599512 0.5197677839439345 0.7376994342727077 0.3256765538474 0.2340018053020605 
0.7478612004713907 0.4608785220488456 0.23473984586507046 0.07196388005210297 0.11777029064002809 0.9554786998734861 0.9047936521572074 0.8448237703911885 0.7205934688196889 0.351914401930269 0.19615743491472737 0.3866638090732656 0.7883083641844352 0.6046461575795695 0.04219814388836973 0.17777587531460903 0.03535773750514681 0.5061156727162619 0.7739221771513527 0.06958797864025823 0.775276063531663 0.18395068225384192 0.3582949924575325 0.09407187661930483 0.17981879151247038 0.36809107020340326 0.4768327361983845 0.59789702118614 0.9865331836550318 0.7545268887990494 0.0751115400216924 0.37225949698221217 0.6804359844714496 0.8636282577742702 0.9991919346527752 0.4112290851792352 0.6764394996217313 0.893585586530676 0.6015569542624893 0.4246215333335418 0.3113951233510671 0.20326834093313562 0.7470147856979584 0.2558170724142206 0.5180983726395331 0.22384896080245886 0.4781176026252951 0.9106106432796219 0.8983279795035073 0.7948282060673159 0.7100004506572624 0.9900926417281805 0.5379662142353546 0.14763665068485865 0.7161449442368775 0.9387240950408987 0.31447570801406766 0.018776816844227606 0.8394248710363834 0.6681831693623639 0.6354838167128443 0.6645087909139565 0.8168917138868418 0.7982246804390499 0.5169522226196929 0.8703717990945914 0.34210984385219056 0.9677082026423719 0.6603861131044112 0.4013727606058004 0.005453040450691615 0.41211409850059133 0.7207640838236173 0.7224625742517088 0.5506250429273127 0.2782813662295093 0.27034600968641953 0.2339591481601283 0.4647686243170015 0.9527649570568675 0.05824288041360848 0.7128660667234499 0.2798794180494526 0.01712408625267814 0.04160942796767597 0.12857393561880892 0.6048420126995127 0.31161782656767467 0.32154650554910646 0.5102216550488083 0.8302575731546291 0.98819739704735 0.5552769831530932 0.08303261306860699 0.6523336114867061 0.36434942649318425 0.6358854938892666 0.46223119104998533 0.04745830794193007 0.8512581138546065 0.4318889219025973 0.2956828016705182 0.09568339055327846 
0.2229182517815428 0.16113075387197517 0.38019068977663517 0.09919066330865611 0.017245479744594894 0.5369696926108981 0.9414213715767618 0.7709196455145925 0.9158938315555272 0.16945879456061796 0.46371950700265074 0.8788843644407962 0.5364047414807418 0.830188137728838 0.18361253646012998 0.6233100038165256 0.18847612873130615 0.09669101213667253 0.11959386784244519 0.19657054584009848 0.7955536508303611 0.5807895397909083 0.30081309718129723 0.4300110604935302 0.36361084796959264 0.4149827554752935 0.6785903471802703 0.16708922743958154 0.09089839819206402 0.921956188672483 0.057210966141498676 0.018872660123977147 0.45560743636372225 0.4245191456638927 0.2642460686219782 0.45577965705674983 0.3928051017054792 0.9667520833431681 0.6956833460118579 0.768553351876624 0.9776656262213257 0.31187303362475693 0.31744148343308953 0.2918097871706816 0.8689330532398838 0.958308636805495 0.18238089555245007 0.42510302672670197 0.13471699887032762 0.07268219736157755 0.5307123099080598 0.40930120243792956 0.9821360306917699 0.17690712414522325 0.5817438762327694 0.5201691966802376 0.34298764981394014 0.1463014237612532 0.19619660639331515 0.32640569887817705 0.22953801954897834 0.5766146157226129 0.6908567837300399 0.9259828878321975 0.26768838201131573 0.8600860176772955 0.22112340194675695 0.6224897938916862 0.8391894072822641 0.3741290236722228 0.31204749526994446 0.33228080826275064 0.8202386044660767 0.030962867513939507 0.31474913352778977 0.12358334568774065 0.9907100605771518 0.06249026451091533 0.7926374890402713 0.680319531629238 0.8064300127221001 0.32194534926696483 0.10064790391863687 0.15624861344018193 0.3763253564175243 0.5723131287687213 0.7107489803775399 0.839138469216852 0.16693674253883417 0.20124841430716955 0.15826227865757725 0.28002595910098504 0.1687017334959966 0.5531984059568281 0.8360755533088491 0.059227453262002405 0.0015776188601083208 0.601659911894503 0.24762025620525774 0.15861254453971374 0.7119875294887145 0.9979786529994705 
0.7445223780181651 0.9887554137387278 0.8187374223167132 0.6278246481709913 0.22112534569155162 0.47992999397180147 0.5024659612422716 0.6824544168365069 0.7156224065547808 0.2875163365005794 0.7013528475307759 0.9090970314547874 0.3763252598042456 0.0852377106196276 0.27522581224470877 0.1432776942042323 0.7916344510886472 0.12374878380265886 0.3308697997907254 0.4456192302665718 0.25356625402457567 0.7174476760083011 0.26325679618699904 0.1666384020483077 0.12723002131804706 0.08898518744797901 0.06057809924564339 0.04528022452405933 0.13243868682612003 0.048797234575608694 0.9453752429433675 0.010410485114164314 0.00867440065122016 0.3160870540217997 0.4797298592081871 0.6992128029957078 0.7309203924578119 0.898035997471842 0.8493866361925961 0.8324135906309563 0.17140886011914813 0.8356227618446818 0.15984779397939364 0.4934842227255102 0.8611242083373837 0.7972832266679306 0.22689132701149495 0.04744691428267711 0.29493986962072116 0.4557435303143692 0.5246297451362927 0.9046370247264233 0.045555585574331925 0.3187117810935314 0.6661394534737901 0.5857510356238045 0.8039418403790458 0.700714367927113 0.03485565934389734 0.5767362208432121 0.3477866917039394 0.8212150529787255 0.9593414129373538 0.27555441418831483 0.35447922095721096 0.593256138442088 0.12970320977495176 0.9275789810038151 0.9214534333403439 0.5272130431762007 0.7908403417361185 0.8223604762081411 0.6342548159884001 0.3158546648604308 0.615878420858875 0.18172329653033037 0.03148139804825756 0.08975383227421296 0.06255237526489477 0.10763182162888374 0.3557899339018279 0.03619358437725051 0.445967421388053 0.6779916667723407 0.5514469270744894 0.06006667823359646 0.9230814635484027 0.7795275036905895 0.5714431508430051 0.4318669261322069 0.07741567825234141 0.549106158073808 0.19097989193357912 0.915723153821305 0.8169146270414426 0.35834984370618317 0.6891734535320003 0.6995275959257878 0.853540947588636 0.12111099409496451 0.957114110817214 0.5595622789022715 0.12010406267306739 
0.3293569940756883 0.8179545259064595 0.0836284475320318 0.09798607954953831 0.6302391382591379 0.6034917915572233 0.44953614281845933 0.17884233505897007 0.9455974513985006 0.32174602424944254 0.2099115212778041 0.8640180699401661 0.1048787814767812 0.07132824258562398 0.7696322624895299 0.8173367872234657 0.7134227401724301 0.7109281809657415 0.7588023157838847 0.5300796215870037 0.5102963523788349 0.17604303745583672 0.3020286573739428 0.6313897559325716 0.7833347297120974 0.25010614050028224 0.26386506841874835 0.682999258510558 0.50377206006924 0.04346406063306185 0.9106383265091678 0.6340902863953068 0.7188233800923061 0.9111901216931182 0.07047521745630059 0.9802664420302258 0.3978042215502833 0.04951031464413058 0.7961988479995021 0.0700241326003932 0.36585189383903793 0.5506577579105523 0.7398654908750264 0.932777947939979 0.17903879361252018 0.26726028137326685 0.9330639724346895 0.464408028618122 0.7956445401824698 0.43990480610444316 0.4337017426357518 0.7994615590024993 0.6805801750784476 0.6753116058353693 0.347852833750285 0.22355447324218325 0.5434345681789138 0.5275903302700721 0.06434482491328208 0.035183577769529406 0.25533964703760603 0.6313385152336382 0.15011346274844783 0.3530734081819953 0.6158272743504062 0.0628830347423659 0.24595477358131057 0.9833461320196877 0.5480776287209902 0.3647533153278608 0.47515672597900727 0.1102168608169769 0.9894353803785229 0.458514532425887 0.3315934687659897 0.7545808299755736 0.21379607939954692 0.49237816085334485 0.7492168760105813 0.656773043482009 0.5734426135155927 0.5225460520350307 0.07148215952626913 0.4829447599431562 0.6801372016028968 0.8110332855546185 0.9370949624514655 0.028915853635657962 0.8587516245203484 0.437754704715969 0.6775699623223271 0.4010252052707558 0.3014447238889951 0.8709478811233096 0.6824415505203923 0.9904163637379543 0.7263726498712435 0.11754579177503954 0.3295696850772978 0.42535922573280516 0.5021441345277041 0.0839002501885745 0.5008347059542336 0.5856222228375593 
0.13770937343944378 0.8172971323240865 0.4076171386476837 0.1935746625002145 0.7570979431633168 0.36999296816407323 0.4358097005033289 0.45861149163988546 0.6793975822614341 0.13004170630293677 0.9867102949582381 0.7446582633171103 0.5065921555544907 0.5969695971434774 0.8379966277695098 0.2549269299135527 0.1853537793984068 0.7453950312737755 0.3780843716603196 0.2837578054207379 0.3536107015583032 0.3159792752443714 0.17752221227414966 0.6722972356550041 0.9086891026532143 0.6006334119854104 0.7526794037351777 0.7229238457690723 0.1410532787505694 0.5476951657989906 0.04983194935724411 0.9767512325169201 0.9661455556698577 0.3028292799365122 0.13185612113085443 0.9391980282669918 0.7565536722240999 0.6226932164515263 0.785088925850078 0.050531437865032114 0.027710131154291995 0.9509499183980147 0.2805167450082219 0.9414534359945276 0.7625541247157487 0.3299974882319242 0.32130561558467585 0.2882132240358094 0.4740637805874576 0.7368258124755026 0.2877868122574859 0.38739404478791317 0.19929255683360791 0.8776980799828108 0.25136479851691573 0.9074356698834941 0.4945206984106095 0.9538949718258348 0.8704142310787415 0.14211872892404487 0.15835839779971328 0.4195482285844414 0.20408587517476262 0.49065059086044027 0.8481149421090765 0.2926291303327049 0.6840418933547486 0.8196567485992066 0.9744737147209087 0.1514465432990777 0.9694928071648856 0.824295881972136 0.2604900573402016 0.551548654643125 0.17751011366473912 0.49888050522879634 0.13315551256194402 0.23171081311865804 0.028440022504342632 0.3517993861793802 0.6792482005403713 0.14273186470385868 0.7753450791063915 0.055202099378138136 0.2419159002840331 0.02904951680052259 0.3967525388623382 0.8068196088535095 0.8604151359620422 0.020725262923662502 0.42358948291723697 0.7994597679237572 0.6626557047503036 0.45213739251866736 0.944036323103771 0.8431294536042441 0.008676951375160447 0.9137609338939057 0.7841319656468891 0.3654004893860443 0.6529085631374932 0.3647011893647124 0.597315505298027 
0.7275492926354661 0.4440452195611714 0.9861873803254305 0.6126909333743319 0.03716513950649181 0.11359112760938739 0.34002197997779715 0.08965236872718896 0.41233567360837076 0.5176065615927231 0.3110197367592562 0.10560986100939584 0.9581822103389436 0.2909159883082586 0.2587602384939749 0.16390650308911536 0.5534317750016267 0.2490128249372 0.7815162749095004 0.25061949371046743 0.9128765568844295 0.030123373102896922 0.5040098768298074 0.39178948201381836 0.2909858448927609 0.40836843587421845 0.34068658340257785 0.4667238383907215 0.6225992515942512 0.42868799918149403 0.7574875403482754 0.018371116812465837 0.8585442045369832 0.015170803989094539 0.6576385071344032 0.06836581283033971 0.7471850480237928 0.9866572317855172 0.6507634080757722 0.49219356562104977 0.3373245375776026 0.476960315880674 0.6296694599776548 0.3390290196839103 0.03354330299372554 0.6017767985129656 0.7754398208700387 0.5911497596399808 0.12197615802296013 0.840335877801919 0.33900878627889564 0.24943166936266437 0.3026890150032716 0.007060549763271418 0.8061837815421291 0.3042820456520382 0.9142333131001585 0.8769927604384818 0.3431170754502517 0.10546917123480926 0.21661998471438892 0.4984600294645015 0.7405197769921364 0.8796576778832909 0.31086781923974016 0.9774382063547601 0.4274490678425379 0.6610614990799991 0.016446479580060402 0.05029062232660286 0.5092892271320891 0.6984770956423209 0.30234721396146436 0.07504197799575418 0.19958598914930936 0.4810615149650781 0.6731794754907753 0.16607015352005516 0.46168239225755714 0.8855714037355277 0.2486047442670779 0.07307726952132187 0.15364392917698655 0.5893805586428116 0.14258387986412568 0.06242649095683872 0.47874957263260043 0.8164452230618434 0.1483274264642923 0.49740882508502415 0.08386419908807785 0.6874877623122222 0.6655031145254128 0.033730018622500246 0.7192973946966975 0.9310213060328949 0.26816850547844473 0.34748414131957117 0.976292186095101 0.4784389135545086 0.30506084440599623 0.09510774720305981 
0.2520767270856421 0.5142429426609124 0.44829028780573277 0.36828415705680806 0.7574507961744208 0.8815914976465172 0.16214661038688938 0.2514947143887065 0.07816816243607672 0.35369057431216244 0.6506439369348225 0.21588977743082838 0.5289147430072815 0.17316198793490367 0.960591368381173 0.9903795081532514 0.2974717348255228 0.4343688272327181 0.5663923916552347 0.23932024051138256 0.16669200468525214 0.3377816652956671 0.9434391490903469 0.8458996771499918 0.42320671196002646 0.08082024128331466 0.3488823478787956 0.851299770473716 0.4143186989367805 0.16620722162670842 0.27172122050780145 0.1319436305549606 0.0983279456967665 0.20672176442907253 0.788572157296554 0.710608667013036 0.8485734376417542 0.8843424209475155 0.36713896015394765 0.19191503736737103 0.7177650148945145 0.5821206087957991 0.4963211886166793 0.18867517815219603 0.3467177317657302 0.34872700599840056 0.8900615881481904 0.7628161710360402 0.7103903238043273 0.2784653674752998 0.9114475121563216 0.8430642688978909 0.9341684702278278 0.09364920586977976 0.2942451543667979 0.3013062317733429 0.48462251665323364 0.701588876132701 0.20095978880907595 0.8705196999491774 0.14150869365665952 0.7650379058940494 0.14967895314359358 0.36939108497489814 0.29157127477574496 0.8727988750604052 0.36636976967561774 0.7622155065618756 0.24974311123138648 0.726291021595065 0.6749656470038145 0.42740072377802274 0.5096299985069862 0.8682330227869093 0.12449154814946706 0.6338731651127892 0.05270444000124419 0.8711960410702284 0.17064125053632095 0.6319102757789633 0.10442244764285535 0.5766340412136229 0.3786339185913906 0.9534009323480761 0.015516918023076198 0.9290990764345133 0.2948414421590162 0.4862613611308434 0.26800958191068736 0.18570186376288367 0.34659213984658743 0.6387110860661624 0.9580230483584798 0.7974788282856913 0.37272022822279094 0.44133970399872147 0.4912629807296357 0.05717841004811364 0.6618450330550747 0.7483719769026234 0.6420802307439054 0.6870814834451205 0.1810272409443362 
0.7942653948117654 0.5733110017478598 0.9008215828778311 0.014364211198320098 0.23775058271452254 0.8916330325015095 0.11477289777929689 0.899250149098556 0.48265193625693514 0.32196390097354033 0.40742484871194296 0.14744100475408617 0.5033020654497004 0.12446768844887912 0.029264404731494498 0.16872806808015228 0.622090373596635 0.949024230449003 0.7510381852687446 0.2769381118705341 0.6876626254955305 0.4245550507620024 0.6048682809937299 0.45114258982614264 0.8845148583215621 0.41438878843903537 0.6205472565899534 0.24937145547516315 0.9682017585118634 0.5338879750525765 0.488439351509409 0.12779237136295074 0.06510577435785792 0.22284820711468079 0.6907664577011724 0.005229074563959157 0.30169032150138786 0.3927197882971214 0.5346588620944371 0.031057686402991003 0.4353866342787617 0.4274907134739141 0.693202622119883 0.16393300147290513 0.3477404162645764 0.10710972802144325 0.31481678149218695 0.34946130343748016 0.7301712100159956 0.8436159473148065 0.4966451312059129 0.7968504680894875 0.5436736832569644 0.47268193132269 0.9342490560308766 0.09773152105569372 0.8660169043554067 0.72706542415665 0.8577392111558952 0.7047606684778351 0.5534739171383817 0.43350385568196126 0.9435857924342357 0.5932742082251378 0.7388407457557082 0.4208757429914841 0.7516981640872487 0.7661833687210603 0.5634804703720118 0.1049907325014603 0.3789667354918216 0.6304596132076277 0.6983543033474883 0.7241607020784024 0.4433111327486773 0.17297559890501624 0.14747867653786773 0.23974201727553324 0.2163803365148279 0.2941303939649743 0.5469735097705578 0.987128832445015 0.7084024507765242 0.6196024870790459 0.6738433648377868 0.265216414977842 0.7752824887111045 0.6037370286100406 0.16086011338989836 0.18614217703231617 0.865899021571788 0.7624889991091575 0.47739200293033524 0.2721513030129875 0.8566514414720683 0.4295071258120925 0.0807504867779627 0.48272314552229956 0.5306616659584236 0.5158581301691392 0.268603383110536 0.5826331824688913 0.650634947353482 0.3720543690291156 
0.6702283565413941 0.020415071618894887 0.2472370977645758 4.624235437584723E-4 0.5028585305338408 0.7767252705908948 0.6989458482689775 0.5468339497521473 0.6890976805688191 0.9634032953709862 0.4146863855309805 0.2116146232598799 0.6823595175312596 0.3065582915191807 0.4471625793613917 0.46485023108487666 0.3795118428970067 0.030802509066796824 0.1626464179271503 0.722924075449325 0.8133830936830693 0.07679116680634201 0.17209067041092352 0.45102316156743394 0.4125154146237906 0.030517003023883604 0.08878049184989989 0.992144811619178 0.4915247039393047 0.6203473274805757 0.3193415369806665 0.2916197840646517 0.2926548075272427 0.1465798850801544 0.017774309816829592 0.0855416828642025 0.33758973739922826 0.7483333864242295 0.5362846933125864 0.052156607119078435 0.06831395706078025 0.98387751522938 0.19203373659732226 0.887180962484811 0.7567878547094646 0.8030459598884053 0.9355299548459818 0.25717977218489385 0.4454314880435698 0.6241655495815773 0.5542402217140591 0.10955133155983587 0.24388693121919058 0.8929733098581213 0.3740954867000156 0.873328657483137 0.652255343253716 0.19728251471677216 0.03192069224302818 0.6719218064985745 0.7069824415003254 0.7386977078474452 0.7834182883440979 0.7870895762229138 0.6527912760895337 0.7214677312956123 0.9841876213793542 0.2628427242870349 0.5325392023507233 0.00860512478387676 0.8353630676552534 0.7186455303039603 0.5566014929801967 0.2166890833122438 0.22080587603124646 0.15319588996743716 0.36436097951334556 0.2733080656050034 0.4793860573333518 0.7632285952888436 0.32260967534770724 0.34488769772208083 0.2537423543831998 0.5762005906617628 0.8191448379765133 0.9576882202819331 0.13600968962043414 0.0013807860014658502 0.10456851033928871 0.08104503828046483 0.6913550390424851 0.24078111129183222 0.611276046562021 0.9194648765523998 0.5817647493502343 0.8695299710895734 0.8358460420885293 0.16897921755451928 0.9353770818292947 0.7947024935596053 0.8095200068842793 0.533195564912368 0.8107558989223895 
0.23228703608522483 0.9011873378028302 0.36577647052845375 0.5908529109030273 0.30388059802327616 0.19826742716920176 0.4086149631054182 0.40800708346873127 0.34787037901317996 0.9931071620372974 0.7287641839826896 0.24918201353798008 0.0642726520393152 0.9659889353445194 0.12221850830699998 0.2197428811695008 0.1680640796421975 0.5005705930368842 0.4489315964304138 0.5544269754590012 0.3127547382868012 0.19355546364894805 0.5909969156061854 0.3557623442397858 0.6641162064088105 0.02761722235285924 0.9847045702425157 0.7130419441404312 0.6533922352808417 0.8650805470178285 0.9672277157058838 0.8090610006632516 0.11998308801850899 0.8895856257462487 0.4999806923055433 0.8571776573946305 0.26550959753619885 0.3739408004374387 0.18323031591795758 0.22785813588816695 0.5843260510067997 0.6845940669592931 0.8603306354570571 0.9217195101272265 0.2099657922617243 0.7466311273075297 0.5249904962085189 0.6251845998797059 0.9226670984655858 0.2879338913995676 0.6912725213307551 0.9452146888266693 0.40004140026711 0.057110216199129704 0.8635610008320438 0.061188325798749665 0.1611091744005677 0.8670316681753543 0.25252522430632995 0.954844734906519 0.05334394383171681 0.754293466643035 0.42314412985900174 0.5833130962738144 0.06302788380587765 0.7646365449774972 0.1321467984028858 0.8455997527325696 0.691137411964087 0.8753488044784818 0.2147627154641535 0.9595072294462673 0.13014052898848993 0.347888822092264 0.014464600110226766 0.9138106309038126 0.0015023744385465054 0.36745210271432727 0.514425767259828 0.9896182795490012 0.4693588628420383 0.17092626219450724 0.025702983523040235 0.9551952765525089 0.13273300521479237 0.9906583762373883 0.014694955474492732 0.4301300144167973 0.21683185918683923 0.04868413475507116 0.4112152376298337 0.08757537279340244 0.2333769968449385 0.5245448479220766 0.5285583131184758 0.8173978913484178 0.46058447619265985 0.8895453999530268 0.548225440955974 0.7084196665379241 0.30532500684887987 0.25128221093817293 0.3745171396726398 
0.7245394054773646 0.8531591078817977 0.3844972469935186 0.5479871343806855 0.24737040856213077 0.6863554462990962 0.9377905498893268 0.8217927333110258 0.2371354054114777 0.3556655581679825 0.9004502650619353 0.9512645841849023 0.6239086475704215 0.7285412590463336 0.7896358059948694 0.22455886713804096 0.6828986832430836 0.8046460163618294 0.1753763202541574 0.27104308166441693 0.5703151268036463 0.4889247178941247 0.09932319679346235 0.7241064650878545 0.9004661348555418 0.24316123367680142 0.6629217519897201 0.38637450585063593 0.23008285001209827 0.49886646147049074 0.56164409397936 0.6478466090334146 0.5824707454738394 0.20750410399493024 0.6241516865761622 0.041264424310030634 0.691937432042656 0.9284752078867178 0.4787321216492947 0.9840582060928791 0.4780495101205616 0.4109469154814783 0.3140821633877866 0.9525068170035629 0.369621725543658 0.7125242273705219 0.32748498602134357 0.5612865594395811 0.48994370038708623 0.5159286774734656 0.8606559234811668 0.2803730036509785 0.3876008315861973 0.4542608684024628 0.4120085514124284 0.8112989257341647 0.9513476840388734 0.9360884986586038 0.6838183847472722 0.46606059881092676 0.38284369513277516 0.17351545051091743 0.5737387869167137 0.24450369736578237 0.6176624629955408 0.8595513986178175 0.5523499737975104 0.689809246485153 0.38283769862580397 0.2155156617922831 0.18189843384768167 0.016739002048344775 0.15831030756795061 0.9348729534309881 0.626111063012079 0.45358877219320703 0.6699924471763211 0.8868128413669472 0.8769442940576343 0.21633723129884652 0.1837666748447362 0.3110487327360094 0.0813557052996664 0.04226669059043098 0.7925887786457907 0.6347262773871095 0.7896614751073354 0.7503717895326584 0.09296560284598454 0.972995070501819 0.16677042244747387 0.8671606246192802 0.13242966148793345 0.8367118188506107 0.2085609021814786 0.9589921208173907 0.276234939699854 0.5639070300479471 0.7878945599855653 0.9458825668161618 0.6893488720824315 0.5160611032002053 0.9182363358651472 0.3367533571742858 
0.4589176665150677 0.13590044084737507 0.9675779056509716 0.022380605819070643 0.9927338089536412 0.5503375265990778 0.6813369747089499 0.98984699564603 0.2208353081266139 0.5203435025667957 0.7801325388609716 0.0810848177302067 0.30021889439970806 0.1889553855219147 0.5868122318752899 0.708138448831044 0.9767501150706894 0.25928479662816295 0.6068344220164635 0.25906337006194935 0.49370533232934943 0.9463082196947783 0.2370845338254507 0.7809182411815919 0.9529818545040222 0.2893837148356857 0.810431514780333 0.41009515794076123 0.3328641273171128 0.18685274777019356 0.996806737790538 0.9352726883151855 0.945335751862578 0.8197494822504812 0.17631455545108066 0.8245651171549453 0.9762779921133479 0.9685547610468235 0.775860172448152 0.025980421854692404 0.8934976817451108 0.09509522344129484 0.7180065380679779 0.859088600776053 0.930378743145893 0.9611028227277572 0.06924972718447742 0.35584114093900887 0.5570527996392713 0.9856131971394414 0.6946273199406564 0.47977107569857613 0.8363364743513884 0.5052965329075596 0.2265521806281806 0.7553427517540279 0.8282841227576662 0.5949774323595342 0.9379896937132024 0.9730090862434653 0.7550564606727052 0.13001524403500964 0.17357379895502612 0.8005730005542826 0.4669070504359072 0.32788007727867274 0.8369828892690857 0.32773931486481334 0.8604403423307211 0.09317354745741979 0.05887399814663585 0.7982821266034458 0.2918961609966897 0.10850774608972269 0.5769000109503489 0.043041381631441444 0.7010676529656452 0.7630699000812041 0.5478106611180429 0.4170025734488364 0.11556056610458088 0.6913639292521936 0.4813656510688661 0.36500090704892385 0.44045407139276505 0.9065346148970393 0.7367638666445634 0.09313208379244298 0.9550241073614941 0.255843968423189 0.5545197008953592 0.20673929200707164 0.7360425271230442 0.2002832789304808 0.9101940158834089 0.5908865069462544 0.30626641539721056 0.7259687136459996 0.5711137714673368 0.25551604651441695 0.16708348723938216 0.531882112939022 0.6131907036386294 0.3450283088582812 
0.5158815259868537 0.5066829691980385 0.3121766104800895 0.0208101135541382 0.3715606963399939 0.9067312842623947 0.7621685511328095 0.46527735956621985 0.5357097679758762 0.578125374666915 0.7939044880176536 0.8272866977258008 0.3023184214682375 0.027472679696519342 0.7663662654777192 0.5313100998238509 0.5954995355880786 0.1392755221825407 0.5874329746407791 0.00655521799290848 0.26254685371996145 0.8622280976438363 0.88280520966257 0.5572532221728669 0.5787551739872773 0.2423848446153538 0.5771462410510076 0.7916254082907009 0.6307814576773758 0.7680743738610093 0.27736359178988157 0.7545783481848325 0.13692706167692137 0.1095974854504903 0.7551803550019589 0.6679670437531602 0.43557283840413374 0.9861614820131193 0.5146227734444497 0.9723037700907496 0.8271002154658249 0.07316727536605461 0.8997765482147401 0.1850802939619216 0.18998319282210974 0.5748799817943063 0.449141634562068 0.9434095820607331 0.8083377027499856 0.33067719758867054 0.2351408768145118 0.48248226293243746 0.6460658332481729 0.09748410169314004 0.7408155915410379 0.7163724705169483 0.8818351644869193 0.9566675788131763 0.42046649391428403 0.03401644156860706 0.3794835891411241 0.6305765385900306 0.4669761687102192 0.9007263471812853 0.9100425324468041 0.8620791120881615 0.46577472689498045 0.3643911761936044 0.36442292501437923 0.9098613879248321 0.5538876528414309 0.6055551165926625 0.8999092515674407 0.6429171943233677 0.42223124506306475 0.7418533200945734 0.948959513509345 0.6795713089736933 2.9697668911254915E-4 0.19982381394949666 0.2854094113722936 0.5095436475677515 0.9445937710050926 0.10090341755808141 0.8910317961112648 0.561975376226208 0.11569289185847875 0.2734544869686205 0.19450279180481223 0.7380021204497134 0.07344695821880853 0.6989586692903862 0.36517871008673364 0.013373684149017961 0.2459311804821056 0.42805267763657584 0.7356883242958444 0.33478342519280613 0.834881655749951 0.2608698722372169 0.10019776394737478 0.18298494026008483 0.7088816009277331 
0.7761913916346206 0.5893058914609557 0.8564371587524936 0.5052738024347779 0.6151985752465413 0.057879900789198024 0.7411336129886826 0.9127530562468997 0.2781581803453823 0.4387431227066586 0.5754355729146267 0.6388199703117959 0.07616082985353623 0.3129770859429687 0.99314228064946 0.8797533585900287 0.29507191774869224 0.0267031314941083 0.4336964069796584 0.20548535859625405 0.07514363607020147 0.04455917004381993 0.8453362024910396 0.6477397132191337 0.9872755602162029 0.8716894566584962 0.4751704020041053 0.43702069898733764 0.7957004252059314 0.7989162694292419 0.4395655867841526 0.2725159898699576 0.436910853114136 0.9298647421285644 0.6989001431680503 0.22870420897775123 0.8159316870520654 0.4341625518720045 0.522652494953387 0.4034334787894177 0.6235816224010517 0.7374389480578969 0.5239562994174836 0.19446495707807876 0.012157314424728671 0.40669809424024395 0.867168028080901 0.6549508639240454 0.6320114884701149 0.8187438735815686 0.30765766108111736 0.7230437691750278 0.22778645202402026 0.30968494596042806 0.30994656342282867 0.2468101627393583 0.7206026329721691 0.39514968735226097 0.608480888148277 0.6328479877818841 0.4612915939672131 0.8236933868725966 0.22474291823472548 0.6354844023987537 0.12980156762780282 0.6799801939153604 0.13506670518363162 0.27315639966479643 0.57719267970112 0.48967519135642656 0.4954388434683329 0.10955375943012113 0.7591783644013659 0.2945828077486312 0.5727283736089813 0.1693303679317355 0.7939579699041922 0.9644194030297791 0.30620517912142564 0.5716659400593439 0.09212549721079555 0.5907095979475308 0.7288689165575423 0.5596877150357477 0.45272741483060364 0.6686475585581481 0.3150257087245717 0.9797373867778543 0.9970825235782118 0.9143655467451091 0.6887132474059124 0.4216954002299266 0.8207651998493386 0.9879042448562773 0.02698912670295439 0.1700516173146358 0.2841929128573347 0.09628042210046639 0.9227615505292884 0.8351372473530783 0.9335475010055002 0.4363992651186954 0.43375531255928157 0.034910394960747215 
0.14629931649790795 0.7041603553906477 0.5373700877368605 0.7799130123526669 0.7293917255749275 0.443957707742365 0.23560228836302488 0.27749480382656033 0.8631833411140218 0.8418756212974923 0.10986560756959907 0.7161252028862044 0.980533501407335 0.6762343351392277 0.212459688413567 0.21659177151759623 0.3979563507845192 0.5307404054505134 0.7995998363911672 0.544109529625742 0.7745576527761593 0.04834819162107151 0.5603357061583675 0.3638425987190903 0.764507247593667 0.9580414368969403 0.6763500272884946 0.7920656474757857 0.5500895829925214 0.8420939144097258 0.259542997056435 0.7461698353872936 0.2949137344885364 0.1279711741024454 0.45747205316332784 0.1289440439775491 0.9406528917195536 0.5868860269346209 0.7911303758083483 0.1960385340464198 0.18572318082462824 0.7001221607833694 0.09521216159232637 0.5502457703627736 0.930599982520628 0.6145923631945912 0.7679262113262064 0.08220818263634055 0.9520093935105087 0.050698530846612355 0.9055836468690165 0.8343907756724777 0.07702154073180079 0.4484128948953787 0.2965999182804968 0.2036317273895515 0.6681113768700915 0.8416011967816139 0.18021469836620818 0.11731154685444733 0.801541406224008 0.43596013249145615 0.13714785837919252 0.8191577274121883 0.12396071940945586 0.6325427678358181 0.472573747687409 0.6829279121123771 0.005902698793587069 0.28669764751511007 0.9762095957306526 0.0356920290958751 0.27590341493535697 0.3213484268442409 0.9999784917894559 0.31715956885944174 0.5152519517146589 0.4238925073417926 0.38062118452408567 0.027475188299698083 0.6209225103431568 0.44759916222182405 0.8527948325885112 0.8231324543195306 0.10302719314832942 0.3112383113357122 0.24761141102740825 0.8091317080541217 0.37316819963461645 0.6447736510444181 0.9522319772868693 0.5452350954458285 0.1013560976428961 0.164304775986866 0.8180473924647769 0.5351477444092357 0.5717008996665116 0.7760446160323731 0.9481401410983968 0.9996814957842315 0.850870914829819 0.7069702823120018 0.5691357006427838 0.548433102309987 
0.4983542475654764 0.5388865707607693 0.49821662722347404 0.9592364500821225 0.49384597717467726 0.88299356409871 0.5529853309312107 0.6506239729551121 0.03462387568500269 0.5736505896457509 0.6200272696436456 0.5897597474124456 0.8645641884140647 0.6828808683087488 0.9974387366213923 0.31678273371132304 0.8364881813336826 0.36126274135951286 0.05724867831071989 0.4862255782966989 0.4808189576393195 0.496204579955285 0.23557718221286017 0.8271508820598512 0.45606778906622936 0.4452022916099828 0.8515381456406563 0.7058731521113688 0.8904574069633362 0.6495300525520893 0.2803066878053839 0.538611536405612 0.7890293639728182 0.7629159190569752 0.5614372126805235 0.14614496692129486 0.8768714741935454 0.04010631607440218 0.6104306914872061 0.7685175081708678 0.47664034709841374 0.12182416205956847 0.8197151056583557 0.8650689391901654 0.2773430141187937 0.025477440782410765 0.9846159179634296 0.01354147486120294 0.14138100903510353 0.10507658589564361 0.17055843616496325 0.1917406045436324 0.41814903554547034 0.9791703257046028 0.34965740895199804 0.47620437028439366 0.5850195658046575 0.9422114932675616 0.3780133086222983 0.724561768398602 0.42033363217602715 0.17261559327438847 0.15944928554761406 0.12173818633386224 0.3280573118678807 0.12987683909827286 0.6470861314298831 0.5648914972592225 0.8800524985960809 0.8934575302806104 0.6710672502351017 0.7379395483864569 0.1194492387064926 0.8377684183009947 0.44890937230145134 0.38376628397854096 0.6773965224080198 0.960409451151529 0.034369033674107485 0.7206701916710218 0.29550705656120024 0.3849414287332076 0.8913784871280829 0.16934510642300016 0.917600679053427 0.04481081861367453 0.24151941233483587 0.5576607379104012 0.11689049755363223 0.3216520666139704 0.826666270330565 0.7527358030568106 0.2585442807779904 0.4590421177655416 0.2528455625224588 0.2332739964508903 0.4625932142787691 0.901884980943825 0.13702104230177303 0.45985926719433456 0.12844912665617658 0.32195403467435657 0.8654293992216378 
0.026883734696361072 0.5098296959879226 0.9817230430406144 0.8902544972187111 0.4870072501381637 0.5876816274810795 0.26146691318415083 0.1408427919586661 0.4275257455581012 0.27872906196348435 0.6287157271170617 0.30632603660693813 0.8477503420062554 0.8385651320617564 0.06449698888930588 0.45843232846441306 0.13747697047308371 0.17074253418510488 0.9774021333303403 0.7264293895852787 0.9888524196547077 0.6371087597014602 0.9343654857205133 0.5105594810826752 0.38741903631512464 0.8796702775251688 0.11614518019748121 0.025987920490448735 0.4228474524967025 0.11055909205895043 0.265846872931638 0.8062025859367936 0.0358425980509971 0.30421437232032766 0.040433109565557124 0.16296489419607296 0.9749354250107781 0.7346806938901659 0.09801249459429962 0.49932704859268895 0.17534877841073604 0.3409635852052122 0.9578043020392655 0.8028950029681796 0.49185312517792545 0.4746720319392781 0.6550259215427228 0.6084295976353056 0.21850350782833328 0.686716874418438 0.47955211974955836 0.21204328361061364 0.11037644588196249 0.3436228732351173 0.6350462006227692 0.9436634394808707 0.6859016594190847 0.02905133271993221 0.484365677508391 0.8600976318849077 0.43311016457951834 0.8824193818847025 0.15739525433015522 0.3079499048739628 0.16909675989804884 0.2540409155338267 0.689062938858274 0.42380146764114857 0.2092216769107269 0.31500324148902614 0.5281456131423473 0.7884833882377302 0.1910526124123343 0.5500326036796803 0.2115722690296964 0.13293239256592537 0.3677747166121871 0.3828910446245689 0.23428499439945938 0.2050355354047041 0.9367732752933464 0.5363856106987976 0.6613188338645267 0.650033326928674 0.5013195448538715 0.11286025416395051 0.8266135896636803 0.03146486249835556 0.0767675166348748 0.7960803958971162 0.542172058674845 0.9253025558283711 0.14739561900979659 0.021707732010033953 0.6015306563689422 0.35683852890264467 0.15600377892121964 0.4411036519049736 0.0031103051273124738 0.7180483294062836 0.46560320163239555 0.5119874603918554 0.24186646195487038 
0.7025717392845533 0.27101239200942573 0.5895151114538116 0.2217591961893729 0.7261149765179511 0.15789998669563343 0.46263341404379166 0.8269158261194408 0.7619918477841376 0.007790837721826871 0.8831030797776116 0.9559595013582293 0.5146863774957814 0.7297791964262228 0.2683740221518258 0.8718297191663158 0.3787327957992853 0.8631685584838076 0.42453430637111644 0.0485213325838566 0.6858730327891203 0.43998509912811046 0.8318442156412236 0.3065209162121256 0.33036639768616693 0.622287418736789 0.762904975852764 0.9203204364471251 0.9804891468078801 0.536422527979465 0.2623017445586401 0.5542400683721199 0.7336768583112093 0.41485419980263927 0.8065986626323872 0.33689313183861824 0.25342588180020387 0.6259164480648806 0.28485344074800745 0.1199784365586134 0.13798434387158298 0.4228213933917052 0.7923576938554767 0.983685879864894 0.5140271787385053 0.2754777865435797 0.61409823723309 0.34885359685658857 0.6229243400973202 0.46215219014292785 0.6894742049083181 0.6351160873818411 0.4728740392740418 0.1927320873063917 0.7960928355728938 0.2646342513521692 0.46708871610960323 0.9113265765887524 0.6680070860866398 0.9526000936289741 0.28638386690003736 0.583166676031065 0.2725463185127608 0.6485603863231028 0.20393489277221244 0.7092440569536808 0.5099308737440837 0.1967046507916742 0.4406310519561548 0.531853276968303 0.6711501728920354 0.7686152618442526 0.13136927967983447 0.39517981632163246 0.09212326881087529 0.38857824284599474 0.9657950037335632 0.507301715214016 0.5759496401515257 0.4194051692434345 0.5914113572684766 0.4134502960663786 0.3200971199321667 0.7058184715566495 0.42264165785120655 0.33510129562289115 0.28166301586384024 0.18280332870189075 0.5863445309387859 0.1554444243855071 0.1413833827530765 0.14793162301480312 0.3524452060549049 0.23900615522044577 0.28585473367343317 0.06951436262363209 0.5477600219188109 0.46167692731760024 0.5174710639632536 0.756540963961251 0.13175587544421552 0.7971205022658331 0.5228688476188618 0.9517152625825288 
0.32490010802151637 0.5565198066208922 0.48321074358437355 0.9254995162031556 0.4234201690025602 0.9108403442963684 0.5934290387967301 0.44692935252660504 0.7087070013753344 0.6382682832332164 0.6725875872558497 0.586064208547215 0.6652597148105118 0.1342077449659148 0.49381126115092255 0.16328936721409226 0.057983311204610755 0.5971974342525401 0.9397701961330067 0.30228702789756245 0.28781527737877655 0.9613340108424233 0.5984954578204069 0.46634594739502244 0.8440709673340975 0.5842659339166081 0.9543418394523489 0.5617799273802393 0.6025617432046391 0.9195479150319875 0.9002025596867583 0.17757790168091314 0.18045396389848445 0.4031228955529915 0.8996197095730141 0.7970093292093786 0.5520006414389865 0.5659251725301964 0.7612229516855226 0.8647601715378773 0.6536921640095675 0.9480741579228401 0.6529690649240134 0.9222768236947482 0.7630362874391625 0.7348557882582045 0.6799426110719912 0.6343461250171918 0.022091487609146587 0.16066443761272775 0.07701822104131062 0.42154202876566116 0.5950420579407012 0.6343049487919203 0.8264734682839554 0.5860202125162037 0.5985175369957333 0.22753129601711686 0.30068053241802484 0.27224412945451704 0.12402258014938838 0.638424190500441 0.3512937848100778 0.33957727490748413 0.38375821322116166 0.992187499028025 0.11888983944187104 0.24027799465975497 0.2542785744546989 0.8741688801805827 0.7208051657616759 0.2942847533959435 0.26684220648182766 0.8033876360180331 0.8899556945064765 0.3312815023901904 0.3989803193305721 0.9495782417147368 0.583712307030359 0.5308840541889172 0.8587189956570369 0.2727356879919258 0.5594441188643697 0.4333901272727111 0.4210560236391572 0.4213743448185088 0.9221317594531873 0.5163873843941589 0.7757072028662835 0.6641894189436863 0.68949085250879 0.4366149819410483 0.77580143998194 0.8495360286350513 0.4913358620218574 0.021826170669720146 0.4391170350554874 0.88811797482033 0.6224526921899247 0.364000701363724 0.04661692235749848 0.5365043178035789 0.8020382681186161 0.9131074291947184 
0.707453427516016 0.32256910802168237 0.15780370848936598 0.2997193542783112 0.3152781202092637 0.29867540061652387 0.14912726896946948 0.7251342953425176 0.9076059268203189 0.9344019871544951 0.37118213822407187 0.1492566739325607 0.0581466685847275 0.8678373320120486 0.4407529285154558 0.1793469102619729 0.08143174181348867 0.42294197271709766 0.45449448233330225 0.5168467252443044 0.5097270590388946 0.7702070898099186 0.831674673476807 0.054840103716259136 0.5141379926939957 0.3588137580172368 0.7059830600052301 0.23531148684520442 0.6849758945412253 0.7807236626497142 0.41477530065670143 0.0732138096146151 0.860869594273491 0.5406088015138848 0.049163064908960386 0.333461323559227 0.5017699737891893 0.3002540588097036 0.4746477248844658 0.7073127504440725 0.43790894500995436 0.04204362124902117 0.7095355307508499 0.4762129097372959 0.6949391176150329 0.25336786221129315 0.057160779822218455 0.07401154310333513 0.4948936269360862 0.6722649744177177 0.927982542593503 0.16258054174610337 0.46327039895571653 0.3543371130286683 0.8589101789679764 0.3212014121685931 0.5344585786498613 0.8219583213791771 0.8397506466709216 0.2166973180761439 0.04088962201740076 0.8390174148800214 0.6904611800472124 0.1691347748521439 0.9957086410153388 0.9179157536192475 0.8979289665015351 0.3570222967303053 0.6344266748515344 0.010266695842693463 0.01728870994860021 0.5162841742562494 0.8679280047958743 0.958719524196743 0.47032832820987946 0.8788900413028556 0.41737063822682585 0.19455775623662575 0.33232176906741484 0.26987135579652444 0.0509327802427898 0.8541978146695086 0.4886672546581391 0.7355585685526124 0.5230212526624086 0.9574410229678638 0.3639979243663898 0.811223239068764 0.52847282298088 0.5413083480413367 0.2789715559814121 0.552912146133037 0.2274426935617092 0.6755134869143208 0.37373435984021164 0.756110632187574 0.6710890585655501 0.09910696601341273 0.9520934878866247 0.36297519915219845 0.7638261594547795 0.812550414508895 0.046721688574222986 0.9202262099027103 
0.234832820032574 0.3354563702994653 0.9832566763177316 0.09139333464109645 0.45966726054150375 0.49745580255156807 0.7299467492143572 0.6499578781929745 0.8722529863298532 0.5681322878493279 0.8595333982453172 0.08403801827476798 0.13816476469750916 0.25168790663525875 0.021350658094100305 0.9286137411697677 0.2220577414259678 0.5729826695220209 0.9241599775989792 0.20789105398569996 0.9752466657834469 0.9638162518013105 0.9640283919895658 0.9182551133651136 0.893856570621548 0.9217963550386167 0.7965970723651264 0.31136505063668385 0.2536299153526337 0.4588660609286217 0.34937609079209475 0.8472934579601283 0.9143894256169653 0.18253746195650478 0.9972405746156668 0.07921609344785996 0.7053202519843011 0.26010672553913006 0.8830923476188832 0.40400805910719617 0.45164854820250167 0.704601775033131 0.6556899516442828 0.567205184324697 0.38395802975318716 0.3909401427649507 0.21793986191556547 0.046140018329184684 0.883300040586689 0.9617514607760514 0.4314795935135777 0.9516641546915962 0.25841669612955165 0.31454562511626916 0.10717343249861278 0.8867194502041847 0.5258107094818797 0.6753839588642788 0.026180106948498594 0.2791229353528274 0.9690597388639667 0.3589373185539959 0.18254083745482774 0.06628154927549601 0.5906867876750029 0.19079200853711775 0.91577536674848 0.2511538348314247 0.4380627447003832 0.22509039592202695 0.11076264490102161 0.7716284108459229 0.1842307829088824 0.829070192248885 0.18222166115668414 0.32958953934439605 0.2752066055386583 0.4257924157285369 0.13239733015779898 0.31129222544203683 0.41722261821781903 0.18732514095022712 0.5730952415476833 0.6972708211409978 0.17054806040463255 0.7528761356828885 0.23699785509924698 0.9078564241336675 0.9409312464875857 0.6098448652508189 0.16823454581673414 0.8594802436793383 0.7153741378401623 0.12048968668984572 0.4784659883749066 0.11729117680523993 0.47586334296779453 0.30288402154802685 0.2822832642709582 0.9212182750511538 0.342846321992746 0.38206074688496516 0.1845106936291755 
0.03545908295674138 0.1496546550097546 0.8157210765652226 0.4820405099549975 0.8254778224585384 0.3297411038746991 0.9686108672820102 0.06411675265581063 0.7019476284942887 0.7105110523420357 0.8606436205907851 0.8475244484759757 0.08767360660305268 0.050692733620935626 0.44607597021807266 0.4669297792493291 0.36138452375956165 0.9326049391132445 0.6945183367538594 0.85565537358885 0.8113825303520242 0.9263449618751796 0.33928959685034366 0.29846421537406587 0.4923922056854313 0.7798335420230024 0.5346015115494619 0.515464978487756 0.020551363684915502 0.7568175339796018 0.8202849621431183 0.3450401003342406 0.764337820492723 0.1443452796630197 0.7312466610042262 0.6337374439859832 0.5933280259444443 0.38189377061228935 0.10857199871820467 0.01235519711521016 0.3320346792428004 0.22624031760731933 0.6701652247506042 0.3399889817714735 0.3326724372180432 0.8593359447712762 0.6810459769985456 0.7315986763611908 0.12855183793703817 0.8708519241554382 0.6268968502467517 0.2496922660337635 0.9589037339644041 0.6027340213397435 0.6391834881374513 0.5370124429707304 0.06628769762032038 0.43001447049733943 0.04614545037791251 0.8946512666115953 0.059339470533406824 0.17029441697705416 0.8311823995149923 0.5688065516443694 0.30646413461860167 0.9637245039393474 0.9686369829818245 0.29943369926808017 0.2801834899354694 0.6459750394742221 0.576701904570079 0.5053347889151947 0.0071970787194099994 0.848601192445257 0.5781558284379561 0.8327664795827646 0.2958185846975172 0.8043727800307461 0.0918588669678434 0.17843687544730247 0.21030028039332616 0.10955239776857328 0.09141521467419766 0.7128010607913626 0.34066019157735516 0.50076950152457 0.9621368267400098 0.8758533735834563 0.7769780135019664 0.04105231035158452 0.4634188115999759 0.7951544816467551 0.3387273080288655 0.9777431507175139 0.260863801342644 0.08461943355199242 0.5325633130294696 0.934437476961932 0.630968862363028 0.5226326246524091 0.9160870157508442 0.029750089622004983 0.16091521201606485 
0.6793441860205721 0.03045781465798325 0.9776269828790233 0.2669778616800962 0.16554189072143255 0.8313287619589991 0.21708465557326018 0.13729815034769555 0.8092811917005195 0.5954295349538287 0.643673105510893 0.2682395733218733 0.7314953808304793 0.6092246421528896 0.5706226487297807 0.10827321856508809 0.9880969717561794 0.5564247856447075 0.1345489835648208 0.44931579350543027 0.17362008129436624 0.4127320659853291 0.4787546149852778 0.11267028511977184 0.1402433500980006 0.8290504936994075 0.6965792237715376 0.16519501735290587 0.7760710463730705 0.43849486374151514 0.6979153622445673 0.3710542475281249 0.6078401619272013 0.25788326225139946 0.8739198775276212 0.8353794524225947 0.8457591418921898 0.3709991479801852 0.6781159580261661 0.8283972706897921 0.37197439488684725 0.3787612131097846 0.016493955325072585 0.5920564843171472 0.08919821692866858 0.6805403827248322 0.6448843110679985 0.5259133914841703 0.9923222054284936 0.5112151023611781 0.6087887250681587 0.5250107030988906 0.30337833429809546 0.8001575053707009 0.12902083076810666 0.3997123378450965 0.018999675583542763 0.3411947274542445 0.23592117821335035 0.24593309470588687 0.2595709641482775 0.4666908442682426 0.8855752149827089 0.35392798372768186 0.8805491774315621 0.4042471992268586 0.2533777384690874 0.7447834241423473 0.9089726745378518 0.7611195504888799 0.3934505694408549 0.7059785936412293 0.501966904980913 0.12930283649728647 0.476834559738297 0.7608619601516068 0.28909342447031183 0.6333352494808548 0.37704146809157046 0.3516143750197288 0.15667846310567357 0.353225472746197 0.458464569996783 0.14124708223533444 0.7597957473930533 0.9465079365948162 0.04131170315731936 0.2474090078161284 0.3063681850129164 0.4662830442696074 0.16613773445028313 0.9092547781661547 0.44789096788471816 0.3262003826026668 0.9617183863637425 0.05835118368193437 0.43256891142213283 0.598694899152287 0.13192047859647504 0.015576341889358747 0.029198940706116505 0.3493873339596436 0.8203884555933069 
0.06819259315034099 0.1139181935035053 0.5084119423108794 0.38628078178762926 0.9947506003358414 0.22697729847581538 0.8585357664825746 0.42995379394740396 0.7002084602396703 0.8529871386667606 0.08028595909872116 0.7444050085800619 0.9996268672832389 0.6556266747939294 0.43590233563325076 0.7364513954441456 0.2460269064331284 0.40658853925013927 0.5967845232942618 0.2610328767073755 0.9364748261275296 0.888577971031591 0.3232045726525028 0.7426946725776856 0.6501834446173342 0.623469944407668 0.1904072135668703 0.9202167258211894 0.6733976365596225 0.8792680814555698 0.3218216046554704 0.5435222282076049 0.4480567880780799 0.04329848660762614 0.13796011793664242 0.8764113144729924 0.544704563866743 0.11411018215842239 0.4586811043958454 0.9143302838617382 0.20484231238868378 0.7608432646014227 0.90684420876096 0.8133004361406908 0.6238992544897858 0.1263648867049333 0.4529883893706116 0.1442950645692389 0.053219273333005246 0.3092618782089377 0.1135142920929354 0.2367666786310334 0.4879400981174774 0.2498186496367092 0.268954204609704 0.0556913695601603 0.17736826025951535 0.5663971179452223 0.8042645833310716 0.7684103717923236 0.44542865838748613 0.7411754152718846 0.28967716097617746 0.4007136811024161 0.340994876249785 0.9379902632735785 0.7184971138163676 0.7560242639354399 0.45138270933286995 0.6500679748981865 0.8973372217602763 0.07720572043743268 0.8070159829367501 0.32171699996049274 0.4446294747816185 0.7674298452377051 0.1812499565015474 0.06559511175270294 0.49485657447144227 0.854705287373094 0.24656498564385698 0.6059524028668656 0.6287977084283682 0.7840818659782043 0.9868186893145384 0.6182910341395051 0.2541839304343656 0.5522099125156816 0.48497208689851323 0.7071713001248158 0.6465111133065846 0.16084709453896362 0.7066816894598217 0.36643300471511675 0.12635865614834185 0.8590703300627639 0.18237573812178764 0.3998898466363098 0.397414870630062 0.6683636469537516 0.5539096195358116 0.44559374824936404 0.8985109205770709 0.24041732249047998 
0.7966220594633934 0.4430552114387646 0.1333167446125606 0.18438728311620367 0.5535901198837545 0.11034465765031021 0.33295422507525674 0.618528699188012 0.42610774977873866 0.7567889312023907 0.4796342924489112 0.8505304426354868 0.2528889197794345 0.04701608998816553 0.6735262095465014 0.7715424148581065 0.38022333867346825 0.6533907524480372 0.06041869546506029 0.7608009444424269 0.6762890493624549 0.08642122679229147 0.05433458582659223 0.6409220607452181 0.5750525931849213 0.3765088947299181 0.3871115651049001 0.4983460709802523 0.7486646447380046 0.6954492911672906 0.596063890720997 0.9890812336990361 0.10198465535444845 0.45238305818307856 0.9211908129261164 0.08680846189397595 0.6588454996845963 0.8764348716737832 0.22499952971105708 0.09857256164954864 0.5668950436349783 0.6070663982526255 0.4708619010199947 0.1727001713519195 0.886328461521916 0.3532092870808946 0.4999254072547229 0.454248210876373 0.04755161010003739 0.25353288786212946 0.3319510016116738 0.5052009488349533 0.8637921300460781 0.17274033215411844 0.9820514135306427 0.5997976461979252 0.766901981279386 0.8025760737713828 0.833155719154113 0.02978711987869731 0.4980710241416454 0.10126659935590343 0.9331267431764847 0.6100691490881917 0.8298985224334097 0.30140552072105353 0.1958102240472296 0.1620973128687727 0.02224544400971351 0.44383281348017567 0.376895695849915 0.8705812746286099 0.6215172115303965 0.1538475238091237 0.8683207269669532 0.05844119230750999 0.901503379885934 0.16613214313014546 0.8090519565148521 0.10563049153249804 0.7136265595963892 0.9260557195949792 0.7454275394386255 0.9493575862132354 0.7654305238244201 0.4475620527204808 0.09084758059832676 0.8464012681059284 0.3783628370418711 0.49163849675100635 0.9324501786307159 0.8526280861141564 0.5893848769824417 0.044399576956810405 0.31151310660366094 0.7804848409319053 0.05566804810877901 0.020109037923236706 0.9646715012329583 0.23215254455086853 0.5976119568277892 0.09200272303466972 0.6546449244677484 
0.674212966394642 0.37264657776597676 0.562564290123869 0.6420173616477846 0.4190925858024098 0.6107571190554558 0.224943770752142 0.5537162253906025 0.8092127651709295 0.5623970649553899 0.23849938822120387 0.8891877868097392 0.8616894578923351 0.8104309613826122 0.14398106401960253 0.6718675496662437 0.12458913116874604 0.3799755978802899 0.2776634190678867 0.30139630442278276 0.5201624831507696 0.1434843165579449 0.008775488643909313 0.49113369782734984 0.5061407479848717 0.8631463605548335 0.42086292417312654 0.285873096555203 0.9730942342232368 0.8386488390072354 0.28525767107690336 0.5462625522461609 0.9140160726029518 0.5355764149022141 0.4336195574510068 0.9592888761523618 0.05170886416557352 0.7581997297041514 0.9265524580840138 0.5882827756697402 0.29021550076712477 0.9001303229374351 0.736587871267462 0.21125521056106067 0.06844972496630997 0.5572965366329058 0.016366674769924705 0.2129760306896309 0.43734602159859404 0.3199211815633546 0.4066396845709399 0.5940579288554326 0.9562625168713095 0.8736585083765274 0.25374654955374765 0.739410396051365 0.3100344790327132 0.6347744078153609 0.052521073384548256 0.9487794786191752 0.8849903338200469 0.02023065426423476 0.9081110344986544 0.3736910335857986 0.41040582960034977 0.11076010947585047 0.31898854920341435 0.9279668913257723 0.17128663130148192 0.9034967427425008 0.12346723822505867 0.9430842973799898 0.22903262588980577 0.5704507737746385 0.023330383618669903 0.8250013039279074 0.5484527725115494 0.14009559256897397 0.7007142370652081 0.7356717636526887 0.9227618395415778 0.8015601910528591 0.17817935413914676 0.370325449928557 0.11260578423945222 0.5943967171304885 0.7369414742376903 0.23069024629228574 0.6708254393767509 0.5916200108303623 0.4045234879303855 0.404685179303674 0.5115832391350976 0.8096918776500086 0.7506281095572644 0.7547140306531259 0.400640965171179 0.9863501675291261 0.4888205792531414 0.5895416068134653 0.1929910969505425 0.05565841931922211 0.171649477304459 
0.34883571094300325 0.8247081774001178 0.6193793274235327 0.08812303496924889 0.564809687301854 0.7778007285037889 0.9647509386710483 0.32499690301163475 0.3820018563471618 0.9800081948288181 0.2109717952357062 0.8371192634812848 0.21407989947751405 0.8759017521699206 0.9801773871967739 0.28711713364750424 0.6956595857478125 0.7113113865382603 0.5834168754845626 0.3609977814942795 0.9474789485347795 0.9100164819648299 0.08811589875176151 0.24377768411087086 0.314870507152491 0.7898001247877324 0.7907372536666438 0.9480403806257871 0.16232590833749527 0.7643826244771033 0.5568286469201205 0.22836436196615761 0.01104825815245769 0.608412747259821 0.21037232532216132 0.8858503568892108 0.2917401680095242 0.32461003116389664 0.6553288038154462 0.3695169827588468 0.25147566394021204 0.6453149468482233 0.6944929263056089 0.7151225250563046 0.10135701505911487 0.22511831462972365 0.5742571076536959 0.027009962526495013 0.320758039779963 0.13690495988569484 0.12860371268933835 0.9048608346592312 0.14430764456393763 0.3705610981510833 0.021624058421240155 0.4447835688754441 0.6493687960211598 0.6517585251033797 0.9950893083433309 0.7029498700538441 0.5346194041922486 0.04764332708624042 0.21607407761080688 0.8000325097883152 0.6344435372727997 0.5069201555821905 0.7893605952794267 0.16598692302250284 0.4491927889155113 0.6937380000289528 0.2503279866244825 0.7772576638841449 0.48797894048848434 0.2179468077461958 0.38625730205614983 0.6222802125329315 0.018511285760576146 0.6387084428716535 0.04745990884848916 0.046187653647994376 0.8975926557340079 0.4241102586906833 0.7043632876841316 0.07192023070407505 0.5162404855992004 0.38634228247120617 0.3826648233390322 0.27247617711060024 0.5350005665415116 0.9801644621593706 0.2658905943718829 0.277829603330946 0.00787245056430097 0.25180359326003043 0.6931732104000627 0.5400815387245738 0.6973675269621171 0.15608391879194994 0.49978856969437313 0.5516987941967065 0.8685718509482675 0.42150941074861736 0.6489688158265767 
0.31423445137991335 0.6166878600670117 0.1361798003430177 0.8857344817014514 0.4887128105559787 0.8303800017337638 0.7518366456779769 0.7069660262483551 0.32390063226172217 0.2223519212734093 0.2589994090081562 0.28666147306315226 0.15497593354382255 0.4689329922373753 0.8730659727476495 0.12130272588557622 0.992644063327219 0.39205501869594306 0.6894158978227358 0.06748071735317807 0.4815403685018569 0.28985031468801437 0.414968705326079 0.13240541772384962 0.2967976253694492 0.9762047331295928 0.46431391522559107 0.8476533627972775 0.868945220577769 0.8249501330663785 0.4204429692986601 0.3074823527602294 0.1798468944883993 0.13098308632689393 0.10327795819210506 0.9852689105209758 0.2561717725460779 0.3984473521582105 0.2636055525387071 0.2551012044585985 0.9019336981782099 0.7452198311435305 0.22568911938060132 0.14028632042853317 0.1579218352848457 0.4732910677319122 0.6387454078805291 0.18650753125488118 0.9533855402261588 0.9962328279273217 0.8656370201471557 0.5145937839424305 0.42412478277775056 0.737797133110169 0.4365453425420516 0.0463400352592539 0.4111770371466923 0.31979217560547313 0.4260528607418872 0.6165437175055788 0.7255800942302828 0.3427327136187761 0.5739780432395222 0.18539926977774257 0.8672299298315904 0.3293639053829115 0.46346980584713526 0.6476190427354217 0.0196412163351527 0.9490084822259395 0.09428256015980996 0.7981256473381864 0.30745752256690384 0.4697899874629612 0.7448172551687711 0.8588871059664525 0.4998472628018663 0.49613508198608336 0.1034276300008331 0.5974431847761168 0.5704601652518115 0.4405603171421486 0.26765485216260243 0.47489732542205554 0.7834087739674912 0.851600022691496 0.3594681773124606 0.4496253958590771 0.2647777427287449 0.6738334828444573 0.185252877464666 0.03267920932483792 0.34045356931583004 0.7513212573662383 0.5812010539734594 0.9418391049032604 0.27360563059773557 0.4023621389090122 0.8279249471625926 0.23349637870094342 0.4189392957016339 0.6913335091480961 0.7925262591792834 0.10502306124756422 
0.22588022298536437 0.10247958453084094 0.2620193719041959 0.6550134095508692 0.9691385774472032 0.6808639443702904 0.2689200946570479 0.764757016280235 0.5626403798825588 0.3473034238227536 0.07797571537866033 0.9215315081218888 0.32160316994233196 0.01695318043234062 0.9024264453284213 0.24277789701403985 0.07757093677452032 0.3553129903543757 0.34885214075667625 0.17994920065524878 0.6225135966096496 0.4907779336991499 0.05157214094787799 0.3817991923905537 0.060634691232253846 0.7694563738834728 0.9507305679126371 0.9791044164216934 0.28031996306115836 0.6858316312938352 0.19261310302710588 0.1162898212310518 0.423698971936077 0.4519435612722479 0.6864053566751173 0.9001541677702262 0.927580689078343 0.2781191057588571 0.9385172698259142 0.06186865016865828 0.4370630185360682 0.3298818198763932 0.9037618460247496 0.5356480644426311 0.6519053514305491 0.4985218082096803 0.24209044492689147 0.8459710720402739 0.9557926130959572 0.5769097385420305 0.5502512915001677 0.6649519776373726 0.4999075362483997 0.16748951739448747 0.849651650770026 0.15244376091886813 0.2647679903597401 0.6659858831349658 0.8548681209491978 0.7236779346132016 0.5080743633535266 0.9622151963510168 0.3216639464000237 0.7027239174071118 0.9213514899392652 0.07848857859494252 0.07903583481913046 0.9147767753829271 0.764458767525896 0.8264487431060462 0.1867270896368597 0.6462592023293207 0.22458089570568596 0.7047829607038333 0.027807970312155317 0.5589494809243258 0.2551006999211093 0.68310636906498 0.04533817428565201 0.08282467989326803 0.18979511174754116 0.22141157859245497 0.2627901948294675 0.5320337566127125 0.49908361029043957 0.8598315229004174 0.6898268237620706 0.6893930206129012 0.15759815743126093 0.980064802933657 0.20818977666677807 0.8870218622585313 0.22613248497469185 0.8876637285087078 0.5568706023627913 0.6742140689966813 0.41606763173796024 0.19348716438417513 0.5913989379689883 0.008366071288814725 0.600659585257813 0.5829716497241423 0.07899763533914184 
0.5307602274582585 0.46511433721747486 0.4534760932897376 0.9095178983225087 0.8962279367203849 0.7461641983042401 0.5728065044906301 0.7514818978377726 0.3141937093001359 0.8018987592480842 0.8661291628641736 0.8632243002574956 0.42832781351886773 0.5738926123211391 0.6738321188287661 0.9810381862182417 0.41301220908458625 0.35621112819577994 0.33949607588901454 0.9493950411053227 0.6970504350117878 0.1696647399282334 0.7849880040684571 0.03272235847121663 0.03852884207679441 0.9547210060306841 0.8087916983100104 0.6538866883935021 0.769463447435228 0.24277325119908333 0.2040414422112108 0.6414435964768613 0.9048428638215869 0.4430028046195096 0.4343264468322814 0.9358809577087858 0.4679044544131623 0.3908619756326446 0.5281851342556091 0.1044946419055609 0.82195746112298 0.3442432741092146 0.05731016252717058 0.945201378056599 0.01879340839372201 0.1846893525625798 0.5488363590971654 0.9215062742128127 0.1747006262990054 0.18353774860035554 0.44868565089264767 0.16400728837140988 0.6673262212164676 0.32668341359166875 0.8007941307808621 0.2982326158297879 0.5101055773443984 0.8566804797201322 0.1361485493715544 0.4142716254823131 0.6057059316460279 0.38719469495698233 0.6759105773890515 0.08057295930668529 0.7059069072369616 0.672356389186012 0.8445091886794764 0.42244869934249707 0.8125679353455288 0.8723529748172969 0.261432559956436 0.9352367536644471 0.44280867915103006 0.9466395486181884 0.8345531163971178 0.8827379315391318 0.6512550712327898 0.6274342557201562 0.31317010487210317 0.8434580929124691 0.9531807476713988 0.2787052632063419 0.6755564334666969 0.21035032658705488 0.3245991814514191 0.8296443566266365 0.023647601105102445 0.22416541266529966 0.26883958982636336 0.8866617156894697 0.23661206815485925 0.42618345085711573 0.5338651996701074 0.07422527398089995 0.4953943878676029 0.2530134009176357 0.44056978447220163 0.6324074139549046 0.9319234882989146 0.8643113682675889 0.9395142284800585 0.30779035582762604 0.07953048288644693 0.1936825979426101 
0.4520424760018148 0.6144209724099926 0.5052217156285691 0.6436958812842254 0.2714414892405014 0.6127949774556298 0.9898041588535218 0.8078536839220967 0.8007872663836583 0.45738897124365063 0.8884624037502857 0.09552501226728782 0.18634580630789999 0.09704851670159498 0.8522373342926023 0.16026198142809278 0.9688731636985235 0.6775788037108853 0.9199587169265702 0.5875854238884296 0.21988671016448103 0.39217426523816856 0.10053516603812174 0.3852389850993926 0.25595566447965334 0.7528620096883323 0.9571066296762843 0.4370745422924087 0.43128514337886126 0.5149231410355697 0.3774160703338446 0.23114697462383005 0.6973455607708479 0.7762951620960916 0.6688587047032531 0.8532590593747544 0.588024531917229 0.09524786067269986 0.352018343680061 0.4491127857628262 0.03975612452494981 0.8221637075763709 0.5431026637054589 0.10138576411489353 0.9883080790779913 0.22504341433516772 0.6135397668994681 0.3424672947174229 0.9973845955334941 0.4033957609689838 0.894101039540088 0.3292510887632095 0.2689286645887309 0.4314605096267946 0.5512662368904904 0.04329668312195312 0.2609845435535225 0.6492516349538291 0.4051480529333733 0.7478356103941861 0.7962076010191567 0.5286978046802724 0.3811460405061998 0.332511688787244 0.9825851425810576 0.0821955009033688 0.4915999798587517 0.10740981166772101 0.48765853319134334 0.35217958924295945 0.7290655095537855 0.06984124475562936 0.7085136224634327 0.12285543170180613 0.01943550294956986 0.6605171103427636 0.16203256137759015 0.6290248161798733 0.43098288741957047 0.5501567341414084 0.9833773157795834 0.7019781853210816 0.16450185858444177 0.05324069235335949 0.7654011496386766 0.4887139260030283 0.46007598985233145 0.35253295784522565 0.09766130441017429 0.6082846446342598 0.44760339967135987 0.8361604038604176 0.07093263932252536 0.36785979155226955 0.1410606766744371 0.49845221397935957 0.7015980911772477 0.45113260753255136 0.23058872104479133 0.5364055143829625 0.7000191302417242 0.31298875605017573 0.06183562009600929 
0.9855576978511053 0.2579654574374629 0.36689282047480243 0.20859245341179933 0.48221025462270073 0.5624095271981769 0.9352382183160514 0.34261033424756115 0.013362479780207104 0.5564689245580285 0.8479446887653744 0.6835881782352258 0.8213778889316008 0.06300516419604685 0.5208770885388692 0.17267198885921897 0.4673202472457858 0.7623647342902399 0.32319787706421177 0.6544086216440401 0.42505968214392875 0.6729744671129352 0.05155505430412177 0.7749479642514002 0.477664180397423 0.7219799522342365 0.36904603225934474 0.008389378593213093 0.6182551391206319 0.712842463673431 0.1381872857482478 0.20851984192840978 0.11942011017294596 0.4977857807732473 0.6796587602492385 0.3497923626161239 0.928439750067641 0.44521190696877766 0.12238046738152897 0.6692454526636983 0.5700002312000284 0.35568020138319856 0.17551855815181316 0.2501653888540524 0.2937261952928888 0.8247717245092463 0.8493067780025398 0.19864499674321234 0.29078361558169596 0.6771366063906579 0.8791082299905171 0.955794003749378 0.42196580125716165 0.5032495367012183 0.8659317136956294 0.9612076123864188 0.049089941303884554 0.5188741005007363 0.43665567363879365 0.8374017530201998 0.242195896109777 0.9369641579012447 0.04850220397116256 0.5002745336713035 0.8469522864843698 0.7927062737289509 0.41484202155873995 0.11725544088725359 0.6398398491136207 0.6946344588606339 0.9132013109207712 0.6326553108404761 0.07309564907037358 0.28822429742202293 0.7513793004089805 0.6588312666970376 0.9170655688104109 0.3131008687912852 0.113875596368894 0.8921330480501639 0.4445273039744866 0.010970779923168084 0.988364165752258 0.8541981758451846 0.6697091772754954 0.29861566489832236 0.8807414841457586 0.6820150792851644 0.5541451729684768 0.5587606582499449 0.16731351185931842 0.894275063652632 0.02216971726943351 0.12135842446779033 0.4039770079368884 0.5468853876788253 0.09077047578681219 0.5088052419652535 0.5147886842029439 0.9571173718865036 0.4116812892607076 0.30083277990438995 0.41610487695351617 
0.22371480898151375 0.048011009122822856 0.03829564835664723 0.29751417659509627 0.09938997734857735 0.3633690339203578 0.9298007549499663 0.4770733914400199 0.38248548087883405 0.4883612251117836 0.25110222939228866 0.47589733939535306 0.8861823369063448 0.9474949472809524 0.23441100874224008 0.7159413861319199 0.2164857353652484 0.6789453889286152 0.9332456856763285 0.034855017996496485 0.04432203588089623 0.8602997876984949 0.018177154185788247 0.03307341330558944 0.2924314860924295 0.6653099647427452 0.7097965498284253 0.12085380447763239 0.674343262960184 0.3198476802078708 0.08746238043721544 0.016751395687280124 0.32057073258108937 0.5996717406258236 0.825859129390958 0.3640697694766366 0.8062662914966703 0.21827919312269517 0.0061267024387164915 0.07810899811551864 0.7549271065557464 0.390027625587998 0.024296077598583987 0.014574066599659652 0.33434412527946444 0.2218793956126165 0.7769931641595182 0.5582951791924747 0.3838387917185311 0.4865325026785733 0.24285013659911558 0.7714507607072281 0.6159329176311771 0.7603672299024826 0.2977888788951447 0.8117727471352613 0.3209493010150367 0.8715515556144987 0.10331888618423835 0.43375296692086807 0.2696418674341806 0.0968122076375253 0.6892177026705733 0.5196585971296054 0.056805809408963515 0.5697929536836075 0.4154390947731085 0.7522035697334033 0.9579194915091465 0.9195847566083873 0.8428203272396774 0.7370117300683332 0.48122209991327125 0.24492754335405054 0.16813672177418992 0.1105470486511082 0.18923185029067768 0.14774666696440342 0.7862782534508965 0.4279750072707885 0.03656151542078301 0.6991014685897536 0.7307990265073385 0.9154580464515163 0.4249895740688452 0.891287583129351 0.7138179604627515 0.49267758298123565 0.41209678781743875 0.3417481820388215 0.23722464654697717 0.8239137652786123 0.026379512610602562 0.7576140725044663 0.7793712770937284 0.5398034104488233 0.4396027702204943 0.9441060000627197 0.3749084508456577 0.18693553608656188 0.8262575052526033 0.9456971799200393 
1.4842725234431864E-4 0.9087328304423834 0.10033820621738454 0.24584746260748136 0.052316888937823824 0.3386067435112563 0.361258384852037 0.819285185184898 0.868279927095269 0.6951629136741239 0.8035731058399299 0.230226747742315 0.6668103957139081 0.6092723846256016 0.04927952642249145 0.9005873460352264 0.5181835450889957 0.7841133201163315 0.3468174970711546 0.1341515136255722 0.9458690722534799 0.1918274511084076 0.8474942103036547 0.4758351277651883 0.5473127037886133 0.14422339883920576 0.2179083982908806 0.5095851271508067 0.5842777030186767 0.23999962934964691 0.1760509989331498 0.2541272159308624 0.8379422737388494 0.8872471944963928 0.1491108028091045 0.9026858084950764 0.14992660556811666 0.8275330386353592 0.14261540451989974 0.7709643887268667 0.9776484884882253 0.84665243426363 0.08338896209977864 0.9670889264937019 0.980884469950074 0.647179060728516 0.35291006903905753 0.47788058268008216 0.1261387299558171 0.6792297664762957 0.9630045709972873 0.07773841527008862 0.04639931885666426 0.5784004818002226 0.8424097412984772 0.7095764145243668 0.4260877268769494 0.7436184514311472 5.976571805016428E-4 0.7616738972221145 0.9944839915669836 0.5026953949083424 0.9143473693543105 0.9699613960431234 0.3056365190643423 0.2596549022579112 0.342906815459607 0.5121795796512362 0.8587423945196544 0.9954468530099227 0.9191873173542217 0.22851109729303043 0.2702099329989156 0.5024501645672993 0.2872845966411647 0.802701264266658 0.14709264587124093 0.7732047728585724 0.26072222625148256 0.1683455024463808 0.03130946537915469 0.0351025912200833 0.4611648032453154 0.36693757410362016 0.28504349844202115 0.025306057421424888 0.16394968393063192 0.5774347764124362 0.06993735739614426 0.2339522256068972 0.6246376732139358 0.06151184884685634 0.6361802503619598 0.008739519123228234 0.9898848442083505 0.32433869392581927 0.9823082869765969 0.6102826952979815 0.006288936473854978 0.8776308056476992 0.5725162372457301 0.5351615790027744 0.132465376317074 0.6098862398349963 
0.20550499075220519 0.682377241211248 0.33245974622348595 0.9181362152602083 0.31736656969504395 0.22896910802236958 0.7827049364890591 0.4362009691772013 0.9707338332157124 0.4018350194340079 0.9600828067053039 0.4063560705644055 0.5333997554717205 0.6853356910893674 0.09525150599552223 0.48447928397336204 0.5818368210865436 0.3880538165590002 0.7732517961900743 0.04316110242122573 0.8785241309100759 0.9071174192601417 0.5425065562905665 0.842927029064868 0.3667162815120687 0.4747856062427096 0.7536796379032622 0.27863323168597454 0.0022647036591594283 0.6495347216535197 0.4914617655941286 0.2762612584519343 0.8993282178396033 0.322671065494866 0.38520244194804265 0.31475210289754196 0.7052872046635499 0.5515361860818628 0.2884090612991449 0.9781830885454618 0.33990091176250237 0.10165930704376303 0.302484901923342 0.9055059382928804 0.23472531655168605 0.18275076816810365 0.3866805858772985 0.9982279906032506 0.9910347483225523 0.4972192219577697 0.8440837532244935 0.27083276080644425 0.9379671834364494 0.3849347477388594 0.30257456183422426 0.7843998485443969 0.4173397953191148 0.544248517504318 0.4200252033848071 0.6907987834976459 0.7475579367067747 0.5368194522823228 0.4346235863333049 0.5329948736424109 0.7262670614531598 0.04258667306338759 0.854053097907829 0.59641957448177 0.21987397673655773 0.6212660988104668 0.03136139809416383 0.2870217842472721 0.8577899871083858 0.5580383237598641 0.11070335196897929 0.2235306383452077 0.10022555882512052 0.22363412409155448 0.8427443119088593 0.9746243494875405 0.9805921740539523 0.2628129891579868 0.06736159770086025 0.14611362661748306 0.6370438926820798 0.8402690193163618 0.2403839668789145 0.8289126678498366 0.3424177962123094 0.11928770883632933 0.24145980840892178 0.25548419988273996 0.11780880458198217 0.12601616582397546 0.8417470398238035 0.8806094509501633 0.17224086914000714 0.16099846493362235 0.6329684843533258 0.0017481638850984638 0.3493736408881255 0.9596666506976723 0.9234788009320275 
0.4649173332170926 0.4819094850888418 0.8747671397483785 0.09234028810356676 0.9639317904518382 0.055343067190160466 0.45624161101080385 0.1590061407536596 0.9340138130774194 0.8649215414725075 0.9378697320204386 0.8740618705638846 0.0651926699173353 0.4131606429205692 0.30502113768725714 0.46557689345142617 0.0165796750009507 0.63998801745205 0.8141524086140394 0.2859696043126705 0.999517519961773 0.20356190765099802 0.062224041887258386 0.9958913610806887 0.5157676693583857 0.9370400358926442 0.9630016740240736 0.5944858535951947 0.6863129473702625 0.2950495385585782 0.2772447458510858 0.5594822603183619 0.28701696417282807 0.573336175186927 0.6650746011257872 0.5959318626913062 0.8192955801758156 0.4360467250451442 0.5813536234632012 0.00899964461164715 0.6871204064892085 0.8173259329212803 0.7186956663599069 0.5140736163259504 0.8260876784505132 0.5773849698221941 0.11776695561182038 0.3765567027916501 0.9359502788055105 0.3879259474413258 0.510826423346087 0.1340567761958965 0.804924752967021 0.19778266361017993 0.9419925946384303 0.7952929498698571 0.08224907420001726 0.2868649910517359 0.21651393971847643 0.33288703982994117 0.2548810392244746 0.3549705757097441 0.2713580468446599 0.49131092712937985 0.5148913704792452 0.3117540652351417 0.13072477506868307 0.4166122866262999 0.6660193784126286 0.43512071935220153 0.7126267171737222 0.3811425770516519 0.2110233195301986 0.3475379170990448 0.3864702534265776 0.9243624326013586 0.28587170298557874 0.8232343988107562 0.8466088238681592 0.05990184977474278 0.16248561613477697 0.7839839572582263 0.5631295622950568 0.8094035449580145 0.6286562984393416 0.9671462920605213 0.7986732860146526 0.21527944585365277 0.6719265741886761 0.6568652932423326 0.9388768712534651 0.1573624583827391 0.5436381060198263 0.5578002791112867 0.6740665478852472 0.7525361700623672 0.46001131614364676 0.3604936574704537 0.3911186966719863 0.8517778853860525 0.9090383348130304 0.7984536686354251 0.9869450974711672 0.16565095747022585 
0.7751334130428094 0.029258802321343813 0.19884769810112635 0.847134523365862 0.2678997448015429 0.9854657341805575 0.46202723333663864 0.9397801284887725 0.9470351970347235 0.9358303425067744 0.2508193936489197 0.9595363220717045 0.9723136681735085 0.8733270412260148 0.8377709787499308 0.02018376275828737 0.07574313588997761 0.503678684331896 0.41345768933733484 0.3351751533790285 0.6524150485117524 0.5465858861546499 0.5499840771986386 0.6327756517126188 0.7972241859321922 0.7815708208844226 0.062395776805740266 0.006751129007940615 0.07655187062302127 0.7713276354950411 0.42477226044126415 0.9914530847699669 0.31716977367752186 0.47769756568128974 0.0315585253209022 0.1980008304472941 0.7190352084809775 0.04886387510150869 0.7597322521933397 0.6773626235144308 0.15431341945209798 0.9010883986953706 0.48369260208640996 0.2864897190702853 0.6018759138656138 0.8442871054729205 0.4222956613472485 0.7252581411677134 0.284688198796082 0.4079702067182597 0.4562506181743562 0.584824938548372 0.2269144935476901 0.7125473489853255 0.8441298776466977 0.702107438177989 0.6282305392115077 0.2007259802229987 0.0865354372146172 0.3504447700991007 0.664750368964215 0.2947072173526766 0.040502488793447866 0.3131036888735852 0.06913640650155295 0.5771099796507511 0.7700111602313999 0.6841410346310398 0.11687656704467264 0.4070311717913301 0.03351522014854702 0.3202378288428078 0.4513761472904205 0.9773754407358841 0.7556867523990463 0.22747062036502186 0.5174262372996932 0.6955657253682593 0.3081138787527673 0.8085882050427864 0.24077433254411007 0.42697116043010763 0.9324786239410852 0.8192123778408278 0.9909373344045124 0.5788665211743464 0.17978022087210777 0.08548393357479656 0.7950606675155872 0.22246317460116116 0.8104586599357463 0.5210258365599952 0.23254155591511305 0.4515285813172729 0.20281247066222063 0.7476978624458402 0.4332985886061125 0.9583377126045068 0.9508934524583795 0.6724676349966006 0.12698380729416792 0.3711761893665957 0.998100207991949 
0.7018256364080073 0.7460972772901097 0.24234193047616137 0.3489816247047671 0.8855222299854018 0.1728786042596434 0.7015682914635968 0.1293003008657675 0.1730070964715248 0.8380137488482381 0.6970182576678371 0.5231949036682243 0.13880291878022444 0.5286093642582183 0.4344760649930067 0.9063198910579588 0.9871461014010834 0.4435511856403056 0.09508900063355818 0.31512955534299303 0.38157976214372114 0.7462771684397291 0.16298857728209448 0.24501283272683538 0.6617852347593486 0.10815961386914097 0.7263472691569185 0.6537742935122419 0.30779001707829 0.5101051172981514 0.9289029820585475 0.9619956149735787 0.09079155186247001 0.8756452500292544 0.3437104738473753 0.7881010302240159 0.21018813694586147 0.9457042131548167 0.41685536239714904 0.13348936927968535 0.858009480034208 0.17339490656404977 0.28609261715694057 0.7418844076500961 0.07479089543227602 0.3166077494661409 0.21784864553493477 0.39709380652307513 0.4650374065031009 0.8368215817597391 0.6257518424124988 0.9946569041142073 0.32577432702551123 0.17447958133802277 0.13483376534006164 0.3253996770562684 0.008284015175230253 0.4644372434201143 0.4377691875436317 0.6427051336095672 0.8809784949488966 0.2794033540867332 0.7530000582298157 0.25034575500888356 0.09046055644971773 0.5449596838589661 0.5393149111436315 0.544014124141635 0.4281764588436111 0.8566581610860013 0.7474317165235026 0.1672267987368291 0.0016655657949264935 0.047859075774694615 0.2963589112382886 0.20106545498637451 0.082263654307277 0.006387685388379527 0.6507859199472 0.5972585556356661 0.4552179544821817 0.4187507265642919 0.3454808220891731 0.3853957476272446 0.833523900120751 0.3325682144042038 0.05364943202321193 0.5376061961852902 0.4133949956455627 0.17104033864407342 0.3284369576065066 0.7578215618245875 0.4471284017321202 0.10768263428029601 0.4602294981280407 0.8777823870044411 0.7882313451314188 0.515227952959376 0.9871394259280253 0.9882371990312082 0.7240237507631683 0.5395059199215975 0.27203546687326896 
0.9113211419130292 0.05253527012887371 0.7172768310965318 0.21670430806744645 0.12889129324446058 0.28336550517415215 0.9564089069229106 0.6222076048925164 0.8216339662545896 0.046278470914129066 0.12745355003688652 0.9242248733894517 0.24504890757547193 0.2766533424015778 0.97758622643239 0.03314877418022466 0.2709542472123406 0.5135156761504137 0.07104536380049309 0.7909262703293841 0.5569448601636288 0.6937241391368164 0.9846149582459615 0.12011592635317336 0.47127630851360913 0.15499027041651925 0.042361605208149355 0.2853575086523784 0.5969350498710198 0.17075921592940757 0.46074993530837205 0.813427135134545 0.3661953728287407 0.2315822014609925 0.8934246977216749 0.5252776559578605 0.13575167674831135 0.4604619438206997 0.014362232568310707 0.8219405477367354 0.8025546788959065 0.0037544631078304525 0.9587574686347042 0.8358918635584601 0.8817686285178534 0.97456215235735 0.15292698943013805 0.3104618332594965 0.8208057955122464 0.5421542895520371 0.3076730264778913 0.10209388366895178 0.2071835911671639 0.058803230716529264 0.12396664426424842 0.01373571156748421 0.4429888488093383 0.6945254240261148 0.5654731265009985 0.26237110281619935 0.40502270041317134 0.9105686369638367 0.3893561400005374 0.25967856544689216 0.5872384441831136 0.24408773913687998 0.9949616223718931 0.5420284777342392 0.23087432938326802 0.2993035507135774 0.549396201618738 0.6245078312639525 0.9877216491189181 0.5445402067752272 0.779679084417117 0.2733875551176691 0.7332580304392309 0.4998884512676325 0.7830607006677144 0.19242461213144058 0.248914600592805 0.1668439046801642 0.7002647429597956 0.4006168199364456 0.626535114731351 0.09912039375547754 0.12082500483960312 0.6425058570064769 0.41312357429853164 0.34294082659174263 0.6453523213672935 0.016849635641644656 0.7102668203536047 0.585458564185904 0.11648064972805472 0.6354621995484727 0.6701568037098687 0.24156513092014387 0.6484056714952214 0.5312460582181915 0.08892663464359352 0.8821902063079643 0.7343435064679462 
0.22291776290761334 0.08805383245142973 0.3613028556358642 0.39190061967800816 0.313654809765855 0.19587277365215128 0.23417321061905183 0.5182609686503346 0.25186819563607654 0.4937778013685329 0.576056443735618 0.3702592712287721 0.9145313880907087 0.8754871980618292 0.10831162978099107 0.3588248628234445 0.9869732672441223 0.8827776400338255 0.6180702634510934 0.15294302513651092 0.20776906737493217 0.027387399547154856 0.017920328825661413 0.4251961647670027 0.4233399980206096 0.9863260632700784 0.0336735048080814 0.43911046496695805 0.054098475834510884 0.5666171505913646 0.43113504186161156 0.07794771176065218 0.012452986916201114 0.8755396197205625 0.896896465339763 0.8452465937219634 0.33271248308096535 0.7037225635112111 0.17352412561957364 0.8293312538040812 0.7739077109431372 0.7309872965434463 0.8161167779250252 0.6171692582887782 0.4387722395588619 0.7986195548469979 0.9850582293942708 0.6673751159659848 0.015603616352030891 0.15679357708433583 0.018891586105011182 0.7078195069055868 0.5944510039176887 0.2921592471339596 0.14159403015939553 0.3102186434561395 0.13505254407156908 0.6681424183504938 0.5501319888807776 0.9122417923298882 0.5711717273497167 0.4692971434418173 0.3010284982823115 0.20708972394945357 0.22445951083724613 0.23108848472511434 0.8687760807202289 0.0011602798483324461 0.6706946209704986 0.05777270355617514 0.9842797606589708 0.3523316109103354 0.45223144064907816 0.9835815310504845 0.3781167672161718 0.04455779121280512 0.06271141288110427 0.9162054328600714 0.351250855332245 0.22888530519333927 0.44357050290597755 0.5166106962015031 0.6749405554637874 0.36666323385942934 0.8266016939742987 0.31551359869928697 0.18193270518448323 0.4476338006248197 0.13280806163866798 0.3443743693103891 0.5526336858638814 0.823754173155152 0.2950086477196292 0.5675621695774643 0.22280249300878596 0.8990143539840033 0.8101358400347541 0.4423824568192748 0.04341178206846619 0.3461803597756786 0.546889510835562 0.004945711985030132 
0.30867360908761576 0.48330510743995236 0.3353898948958939 0.9823968987860325 0.03482931871981132 0.9360149370146493 0.1476974267842861 0.5658130860771329 0.920153644155877 0.7359921717087665 0.7415445342119116 0.6431166889253239 0.24572507624893458 0.5617392220842076 0.48435113423267184 0.4876396377516863 0.08686636790908664 0.9146237140543306 0.7005716160296048 0.12551413342931006 0.5967598302306062 0.6413673913260808 0.29264822109242583 0.8638536761886654 0.7965943969017684 0.3635569284289587 0.5743219371179907 0.7517278356402111 0.2607027798369659 0.05541740297758235 0.22477860234976932 0.06722286734652227 0.32049820142404595 0.223004439327824 0.023084842511739923 0.2764193427848298 0.9276225233025693 0.8811344054552651 0.554594362079613 0.2939074740378972 0.11456820735563467 0.7987862327203585 0.3944823695537719 0.2894992473170328 0.07013916689935029 0.2893727785361224 0.368292133566817 0.7320860656299131 0.20746602256481694 0.15919334525755047 0.3653539755623735 0.8391292990986909 0.24389382981973207 0.295543137355453 0.8381643888975077 0.10906045662787556 0.2349111340696669 0.7753697701967698 0.5592883639094907 0.1539065966365576 0.708391438562379 0.3060304987553486 0.42707577469013613 0.620745698353341 0.5307690682762082 0.27161850576924607 0.112458405481491 0.5931360038236527 0.9033735575065586 0.6514899473535042 0.12132085787867108 0.4974517265131848 0.9349977553915404 0.6020383012893777 0.41041573736427195 0.20409756820778646 0.5658770673132214 0.5484259189099209 0.039519321360061865 0.7098627049374666 0.22860778485281552 0.2198971533908658 0.4685008494594981 0.5152144092329579 0.5231352515742529 0.18046149051526506 0.015205249089846218 0.5691203628089233 0.3557931662102275 0.05614884691853084 0.6785252930279064 0.444027979136132 0.44647736411832406 0.8407038175301974 7.662647665580558E-4 0.22491765623452709 0.843388885801927 0.3734989579392556 0.894382082636783 0.44888664923286903 0.10138293192099246 0.3266120993488544 0.39038923074403364 
0.3155087460081367 0.07781115095270452 0.8124261440143344 0.07924053062677971 0.30325390587612056 0.8788472460070905 0.02877402765128001 0.6096682936640135 0.2735419874931283 0.34963685790351406 0.19556560858324945 0.9869818314443335 0.12154239939430878 0.6432139898192475 0.4049974791976009 0.5695320952068484 0.04162176801184003 0.8259786892937442 0.9068603187319941 0.6005316424054364 0.5153801973482292 0.1816314201876792 0.7811231013549397 0.8321457630893523 0.20150559709807525 0.860811475883052 0.911886721634934 0.29974588137947367 0.009215358753023373 0.7902884303547032 0.6699645598604383 0.5864445081581896 0.770842739484914 0.30340684673285967 0.7357609040217753 0.981505891101327 0.09416520295650288 0.7825865451106634 0.049782372682869225 0.7809546140711975 0.17136060991956303 0.7150317354791036 0.9650317834964354 0.6046795407777512 0.5490162929625242 0.9544042594511081 0.2936976897042739 0.011558444249350375 0.12240512281994587 0.6540675806518195 0.08996703019022934 0.8826040496177711 0.9072817825621773 0.2916098974717768 0.6914522773602321 0.011859453757444682 0.46374140212927806 0.4397059441353458 0.7536386279511309 0.09574728921762066 0.19922197569813416 0.454979216646724 0.7549209413003319 0.8438034955200862 0.16152082831649306 0.05437715475673244 0.7242138108044249 0.24720326747358723 0.3477345462592435 0.8276741651741335 0.7487827132626703 0.1362129077199543 0.001320984340954956 0.09506814068039093 0.15685790390690002 0.03367864781082752 0.8470206914138128 0.16043869716018655 0.13775683405827588 0.9688892172995127 0.7221645594857098 0.9336855435065103 0.9616551400060237 0.35462601385210135 0.6029333314025352 0.09966844820630893 0.8918100153214777 0.2499411624799759 0.5954234823050707 0.4966206726320508 0.3439539582745095 0.33218038313720033 0.06391374065821431 0.1335099153998387 0.8407411832854667 0.6839938915704437 0.7025145725587845 0.32367219160615857 0.833542625563717 0.06455541762449779 0.02142286473427324 0.7411457282066031 0.5195498072594588 
0.6789358994199813 0.4152929461256728 0.8164831173905581 0.36906190301313413 0.5855549181639818 0.7087733726330739 0.7804223620706027 0.19599426193587177 0.7888595811372999 0.833049470219578 0.836648091311992 0.25406070981808915 0.7528568329841616 0.07726891508287348 0.3989762479199409 0.021746348555662487 0.5408522848057792 0.1800183911417943 0.06364739085855164 0.05110097810172176 0.03772494269911708 0.3785568544212773 0.004014577230131988 0.49854819309223863 0.743293243629589 0.417301513849203 0.772763916778455 0.7303182541903878 0.43880553383446286 0.4321003778404723 0.07590743384676091 0.7007779966593821 0.6233134704043266 0.5731151524298418 0.9431391254442129 0.6388440539740106 0.9926643649395334 0.20667683334680742 0.8607469338204383 0.9458305657309517 0.39107425551824615 0.5452659848959636 0.8561246401296392 0.10749616257210859 0.603720842403787 0.5948078202103917 0.22702996720312008 0.8537190750260626 0.07852098629905047 0.37976233686012084 0.5940853099969731 0.7611435192863576 0.16623540294872974 0.5090279850159877 0.22162090106157117 0.9873043706271202 0.7502681151768351 0.2794668263386568 0.09981024927588922 0.8491513341680375 0.2010063121082023 0.6788982115530632 0.1101500255437563 0.5145577818966668 0.4342717933124779 0.8466967526871059 0.7066881097194049 0.16231336775386396 0.4794321364386488 0.7776698352813118 0.9370045567393532 0.5842640850365449 0.36994639204283697 0.31367863129846074 0.2998026643330437 0.3526769383421683 0.35759634464137424 0.7575059207867959 0.8900679409172702 0.356570519800112 0.42138116802172987 0.7647628323857197 0.5181050821221338 0.7226675064065391 0.7930300047657638 0.5221658976888925 0.0053641621226532266 0.8797881818260435 0.7533130485388948 0.6043047776337308 0.6578191134146325 0.7999187506961772 0.26680895071908306 0.3160599435904867 0.9967629094749736 0.2548523701072931 0.11751227718364798 0.7741189734167138 0.18723548354796793 0.2642143523207281 0.08364179503376856 0.4585790295519807 0.24590214968748836 
0.7788475936655586 0.3472367547744054 0.4744118500787389 0.03305157291625893 0.042948004865868805 0.7726047513358922 0.30260736685885203 0.23748413251301914 0.009003928703530284 0.9074115698516232 0.3921001106566352 0.04841292703556388 0.6415076358694173 0.5611798148290632 0.546122984298814 0.9751013075811158 0.9385707655813983 0.5981897399198408 0.6904203573769336 0.32281251816132583 0.02499020124204898 0.1452522865891941 0.275793137465408 0.16274812817748618 0.725974256910283 0.7595507247916057 0.26373655774528404 0.24400424996160064 0.6176534626454802 0.22772421233613416 0.3714704208651147 0.18717527258482347 0.6686647072747434 0.5745926810648999 0.011451558144208174 0.4720869322540422 0.23651046121169317 0.5483995650154068 0.15049608317913366 0.18624226598649074 0.3630601488218028 0.3191747881188479 0.13492888638266154 0.23591468831900586 0.025473730915413273 0.37779777910895485 0.3807168262388547 0.7384250857062612 0.2419924102838683 0.6555659946389389 0.7113307903429457 0.6119905805902909 0.7064548234223801 0.6975518012842582 0.041121044173785926 0.95863457632338 0.9310617374084719 0.31775638939666984 0.04827943217127695 0.6959991415644002 0.7138256587344018 0.5400467214083454 0.4590010440612814 0.03550229468904875 0.11530651823136895 0.7860872547222 0.5732467306721385 0.8271344541688084 0.02864807066585906 0.20995549871334906 0.702547405702511 0.49528984235159645 0.8575941190118562 0.7988564459791808 0.261579704297495 0.2753798931132071 0.7790775622837456 0.36032788439303165 0.3577817461825328 0.7275779999426356 0.5601391831011707 0.798554909611189 0.8570671575682852 0.051895612670677105 0.923347647796495 0.1458887052396427 0.0687339201790409 0.55789189239482 0.010555540123861173 0.6603909458160252 0.38895424294430714 0.7526704810086234 0.5817410269916773 0.9262021087894351 0.4697704208665473 0.9024184175871149 0.20384821930402652 0.30075825938530376 0.739310897442101 0.5955941069321633 0.8339538420479455 0.24135753970542784 0.388979572796318 
0.7899722045411016 0.8817514833483139 0.013304129094116046 0.26466066366520624 0.06110578581967907 0.048259180864929974 0.03283402551400216 0.451520393357529 0.28636647532525616 0.9808194735751177 0.6302537764310937 0.7257759168981374 0.7965762266800598 0.058876248099811845 0.3321990108104972 0.46184085576099454 0.10634468190995416 0.4306753454489698 0.8566476738355868 0.02954591107090676 0.6945784326259284 0.7579512860978904 0.8902977741580069 0.19367219918293477 0.9482297615167189 0.9938255373636277 0.37514735914222397 0.5368238068064727 0.3052040660789548 0.9185565402460037 0.46959000207067125 0.4667052797804552 0.8364466805387878 0.08844510398691274 0.5690665038501493 0.9401588931922626 0.9925814251323926 0.3151135968052753 0.7722893084365049 0.6388552878412409 0.007074805102709525 0.7855405558451302 0.9612832713037798 0.9653204421596334 0.4957512655293934 0.4576472908945013 0.22696257924486007 0.8930211153517802 0.12158643144972237 0.42189636742812275 0.7166904218297355 0.4005192059467414 0.8040569895630059 0.5062763860207805 0.10624470000556796 0.9472457066972014 0.8439274107095952 0.5202114092282719 0.8058390295534339 0.2072329294734585 0.5072049520686754 0.1823473196862473 0.6983255382742759 0.0671833265997489 0.06609650671948697 0.9578316373578556 0.6774324095010288 0.7165421935776136 0.030151740091279078 0.7426173282262206 0.5048649100343777 0.5097803983839492 0.31737938420760436 0.5754851396958812 0.5373265294921209 0.16488270017087203 0.4129740497376414 0.3328930591431136 0.6879921713820557 0.7770758541260726 0.14239367972969608 0.6674258647582226 0.5816927678214402 0.07312027801063126 0.5458780242471455 0.28753105114664257 0.23904655914560302 0.30902743370910435 0.48310146246009844 0.04439248241947058 0.78199970101686 0.2228550107188877 0.019503758747772326 0.19120628702222708 0.5917143183885184 0.49427009040766756 0.34994500823414765 0.6107881438867764 0.06946987952949102 0.23386049007297138 6.815898610228466E-4 0.7693549834712834 0.6522192247152792 
0.1277127090561141 0.3172625590221223 0.6474879253302278 0.016668074477538264 0.9965646185508453 0.7937963003061497 0.9236980540133013 0.4692225765821869 0.6938795998980299 0.6281400582867573 0.24167978962526304 0.36387696075025167 0.4634782398273469 0.9979153872805755 0.4792513718405026 0.016088766343293592 0.1049027283012115 0.9438551097213984 0.5551456941494662 0.9879168313523595 0.5534776710267828 0.41983793454885754 0.45922896324116846 0.8081729597192178 0.7720711359857783 0.02306958940509407 0.8723886120340274 0.16106521143776997 0.17284746758231606 0.1532198123339814 0.954245742770171 0.32778697006711743 0.13984375981863872 0.8659268533978893 0.6200122363732412 0.19773300416583206 0.6069898744871687 0.48393219942876076 0.902225540803188 0.47949956656698434 0.06123182815049266 0.10821027628642299 0.7987593210568421 0.8365531038841665 0.7867067806980185 0.5318089504554485 0.3454213252257766 0.5484275485716887 0.6434571972107789 0.700718727426965 0.5196148242315214 0.9555379374080044 0.18296093594941465 0.7889394674152096 0.9809530028153385 0.384173698576067 0.44279275538268803 0.15901584705586647 0.5853804562569365 0.4679620402633975 0.3340221498820458 0.9646633062607197 0.07126999263979372 0.7830681621193185 0.13749120571958462 0.9187090046860423 0.30912649268875303 0.49662601690552965 0.2589052550954266 0.7974225890376694 0.030944128394349235 0.29136906160220244 0.05770857059179524 0.5814143027705558 0.010927327494160699 0.1741581184393628 0.26100563598422144 0.7436286385929558 0.2561993443927031 0.9020789742896825 0.20132846805478744 0.08656385822921553 0.8842870815178182 0.6670096303916552 0.8758388292972911 0.7036990032528879 0.013584573851996296 0.36803337205315845 0.614593555981759 0.0942115981593844 0.17366124221476986 0.27080035621564813 0.9667904080606126 0.19542108730351126 0.45419590440592306 0.10391721353867456 0.2664504096184993 0.11399394388794049 0.8544807912293201 0.6746704412315248 0.22278237913167698 0.6347988750513444 0.06985426302311748 
0.8998660917890489 0.7955138933274174 0.16540135891526897 0.25708508640905103 0.7422948897391614 0.5617621793666441 0.6345385545146996 0.1576449062332277 0.9193081056545719 0.758557538047756 0.004517380777386726 0.3259696642603275 0.4072167592359546 0.19392086574630807 0.48032975949187984 0.10922508469534098 0.7037025376452524 0.259436022491791 0.20238098571720542 0.4557550878734413 0.530155660147726 0.3485465620904137 0.19507834781513023 0.591305927043174 0.15683202064932045 0.08734599393255138 0.6101289285427792 0.09528355021612478 0.8159473736421348 0.918419124407022 0.8512471886674811 0.9337149045911107 0.7313040004994834 0.7433315277685376 0.9960739736187669 0.5612487841288781 0.13777125318550298 0.5256530337302049 0.04146101833238858 0.29144510810001667 0.28991793094248564 0.9839404989738755 0.8898754582032784 0.7527662645923224 0.8663214086769981 0.7628937706876097 0.6675317552969239 0.39709767583517075 0.36455449228120507 0.7609380230583505 0.9189584691547855 0.7082893216552508 0.05157794394030668 0.6137748818168464 0.45806050544177235 0.970123388048405 0.7852429945733568 0.685983396107955 0.17233761973585005 0.19627793352674816 0.5548346412875143 0.3065413420495321 0.8747723725673098 0.09926999808479497 0.9020633535135837 0.3991764456612321 0.37700208751795905 0.7810260044855696 0.47863911806616755 0.1047455963674947 0.31774056575933896 0.16464115878379393 0.8725385391126842 0.4703800274658526 0.9220177287508676 0.05505371084563393 0.4923019457617799 0.7889228312737069 0.7896557623815607 0.1157737453431551 0.737174369588495 0.712224468112001 0.015502937651319049 0.9794709953023011 0.9006307938196598 0.8331512853877484 0.4292973001547499 0.17213905333592605 0.25580215570140796 0.0337003980143632 0.5096340734251227 0.5237185470291331 0.5535459620984543 0.530404231764753 0.5743621645602904 0.26247506792962705 0.7273810753996187 0.21324633353283584 0.17883704152107482 0.04609238510270963 0.9442335056803158 0.5705995977098299 0.26380812896120187 
0.7693550371287403 0.26591613418437576 0.22023696841601315 0.48632121700914943 0.10645847948274456 0.09349214458673316 0.8252535939165961 0.33986740855908537 0.3953995674413642 0.25514819617955475 0.9637787494244067 0.12411178249138488 0.6525634600698504 0.06793039309293292 0.34264471358258164 0.5598868354028317 0.2175377057591561 0.9546721424815025 0.5977187782738402 0.8617368398012468 0.7713479151234223 0.22966992982653744 0.8073940784493709 0.3781511167331949 0.24572377136600332 0.48013954491592903 0.7607732347763668 0.4887257627806645 0.44666818800095565 0.5652115911893222 0.9683686786847581 0.30784501251572327 0.052515945756928084 0.6302020980423065 0.3806818625080489 0.8860745468934926 0.014833315062302677 0.691120225354596 0.7888134463018771 0.02331122541414632 0.28992285519144334 0.2828519367622321 0.9212944660117874 0.5118349135372392 0.2593629764472778 0.38418469252149945 0.3226188654947756 0.612759637891035 0.15992466775292802 0.3338153133402114 0.2014957895011903 0.8359831110763268 0.3363730653172129 0.7337091220188731 0.5706297243092675 0.6415847418418991 0.15507453005630556 0.39965558982996885 0.1648751060532737 0.509724715973311 0.9719947547194556 0.2635917938963803 0.8209019121942093 0.9364469923070836 0.63558696223314 0.7582420122641577 0.2290465817821843 0.3043918901313837 0.623233690017152 0.6218478971565654 0.22681308391887178 0.25027474440311714 0.05931931200791751 0.24924055787670318 0.09352679613579795 0.10508886883336477 0.5436751934938864 0.6852323758033393 0.6269336250149754 0.21834273598586962 0.931052387305728 0.10728283742791656 0.13507575847886621 0.49424257497476387 0.9454421165261024 0.1766191670146392 0.8605641043075091 0.265583588922658 0.7874769568009795 0.9504695897707678 0.7694073327179901 0.710214007316244 0.6988111886799743 0.14279284144907745 0.08284543262606525 0.37528832873856144 0.7842915896468506 0.3968648326848443 0.4081757752105375 0.016127067485073088 0.26089446187274834 0.8903094342820649 0.8333481293276461 
0.8793524923716309 0.7018191421933019 0.03163709535767567 0.890144215756402 0.7640708534305046 0.7747917362081276 0.22708197779232275 0.8620467592604824 0.6802236673400645 0.4355470425583857 0.6975036698800775 0.5154416006502012 0.6898317040830615 0.5306207965937187 0.10734775506226946 0.9665859194480778 0.5647072332217048 0.2879984945961612 0.3311345232020362 0.06181433737474584 0.22331144760576072 0.2298308475612132 0.02767283776725904 0.34474397968405257 0.519785917780937 0.6283402662160148 0.81817187397101 0.4117785713175125 0.8497876686006736 0.744206385179254 0.7716507264514804 0.21600068064551547 0.2106101249078559 0.7361336993089533 0.589521755324204 0.6695799822914602 0.8071727968361477 0.3109668711279203 0.35095087233233346 0.7161153376079215 0.03081465415535911 0.8956350011164439 0.9315736947165644 0.8968665022507035 0.6544277061982162 0.355369052944008 0.24037374256889854 0.3186211051686352 0.38531838905942917 0.05705104451543219 0.23300503302172382 0.8459811027527024 0.49628660407977643 0.38221794370655005 0.254568275091538 0.2871423714371193 0.11576325203109183 0.6311569531219519 0.6408258426242939 0.31978006100806233 0.8994490265709364 0.6276927463782123 0.6640683774776689 0.9124409084623236 0.22874848891525423 0.23368201906192343 0.8483209145866678 0.03688372640973436 0.785395414190305 0.4128938776330362 0.3983044724329381 0.5380973928697788 0.33701654375200674 0.4458694768029131 | /source/firstTest.rd | no_license | guidocalvano/ThoughtWeave | R | false | false | 104,916 | rd | 0.15134928934065528 0.16923374403608815 0.29405604975036537 0.5453877916979596 0.740269764474385 0.7227189430779608 0.783288431508119 0.8596583566574633 0.4406391355546023 0.6278452086400551 0.35540091250302586 0.9779152850210026 0.3472763887137903 0.8293192957485583 0.5969423537395386 0.2923752614591918 0.8276761148186407 0.9738780243927885 0.2605199781183183 0.6058862957489728 0.43147579160962646 0.3280098937570334 0.6802026499770367 0.36947784340057366 0.6488391115524473 
0.056959871961363984 0.006925052996174519 0.22163900199226605 0.7746216807696157 0.4141202938849511 0.6228095135757374 0.34375989561878717 0.847100029733151 0.6408872855004846 0.945862135014187 0.16277261404785792 0.873207895890835 0.4092884127153995 0.7202885058965115 0.22987915833275996 0.32663902146946544 0.45316423496561253 0.8027132823612538 0.6833567426018462 0.2108387710995443 0.6284126237230672 0.400877254453715 0.5637805951549757 0.8158215134329276 0.018781529072221193 0.6989245104391518 0.3916076771554198 0.6659144847877265 0.8609224644141709 0.8121810246467727 0.8399150121086763 0.054943251169296436 8.930768506568709E-4 0.05436572544486862 0.039188451745382546 0.9960374474686887 0.31402456702307446 0.5714635257283692 0.036409473601139664 0.3751498532593581 0.02104537729693745 0.49902910930316624 0.19728852077321268 0.5905462289341923 0.1155798339512859 0.9334794494977533 0.8883454815906879 0.9849365514598085 0.18161659544394737 0.17297200181510897 0.6046395569353348 0.932583201895332 0.7242683894992861 0.879754394566761 0.8193013945498483 0.5306352271916501 0.7631212888795492 0.04341085115422194 0.22262354424587338 0.4621020830146685 0.8446980832077167 0.8832192185794456 0.6913151642210303 0.7472941299341299 0.8190370014750399 0.11787212211381914 0.2826294322754068 0.7812215708635728 0.6951134563379917 0.35210421046260165 0.7834239040200454 0.26635705261552733 0.6408093471440188 0.6823067108854267 0.8608144128382796 0.39333170129654405 0.7253607307420218 0.007000117637488312 0.6589362255227986 0.8965481765732677 0.3156912714038709 0.9430738377282262 0.7654264056269591 0.41735854608175305 0.13276561989240787 0.6949917555461664 0.8315937451317738 0.9443685043578994 0.8500731955163192 0.7017010035623465 0.7718437767730896 0.9685941062475726 0.7735797820931137 0.39477066072805045 0.39077809223840887 0.39061057724115067 0.5809601705635072 0.9413259892965111 0.49048861606341265 0.14749736126047042 0.31765269329023194 0.416632969285503 0.9959194260972141 
0.9892974031531829 0.8482521129367111 0.4537271039432562 0.4027696638454282 0.8278726027877118 0.7439844391278878 0.05126885531420511 0.9002630833003733 0.6054518672261637 0.15048358387027216 0.8858483383350633 0.8502114592201261 0.6858316176572741 0.5248164644392163 0.17124962918074393 0.9141501171995184 0.3581032435732733 0.2566794154658476 0.8347251611080428 0.01192571860027758 0.1609457966143556 0.09591599588429311 0.5391190164876639 0.4679684629221297 0.38493637267510694 0.9969820125402167 0.869717546711288 0.1555811362520041 0.2909943833095945 0.46071063203257334 0.8259944270357804 0.1849306904949254 0.2949905774880075 0.7047071915352887 0.6109563669893062 0.6970281001858529 0.6117151561852162 0.02592921148248839 0.5945850102954855 0.6238346550386018 0.6258137559864184 0.5324843187683221 0.2429093859183581 0.8537344032563869 0.5629638694529346 0.016405281952401318 0.7505322694439404 0.10196326731660543 0.8714785965939216 0.11513718457790856 0.091973190209791 0.9271882267696819 0.09268854466414911 0.06669957150821626 0.21597969520375704 0.7582798202621702 0.8978666596888553 0.3019618599314675 0.14699721340480776 0.4994246925245748 0.4001250657261337 0.8371973601787457 0.829277809938255 0.26707472968785273 0.811626906560726 0.9310578751922616 0.6516461042173706 0.07883211902602605 0.009620116710693738 0.7523372499816805 0.7247305095441868 0.8622205047814067 0.5992146797736324 0.8683686919401926 0.147506753211876 0.9803944342586317 0.6164878780560873 0.20862762649203748 0.696289309663859 0.5821905607230724 0.10241761945536587 0.7840880236112268 0.5968462956275233 0.4755257540987774 0.3963065577615863 0.8501068703916743 0.29910151041741007 0.8959987113798626 0.1467745865236313 0.3890881174107419 0.4639270393575652 0.49665358620405164 0.7190612998353457 0.7589037089892952 0.69281862540685 0.12021509597295144 0.8892937867954103 0.10402620308835375 0.8837849413691669 0.43170676531595753 0.45953933052812 0.658553863007083 0.48057140956759337 0.28721514670575155 
0.7018767406856155 0.38378548179983973 0.29737766352844774 0.7936644680558324 0.3453839665698788 0.4777293860739634 0.04751749634295854 0.3748177610307165 0.15685379325215842 0.16990677868732618 0.18200848093914623 0.8003224206005933 0.034213412997089576 0.8720368149687336 0.15286816502188227 0.05746872674891079 0.9310378316453222 0.19366721005161758 0.7559323047274783 0.14943280378041202 0.8612109226336531 0.5515403268873841 0.7793001816600968 0.38912585419302603 0.6947023294016534 0.10246204090189959 0.7860502041403399 0.5243737678192608 0.4485922075603559 0.5116663719551353 0.49365946539301775 0.07800225856865783 0.3018898556490448 0.6321460614049496 0.8775983993167811 0.615505083612174 0.6183566671852478 0.5151457909930021 0.8228621435809774 0.26332330048078856 0.8484574741370255 0.29799062076618443 0.8747382064574487 0.6923660588270053 0.2997285989212879 0.4226499687157935 0.39889984445260185 0.9542545143393767 0.41426523721415665 0.3269127290210684 0.25095315262108664 0.011872044558847272 0.37269438238896135 0.21977680140336497 0.8768551706665761 0.5411557569995201 0.5559139239950674 0.803632179004692 0.6181593767943402 0.5976655341650258 0.6390424215484029 0.8380432750911875 0.8798336883675332 0.03931851144932097 0.8664383544988984 0.25341349385881795 0.7334357505307897 0.17577860469192708 0.362311425896793 0.11754048547611473 0.4747186657289567 0.9753421664967855 0.16836264506852516 0.13404821275995227 0.6322798720583661 0.3017622927396397 0.0680540461035627 0.5089075454061284 0.932654783800123 0.07630593159740684 0.15652325256459088 0.9597344319864411 0.7672959722703001 0.9644398116378606 0.5458212142511744 0.36228065073419347 0.7781671952110039 0.1718921705208587 0.5570666867337812 0.02950935987731429 0.28122165390855747 0.2014394029447224 0.24427434561504568 0.9008733555242345 0.380821937485317 0.9696178949548353 0.11902752217853263 0.9828915420305544 0.7931987349451138 0.5116632299530938 0.5008310427230287 0.22684463286316914 0.5933342627801635 
0.6133301495904028 0.004893313656125886 0.6553886568545436 0.32719540804266334 0.09984854580026314 0.29725075082285146 0.9571105711013322 0.19003086845069528 0.18512202233631503 0.3965625900267403 0.9565273813581746 0.2614852180681121 0.23837747681033294 0.48256660899496506 0.33675639353393794 0.10957238909257516 0.5608252421746578 0.38310219530229195 0.7430613390520862 0.42808961505503584 0.28149149720304256 0.654769744873527 0.9277046140930081 0.7558424399344792 0.27923561401753616 0.44671012976389013 0.9100137565063817 0.7410900241727074 0.11706701775473272 0.14265229446586636 0.22029590530370668 0.8609093710677268 0.33522776716723013 0.11329018195147844 0.1722106243704521 0.9827158846240687 0.091108201798073 0.7792153428790969 0.7654388412517965 0.6639670447370284 0.05691516126572849 0.5840447652638132 0.25896090744387434 0.6261147405912846 0.6901133044863957 0.408048059993644 0.6749846522319016 0.4101608920369869 0.47701184868780777 0.41449194449854376 0.4945399795836207 0.8237106796890606 0.5164708796772549 0.31408418963944884 0.5879833847912919 0.42320536007266496 0.8585521533304041 0.5573130437188033 0.9871325821673871 0.9400575671228109 0.3420419789977017 0.020142441986095938 0.4504776112894524 0.25569926669917453 0.2851243549705108 0.3354371709533642 0.8988914446453724 0.2580930059334092 0.27508456078602284 0.1406565528218574 0.7064900305340904 0.6710451217445385 0.47577502343209 0.16176865623599512 0.5197677839439345 0.7376994342727077 0.3256765538474 0.2340018053020605 0.7478612004713907 0.4608785220488456 0.23473984586507046 0.07196388005210297 0.11777029064002809 0.9554786998734861 0.9047936521572074 0.8448237703911885 0.7205934688196889 0.351914401930269 0.19615743491472737 0.3866638090732656 0.7883083641844352 0.6046461575795695 0.04219814388836973 0.17777587531460903 0.03535773750514681 0.5061156727162619 0.7739221771513527 0.06958797864025823 0.775276063531663 0.18395068225384192 0.3582949924575325 0.09407187661930483 0.17981879151247038 
0.36809107020340326 0.4768327361983845 0.59789702118614 0.9865331836550318 0.7545268887990494 0.0751115400216924 0.37225949698221217 0.6804359844714496 0.8636282577742702 0.9991919346527752 0.4112290851792352 0.6764394996217313 0.893585586530676 0.6015569542624893 0.4246215333335418 0.3113951233510671 0.20326834093313562 0.7470147856979584 0.2558170724142206 0.5180983726395331 0.22384896080245886 0.4781176026252951 0.9106106432796219 0.8983279795035073 0.7948282060673159 0.7100004506572624 0.9900926417281805 0.5379662142353546 0.14763665068485865 0.7161449442368775 0.9387240950408987 0.31447570801406766 0.018776816844227606 0.8394248710363834 0.6681831693623639 0.6354838167128443 0.6645087909139565 0.8168917138868418 0.7982246804390499 0.5169522226196929 0.8703717990945914 0.34210984385219056 0.9677082026423719 0.6603861131044112 0.4013727606058004 0.005453040450691615 0.41211409850059133 0.7207640838236173 0.7224625742517088 0.5506250429273127 0.2782813662295093 0.27034600968641953 0.2339591481601283 0.4647686243170015 0.9527649570568675 0.05824288041360848 0.7128660667234499 0.2798794180494526 0.01712408625267814 0.04160942796767597 0.12857393561880892 0.6048420126995127 0.31161782656767467 0.32154650554910646 0.5102216550488083 0.8302575731546291 0.98819739704735 0.5552769831530932 0.08303261306860699 0.6523336114867061 0.36434942649318425 0.6358854938892666 0.46223119104998533 0.04745830794193007 0.8512581138546065 0.4318889219025973 0.2956828016705182 0.09568339055327846 0.2229182517815428 0.16113075387197517 0.38019068977663517 0.09919066330865611 0.017245479744594894 0.5369696926108981 0.9414213715767618 0.7709196455145925 0.9158938315555272 0.16945879456061796 0.46371950700265074 0.8788843644407962 0.5364047414807418 0.830188137728838 0.18361253646012998 0.6233100038165256 0.18847612873130615 0.09669101213667253 0.11959386784244519 0.19657054584009848 0.7955536508303611 0.5807895397909083 0.30081309718129723 0.4300110604935302 0.36361084796959264 
0.4149827554752935 0.6785903471802703 0.16708922743958154 0.09089839819206402 0.921956188672483 0.057210966141498676 0.018872660123977147 0.45560743636372225 0.4245191456638927 0.2642460686219782 0.45577965705674983 0.3928051017054792 0.9667520833431681 0.6956833460118579 0.768553351876624 0.9776656262213257 0.31187303362475693 0.31744148343308953 0.2918097871706816 0.8689330532398838 0.958308636805495 0.18238089555245007 0.42510302672670197 0.13471699887032762 0.07268219736157755 0.5307123099080598 0.40930120243792956 0.9821360306917699 0.17690712414522325 0.5817438762327694 0.5201691966802376 0.34298764981394014 0.1463014237612532 0.19619660639331515 0.32640569887817705 0.22953801954897834 0.5766146157226129 0.6908567837300399 0.9259828878321975 0.26768838201131573 0.8600860176772955 0.22112340194675695 0.6224897938916862 0.8391894072822641 0.3741290236722228 0.31204749526994446 0.33228080826275064 0.8202386044660767 0.030962867513939507 0.31474913352778977 0.12358334568774065 0.9907100605771518 0.06249026451091533 0.7926374890402713 0.680319531629238 0.8064300127221001 0.32194534926696483 0.10064790391863687 0.15624861344018193 0.3763253564175243 0.5723131287687213 0.7107489803775399 0.839138469216852 0.16693674253883417 0.20124841430716955 0.15826227865757725 0.28002595910098504 0.1687017334959966 0.5531984059568281 0.8360755533088491 0.059227453262002405 0.0015776188601083208 0.601659911894503 0.24762025620525774 0.15861254453971374 0.7119875294887145 0.9979786529994705 0.7445223780181651 0.9887554137387278 0.8187374223167132 0.6278246481709913 0.22112534569155162 0.47992999397180147 0.5024659612422716 0.6824544168365069 0.7156224065547808 0.2875163365005794 0.7013528475307759 0.9090970314547874 0.3763252598042456 0.0852377106196276 0.27522581224470877 0.1432776942042323 0.7916344510886472 0.12374878380265886 0.3308697997907254 0.4456192302665718 0.25356625402457567 0.7174476760083011 0.26325679618699904 0.1666384020483077 0.12723002131804706 
0.08898518744797901 0.06057809924564339 0.04528022452405933 0.13243868682612003 0.048797234575608694 0.9453752429433675 0.010410485114164314 0.00867440065122016 0.3160870540217997 0.4797298592081871 0.6992128029957078 0.7309203924578119 0.898035997471842 0.8493866361925961 0.8324135906309563 0.17140886011914813 0.8356227618446818 0.15984779397939364 0.4934842227255102 0.8611242083373837 0.7972832266679306 0.22689132701149495 0.04744691428267711 0.29493986962072116 0.4557435303143692 0.5246297451362927 0.9046370247264233 0.045555585574331925 0.3187117810935314 0.6661394534737901 0.5857510356238045 0.8039418403790458 0.700714367927113 0.03485565934389734 0.5767362208432121 0.3477866917039394 0.8212150529787255 0.9593414129373538 0.27555441418831483 0.35447922095721096 0.593256138442088 0.12970320977495176 0.9275789810038151 0.9214534333403439 0.5272130431762007 0.7908403417361185 0.8223604762081411 0.6342548159884001 0.3158546648604308 0.615878420858875 0.18172329653033037 0.03148139804825756 0.08975383227421296 0.06255237526489477 0.10763182162888374 0.3557899339018279 0.03619358437725051 0.445967421388053 0.6779916667723407 0.5514469270744894 0.06006667823359646 0.9230814635484027 0.7795275036905895 0.5714431508430051 0.4318669261322069 0.07741567825234141 0.549106158073808 0.19097989193357912 0.915723153821305 0.8169146270414426 0.35834984370618317 0.6891734535320003 0.6995275959257878 0.853540947588636 0.12111099409496451 0.957114110817214 0.5595622789022715 0.12010406267306739 0.3293569940756883 0.8179545259064595 0.0836284475320318 0.09798607954953831 0.6302391382591379 0.6034917915572233 0.44953614281845933 0.17884233505897007 0.9455974513985006 0.32174602424944254 0.2099115212778041 0.8640180699401661 0.1048787814767812 0.07132824258562398 0.7696322624895299 0.8173367872234657 0.7134227401724301 0.7109281809657415 0.7588023157838847 0.5300796215870037 0.5102963523788349 0.17604303745583672 0.3020286573739428 0.6313897559325716 0.7833347297120974 
0.25010614050028224 0.26386506841874835 0.682999258510558 0.50377206006924 0.04346406063306185 0.9106383265091678 0.6340902863953068 0.7188233800923061 0.9111901216931182 0.07047521745630059 0.9802664420302258 0.3978042215502833 0.04951031464413058 0.7961988479995021 0.0700241326003932 0.36585189383903793 0.5506577579105523 0.7398654908750264 0.932777947939979 0.17903879361252018 0.26726028137326685 0.9330639724346895 0.464408028618122 0.7956445401824698 0.43990480610444316 0.4337017426357518 0.7994615590024993 0.6805801750784476 0.6753116058353693 0.347852833750285 0.22355447324218325 0.5434345681789138 0.5275903302700721 0.06434482491328208 0.035183577769529406 0.25533964703760603 0.6313385152336382 0.15011346274844783 0.3530734081819953 0.6158272743504062 0.0628830347423659 0.24595477358131057 0.9833461320196877 0.5480776287209902 0.3647533153278608 0.47515672597900727 0.1102168608169769 0.9894353803785229 0.458514532425887 0.3315934687659897 0.7545808299755736 0.21379607939954692 0.49237816085334485 0.7492168760105813 0.656773043482009 0.5734426135155927 0.5225460520350307 0.07148215952626913 0.4829447599431562 0.6801372016028968 0.8110332855546185 0.9370949624514655 0.028915853635657962 0.8587516245203484 0.437754704715969 0.6775699623223271 0.4010252052707558 0.3014447238889951 0.8709478811233096 0.6824415505203923 0.9904163637379543 0.7263726498712435 0.11754579177503954 0.3295696850772978 0.42535922573280516 0.5021441345277041 0.0839002501885745 0.5008347059542336 0.5856222228375593 0.13770937343944378 0.8172971323240865 0.4076171386476837 0.1935746625002145 0.7570979431633168 0.36999296816407323 0.4358097005033289 0.45861149163988546 0.6793975822614341 0.13004170630293677 0.9867102949582381 0.7446582633171103 0.5065921555544907 0.5969695971434774 0.8379966277695098 0.2549269299135527 0.1853537793984068 0.7453950312737755 0.3780843716603196 0.2837578054207379 0.3536107015583032 0.3159792752443714 0.17752221227414966 0.6722972356550041 0.9086891026532143 
0.6006334119854104 0.7526794037351777 0.7229238457690723 0.1410532787505694 0.5476951657989906 0.04983194935724411 0.9767512325169201 0.9661455556698577 0.3028292799365122 0.13185612113085443 0.9391980282669918 0.7565536722240999 0.6226932164515263 0.785088925850078 0.050531437865032114 0.027710131154291995 0.9509499183980147 0.2805167450082219 0.9414534359945276 0.7625541247157487 0.3299974882319242 0.32130561558467585 0.2882132240358094 0.4740637805874576 0.7368258124755026 0.2877868122574859 0.38739404478791317 0.19929255683360791 0.8776980799828108 0.25136479851691573 0.9074356698834941 0.4945206984106095 0.9538949718258348 0.8704142310787415 0.14211872892404487 0.15835839779971328 0.4195482285844414 0.20408587517476262 0.49065059086044027 0.8481149421090765 0.2926291303327049 0.6840418933547486 0.8196567485992066 0.9744737147209087 0.1514465432990777 0.9694928071648856 0.824295881972136 0.2604900573402016 0.551548654643125 0.17751011366473912 0.49888050522879634 0.13315551256194402 0.23171081311865804 0.028440022504342632 0.3517993861793802 0.6792482005403713 0.14273186470385868 0.7753450791063915 0.055202099378138136 0.2419159002840331 0.02904951680052259 0.3967525388623382 0.8068196088535095 0.8604151359620422 0.020725262923662502 0.42358948291723697 0.7994597679237572 0.6626557047503036 0.45213739251866736 0.944036323103771 0.8431294536042441 0.008676951375160447 0.9137609338939057 0.7841319656468891 0.3654004893860443 0.6529085631374932 0.3647011893647124 0.597315505298027 0.7275492926354661 0.4440452195611714 0.9861873803254305 0.6126909333743319 0.03716513950649181 0.11359112760938739 0.34002197997779715 0.08965236872718896 0.41233567360837076 0.5176065615927231 0.3110197367592562 0.10560986100939584 0.9581822103389436 0.2909159883082586 0.2587602384939749 0.16390650308911536 0.5534317750016267 0.2490128249372 0.7815162749095004 0.25061949371046743 0.9128765568844295 0.030123373102896922 0.5040098768298074 0.39178948201381836 0.2909858448927609 
0.40836843587421845 0.34068658340257785 0.4667238383907215 0.6225992515942512 0.42868799918149403 0.7574875403482754 0.018371116812465837 0.8585442045369832 0.015170803989094539 0.6576385071344032 0.06836581283033971 0.7471850480237928 0.9866572317855172 0.6507634080757722 0.49219356562104977 0.3373245375776026 0.476960315880674 0.6296694599776548 0.3390290196839103 0.03354330299372554 0.6017767985129656 0.7754398208700387 0.5911497596399808 0.12197615802296013 0.840335877801919 0.33900878627889564 0.24943166936266437 0.3026890150032716 0.007060549763271418 0.8061837815421291 0.3042820456520382 0.9142333131001585 0.8769927604384818 0.3431170754502517 0.10546917123480926 0.21661998471438892 0.4984600294645015 0.7405197769921364 0.8796576778832909 0.31086781923974016 0.9774382063547601 0.4274490678425379 0.6610614990799991 0.016446479580060402 0.05029062232660286 0.5092892271320891 0.6984770956423209 0.30234721396146436 0.07504197799575418 0.19958598914930936 0.4810615149650781 0.6731794754907753 0.16607015352005516 0.46168239225755714 0.8855714037355277 0.2486047442670779 0.07307726952132187 0.15364392917698655 0.5893805586428116 0.14258387986412568 0.06242649095683872 0.47874957263260043 0.8164452230618434 0.1483274264642923 0.49740882508502415 0.08386419908807785 0.6874877623122222 0.6655031145254128 0.033730018622500246 0.7192973946966975 0.9310213060328949 0.26816850547844473 0.34748414131957117 0.976292186095101 0.4784389135545086 0.30506084440599623 0.09510774720305981 0.2520767270856421 0.5142429426609124 0.44829028780573277 0.36828415705680806 0.7574507961744208 0.8815914976465172 0.16214661038688938 0.2514947143887065 0.07816816243607672 0.35369057431216244 0.6506439369348225 0.21588977743082838 0.5289147430072815 0.17316198793490367 0.960591368381173 0.9903795081532514 0.2974717348255228 0.4343688272327181 0.5663923916552347 0.23932024051138256 0.16669200468525214 0.3377816652956671 0.9434391490903469 0.8458996771499918 0.42320671196002646 
0.08082024128331466 0.3488823478787956 0.851299770473716 0.4143186989367805 0.16620722162670842 0.27172122050780145 0.1319436305549606 0.0983279456967665 0.20672176442907253 0.788572157296554 0.710608667013036 0.8485734376417542 0.8843424209475155 0.36713896015394765 0.19191503736737103 0.7177650148945145 0.5821206087957991 0.4963211886166793 0.18867517815219603 0.3467177317657302 0.34872700599840056 0.8900615881481904 0.7628161710360402 0.7103903238043273 0.2784653674752998 0.9114475121563216 0.8430642688978909 0.9341684702278278 0.09364920586977976 0.2942451543667979 0.3013062317733429 0.48462251665323364 0.701588876132701 0.20095978880907595 0.8705196999491774 0.14150869365665952 0.7650379058940494 0.14967895314359358 0.36939108497489814 0.29157127477574496 0.8727988750604052 0.36636976967561774 0.7622155065618756 0.24974311123138648 0.726291021595065 0.6749656470038145 0.42740072377802274 0.5096299985069862 0.8682330227869093 0.12449154814946706 0.6338731651127892 0.05270444000124419 0.8711960410702284 0.17064125053632095 0.6319102757789633 0.10442244764285535 0.5766340412136229 0.3786339185913906 0.9534009323480761 0.015516918023076198 0.9290990764345133 0.2948414421590162 0.4862613611308434 0.26800958191068736 0.18570186376288367 0.34659213984658743 0.6387110860661624 0.9580230483584798 0.7974788282856913 0.37272022822279094 0.44133970399872147 0.4912629807296357 0.05717841004811364 0.6618450330550747 0.7483719769026234 0.6420802307439054 0.6870814834451205 0.1810272409443362 0.7942653948117654 0.5733110017478598 0.9008215828778311 0.014364211198320098 0.23775058271452254 0.8916330325015095 0.11477289777929689 0.899250149098556 0.48265193625693514 0.32196390097354033 0.40742484871194296 0.14744100475408617 0.5033020654497004 0.12446768844887912 0.029264404731494498 0.16872806808015228 0.622090373596635 0.949024230449003 0.7510381852687446 0.2769381118705341 0.6876626254955305 0.4245550507620024 0.6048682809937299 0.45114258982614264 0.8845148583215621 
0.41438878843903537 0.6205472565899534 0.24937145547516315 0.9682017585118634 0.5338879750525765 0.488439351509409 0.12779237136295074 0.06510577435785792 0.22284820711468079 0.6907664577011724 0.005229074563959157 0.30169032150138786 0.3927197882971214 0.5346588620944371 0.031057686402991003 0.4353866342787617 0.4274907134739141 0.693202622119883 0.16393300147290513 0.3477404162645764 0.10710972802144325 0.31481678149218695 0.34946130343748016 0.7301712100159956 0.8436159473148065 0.4966451312059129 0.7968504680894875 0.5436736832569644 0.47268193132269 0.9342490560308766 0.09773152105569372 0.8660169043554067 0.72706542415665 0.8577392111558952 0.7047606684778351 0.5534739171383817 0.43350385568196126 0.9435857924342357 0.5932742082251378 0.7388407457557082 0.4208757429914841 0.7516981640872487 0.7661833687210603 0.5634804703720118 0.1049907325014603 0.3789667354918216 0.6304596132076277 0.6983543033474883 0.7241607020784024 0.4433111327486773 0.17297559890501624 0.14747867653786773 0.23974201727553324 0.2163803365148279 0.2941303939649743 0.5469735097705578 0.987128832445015 0.7084024507765242 0.6196024870790459 0.6738433648377868 0.265216414977842 0.7752824887111045 0.6037370286100406 0.16086011338989836 0.18614217703231617 0.865899021571788 0.7624889991091575 0.47739200293033524 0.2721513030129875 0.8566514414720683 0.4295071258120925 0.0807504867779627 0.48272314552229956 0.5306616659584236 0.5158581301691392 0.268603383110536 0.5826331824688913 0.650634947353482 0.3720543690291156 0.6702283565413941 0.020415071618894887 0.2472370977645758 4.624235437584723E-4 0.5028585305338408 0.7767252705908948 0.6989458482689775 0.5468339497521473 0.6890976805688191 0.9634032953709862 0.4146863855309805 0.2116146232598799 0.6823595175312596 0.3065582915191807 0.4471625793613917 0.46485023108487666 0.3795118428970067 0.030802509066796824 0.1626464179271503 0.722924075449325 0.8133830936830693 0.07679116680634201 0.17209067041092352 0.45102316156743394 0.4125154146237906 
0.030517003023883604 0.08878049184989989 0.992144811619178 0.4915247039393047 0.6203473274805757 0.3193415369806665 0.2916197840646517 0.2926548075272427 0.1465798850801544 0.017774309816829592 0.0855416828642025 0.33758973739922826 0.7483333864242295 0.5362846933125864 0.052156607119078435 0.06831395706078025 0.98387751522938 0.19203373659732226 0.887180962484811 0.7567878547094646 0.8030459598884053 0.9355299548459818 0.25717977218489385 0.4454314880435698 0.6241655495815773 0.5542402217140591 0.10955133155983587 0.24388693121919058 0.8929733098581213 0.3740954867000156 0.873328657483137 0.652255343253716 0.19728251471677216 0.03192069224302818 0.6719218064985745 0.7069824415003254 0.7386977078474452 0.7834182883440979 0.7870895762229138 0.6527912760895337 0.7214677312956123 0.9841876213793542 0.2628427242870349 0.5325392023507233 0.00860512478387676 0.8353630676552534 0.7186455303039603 0.5566014929801967 0.2166890833122438 0.22080587603124646 0.15319588996743716 0.36436097951334556 0.2733080656050034 0.4793860573333518 0.7632285952888436 0.32260967534770724 0.34488769772208083 0.2537423543831998 0.5762005906617628 0.8191448379765133 0.9576882202819331 0.13600968962043414 0.0013807860014658502 0.10456851033928871 0.08104503828046483 0.6913550390424851 0.24078111129183222 0.611276046562021 0.9194648765523998 0.5817647493502343 0.8695299710895734 0.8358460420885293 0.16897921755451928 0.9353770818292947 0.7947024935596053 0.8095200068842793 0.533195564912368 0.8107558989223895 0.23228703608522483 0.9011873378028302 0.36577647052845375 0.5908529109030273 0.30388059802327616 0.19826742716920176 0.4086149631054182 0.40800708346873127 0.34787037901317996 0.9931071620372974 0.7287641839826896 0.24918201353798008 0.0642726520393152 0.9659889353445194 0.12221850830699998 0.2197428811695008 0.1680640796421975 0.5005705930368842 0.4489315964304138 0.5544269754590012 0.3127547382868012 0.19355546364894805 0.5909969156061854 0.3557623442397858 0.6641162064088105 
0.02761722235285924 0.9847045702425157 0.7130419441404312 0.6533922352808417 0.8650805470178285 0.9672277157058838 0.8090610006632516 0.11998308801850899 0.8895856257462487 0.4999806923055433 0.8571776573946305 0.26550959753619885 0.3739408004374387 0.18323031591795758 0.22785813588816695 0.5843260510067997 0.6845940669592931 0.8603306354570571 0.9217195101272265 0.2099657922617243 0.7466311273075297 0.5249904962085189 0.6251845998797059 0.9226670984655858 0.2879338913995676 0.6912725213307551 0.9452146888266693 0.40004140026711 0.057110216199129704 0.8635610008320438 0.061188325798749665 0.1611091744005677 0.8670316681753543 0.25252522430632995 0.954844734906519 0.05334394383171681 0.754293466643035 0.42314412985900174 0.5833130962738144 0.06302788380587765 0.7646365449774972 0.1321467984028858 0.8455997527325696 0.691137411964087 0.8753488044784818 0.2147627154641535 0.9595072294462673 0.13014052898848993 0.347888822092264 0.014464600110226766 0.9138106309038126 0.0015023744385465054 0.36745210271432727 0.514425767259828 0.9896182795490012 0.4693588628420383 0.17092626219450724 0.025702983523040235 0.9551952765525089 0.13273300521479237 0.9906583762373883 0.014694955474492732 0.4301300144167973 0.21683185918683923 0.04868413475507116 0.4112152376298337 0.08757537279340244 0.2333769968449385 0.5245448479220766 0.5285583131184758 0.8173978913484178 0.46058447619265985 0.8895453999530268 0.548225440955974 0.7084196665379241 0.30532500684887987 0.25128221093817293 0.3745171396726398 0.7245394054773646 0.8531591078817977 0.3844972469935186 0.5479871343806855 0.24737040856213077 0.6863554462990962 0.9377905498893268 0.8217927333110258 0.2371354054114777 0.3556655581679825 0.9004502650619353 0.9512645841849023 0.6239086475704215 0.7285412590463336 0.7896358059948694 0.22455886713804096 0.6828986832430836 0.8046460163618294 0.1753763202541574 0.27104308166441693 0.5703151268036463 0.4889247178941247 0.09932319679346235 0.7241064650878545 0.9004661348555418 
0.24316123367680142 0.6629217519897201 0.38637450585063593 0.23008285001209827 0.49886646147049074 0.56164409397936 0.6478466090334146 0.5824707454738394 0.20750410399493024 0.6241516865761622 0.041264424310030634 0.691937432042656 0.9284752078867178 0.4787321216492947 0.9840582060928791 0.4780495101205616 0.4109469154814783 0.3140821633877866 0.9525068170035629 0.369621725543658 0.7125242273705219 0.32748498602134357 0.5612865594395811 0.48994370038708623 0.5159286774734656 0.8606559234811668 0.2803730036509785 0.3876008315861973 0.4542608684024628 0.4120085514124284 0.8112989257341647 0.9513476840388734 0.9360884986586038 0.6838183847472722 0.46606059881092676 0.38284369513277516 0.17351545051091743 0.5737387869167137 0.24450369736578237 0.6176624629955408 0.8595513986178175 0.5523499737975104 0.689809246485153 0.38283769862580397 0.2155156617922831 0.18189843384768167 0.016739002048344775 0.15831030756795061 0.9348729534309881 0.626111063012079 0.45358877219320703 0.6699924471763211 0.8868128413669472 0.8769442940576343 0.21633723129884652 0.1837666748447362 0.3110487327360094 0.0813557052996664 0.04226669059043098 0.7925887786457907 0.6347262773871095 0.7896614751073354 0.7503717895326584 0.09296560284598454 0.972995070501819 0.16677042244747387 0.8671606246192802 0.13242966148793345 0.8367118188506107 0.2085609021814786 0.9589921208173907 0.276234939699854 0.5639070300479471 0.7878945599855653 0.9458825668161618 0.6893488720824315 0.5160611032002053 0.9182363358651472 0.3367533571742858 0.4589176665150677 0.13590044084737507 0.9675779056509716 0.022380605819070643 0.9927338089536412 0.5503375265990778 0.6813369747089499 0.98984699564603 0.2208353081266139 0.5203435025667957 0.7801325388609716 0.0810848177302067 0.30021889439970806 0.1889553855219147 0.5868122318752899 0.708138448831044 0.9767501150706894 0.25928479662816295 0.6068344220164635 0.25906337006194935 0.49370533232934943 0.9463082196947783 0.2370845338254507 0.7809182411815919 0.9529818545040222 
0.2893837148356857 0.810431514780333 0.41009515794076123 0.3328641273171128 0.18685274777019356 0.996806737790538 0.9352726883151855 0.945335751862578 0.8197494822504812 0.17631455545108066 0.8245651171549453 0.9762779921133479 0.9685547610468235 0.775860172448152 0.025980421854692404 0.8934976817451108 0.09509522344129484 0.7180065380679779 0.859088600776053 0.930378743145893 0.9611028227277572 0.06924972718447742 0.35584114093900887 0.5570527996392713 0.9856131971394414 0.6946273199406564 0.47977107569857613 0.8363364743513884 0.5052965329075596 0.2265521806281806 0.7553427517540279 0.8282841227576662 0.5949774323595342 0.9379896937132024 0.9730090862434653 0.7550564606727052 0.13001524403500964 0.17357379895502612 0.8005730005542826 0.4669070504359072 0.32788007727867274 0.8369828892690857 0.32773931486481334 0.8604403423307211 0.09317354745741979 0.05887399814663585 0.7982821266034458 0.2918961609966897 0.10850774608972269 0.5769000109503489 0.043041381631441444 0.7010676529656452 0.7630699000812041 0.5478106611180429 0.4170025734488364 0.11556056610458088 0.6913639292521936 0.4813656510688661 0.36500090704892385 0.44045407139276505 0.9065346148970393 0.7367638666445634 0.09313208379244298 0.9550241073614941 0.255843968423189 0.5545197008953592 0.20673929200707164 0.7360425271230442 0.2002832789304808 0.9101940158834089 0.5908865069462544 0.30626641539721056 0.7259687136459996 0.5711137714673368 0.25551604651441695 0.16708348723938216 0.531882112939022 0.6131907036386294 0.3450283088582812 0.5158815259868537 0.5066829691980385 0.3121766104800895 0.0208101135541382 0.3715606963399939 0.9067312842623947 0.7621685511328095 0.46527735956621985 0.5357097679758762 0.578125374666915 0.7939044880176536 0.8272866977258008 0.3023184214682375 0.027472679696519342 0.7663662654777192 0.5313100998238509 0.5954995355880786 0.1392755221825407 0.5874329746407791 0.00655521799290848 0.26254685371996145 0.8622280976438363 0.88280520966257 0.5572532221728669 0.5787551739872773 
0.2423848446153538 0.5771462410510076 0.7916254082907009 0.6307814576773758 0.7680743738610093 0.27736359178988157 0.7545783481848325 0.13692706167692137 0.1095974854504903 0.7551803550019589 0.6679670437531602 0.43557283840413374 0.9861614820131193 0.5146227734444497 0.9723037700907496 0.8271002154658249 0.07316727536605461 0.8997765482147401 0.1850802939619216 0.18998319282210974 0.5748799817943063 0.449141634562068 0.9434095820607331 0.8083377027499856 0.33067719758867054 0.2351408768145118 0.48248226293243746 0.6460658332481729 0.09748410169314004 0.7408155915410379 0.7163724705169483 0.8818351644869193 0.9566675788131763 0.42046649391428403 0.03401644156860706 0.3794835891411241 0.6305765385900306 0.4669761687102192 0.9007263471812853 0.9100425324468041 0.8620791120881615 0.46577472689498045 0.3643911761936044 0.36442292501437923 0.9098613879248321 0.5538876528414309 0.6055551165926625 0.8999092515674407 0.6429171943233677 0.42223124506306475 0.7418533200945734 0.948959513509345 0.6795713089736933 2.9697668911254915E-4 0.19982381394949666 0.2854094113722936 0.5095436475677515 0.9445937710050926 0.10090341755808141 0.8910317961112648 0.561975376226208 0.11569289185847875 0.2734544869686205 0.19450279180481223 0.7380021204497134 0.07344695821880853 0.6989586692903862 0.36517871008673364 0.013373684149017961 0.2459311804821056 0.42805267763657584 0.7356883242958444 0.33478342519280613 0.834881655749951 0.2608698722372169 0.10019776394737478 0.18298494026008483 0.7088816009277331 0.7761913916346206 0.5893058914609557 0.8564371587524936 0.5052738024347779 0.6151985752465413 0.057879900789198024 0.7411336129886826 0.9127530562468997 0.2781581803453823 0.4387431227066586 0.5754355729146267 0.6388199703117959 0.07616082985353623 0.3129770859429687 0.99314228064946 0.8797533585900287 0.29507191774869224 0.0267031314941083 0.4336964069796584 0.20548535859625405 0.07514363607020147 0.04455917004381993 0.8453362024910396 0.6477397132191337 0.9872755602162029 
0.8716894566584962 0.4751704020041053 0.43702069898733764 0.7957004252059314 0.7989162694292419 0.4395655867841526 0.2725159898699576 0.436910853114136 0.9298647421285644 0.6989001431680503 0.22870420897775123 0.8159316870520654 0.4341625518720045 0.522652494953387 0.4034334787894177 0.6235816224010517 0.7374389480578969 0.5239562994174836 0.19446495707807876 0.012157314424728671 0.40669809424024395 0.867168028080901 0.6549508639240454 0.6320114884701149 0.8187438735815686 0.30765766108111736 0.7230437691750278 0.22778645202402026 0.30968494596042806 0.30994656342282867 0.2468101627393583 0.7206026329721691 0.39514968735226097 0.608480888148277 0.6328479877818841 0.4612915939672131 0.8236933868725966 0.22474291823472548 0.6354844023987537 0.12980156762780282 0.6799801939153604 0.13506670518363162 0.27315639966479643 0.57719267970112 0.48967519135642656 0.4954388434683329 0.10955375943012113 0.7591783644013659 0.2945828077486312 0.5727283736089813 0.1693303679317355 0.7939579699041922 0.9644194030297791 0.30620517912142564 0.5716659400593439 0.09212549721079555 0.5907095979475308 0.7288689165575423 0.5596877150357477 0.45272741483060364 0.6686475585581481 0.3150257087245717 0.9797373867778543 0.9970825235782118 0.9143655467451091 0.6887132474059124 0.4216954002299266 0.8207651998493386 0.9879042448562773 0.02698912670295439 0.1700516173146358 0.2841929128573347 0.09628042210046639 0.9227615505292884 0.8351372473530783 0.9335475010055002 0.4363992651186954 0.43375531255928157 0.034910394960747215 0.14629931649790795 0.7041603553906477 0.5373700877368605 0.7799130123526669 0.7293917255749275 0.443957707742365 0.23560228836302488 0.27749480382656033 0.8631833411140218 0.8418756212974923 0.10986560756959907 0.7161252028862044 0.980533501407335 0.6762343351392277 0.212459688413567 0.21659177151759623 0.3979563507845192 0.5307404054505134 0.7995998363911672 0.544109529625742 0.7745576527761593 0.04834819162107151 0.5603357061583675 0.3638425987190903 0.764507247593667 
0.9580414368969403 0.6763500272884946 0.7920656474757857 0.5500895829925214 0.8420939144097258 0.259542997056435 0.7461698353872936 0.2949137344885364 0.1279711741024454 0.45747205316332784 0.1289440439775491 0.9406528917195536 0.5868860269346209 0.7911303758083483 0.1960385340464198 0.18572318082462824 0.7001221607833694 0.09521216159232637 0.5502457703627736 0.930599982520628 0.6145923631945912 0.7679262113262064 0.08220818263634055 0.9520093935105087 0.050698530846612355 0.9055836468690165 0.8343907756724777 0.07702154073180079 0.4484128948953787 0.2965999182804968 0.2036317273895515 0.6681113768700915 0.8416011967816139 0.18021469836620818 0.11731154685444733 0.801541406224008 0.43596013249145615 0.13714785837919252 0.8191577274121883 0.12396071940945586 0.6325427678358181 0.472573747687409 0.6829279121123771 0.005902698793587069 0.28669764751511007 0.9762095957306526 0.0356920290958751 0.27590341493535697 0.3213484268442409 0.9999784917894559 0.31715956885944174 0.5152519517146589 0.4238925073417926 0.38062118452408567 0.027475188299698083 0.6209225103431568 0.44759916222182405 0.8527948325885112 0.8231324543195306 0.10302719314832942 0.3112383113357122 0.24761141102740825 0.8091317080541217 0.37316819963461645 0.6447736510444181 0.9522319772868693 0.5452350954458285 0.1013560976428961 0.164304775986866 0.8180473924647769 0.5351477444092357 0.5717008996665116 0.7760446160323731 0.9481401410983968 0.9996814957842315 0.850870914829819 0.7069702823120018 0.5691357006427838 0.548433102309987 0.4983542475654764 0.5388865707607693 0.49821662722347404 0.9592364500821225 0.49384597717467726 0.88299356409871 0.5529853309312107 0.6506239729551121 0.03462387568500269 0.5736505896457509 0.6200272696436456 0.5897597474124456 0.8645641884140647 0.6828808683087488 0.9974387366213923 0.31678273371132304 0.8364881813336826 0.36126274135951286 0.05724867831071989 0.4862255782966989 0.4808189576393195 0.496204579955285 0.23557718221286017 0.8271508820598512 0.45606778906622936 
0.4452022916099828 0.8515381456406563 0.7058731521113688 0.8904574069633362 0.6495300525520893 0.2803066878053839 0.538611536405612 0.7890293639728182 0.7629159190569752 0.5614372126805235 0.14614496692129486 0.8768714741935454 0.04010631607440218 0.6104306914872061 0.7685175081708678 0.47664034709841374 0.12182416205956847 0.8197151056583557 0.8650689391901654 0.2773430141187937 0.025477440782410765 0.9846159179634296 0.01354147486120294 0.14138100903510353 0.10507658589564361 0.17055843616496325 0.1917406045436324 0.41814903554547034 0.9791703257046028 0.34965740895199804 0.47620437028439366 0.5850195658046575 0.9422114932675616 0.3780133086222983 0.724561768398602 0.42033363217602715 0.17261559327438847 0.15944928554761406 0.12173818633386224 0.3280573118678807 0.12987683909827286 0.6470861314298831 0.5648914972592225 0.8800524985960809 0.8934575302806104 0.6710672502351017 0.7379395483864569 0.1194492387064926 0.8377684183009947 0.44890937230145134 0.38376628397854096 0.6773965224080198 0.960409451151529 0.034369033674107485 0.7206701916710218 0.29550705656120024 0.3849414287332076 0.8913784871280829 0.16934510642300016 0.917600679053427 0.04481081861367453 0.24151941233483587 0.5576607379104012 0.11689049755363223 0.3216520666139704 0.826666270330565 0.7527358030568106 0.2585442807779904 0.4590421177655416 0.2528455625224588 0.2332739964508903 0.4625932142787691 0.901884980943825 0.13702104230177303 0.45985926719433456 0.12844912665617658 0.32195403467435657 0.8654293992216378 0.026883734696361072 0.5098296959879226 0.9817230430406144 0.8902544972187111 0.4870072501381637 0.5876816274810795 0.26146691318415083 0.1408427919586661 0.4275257455581012 0.27872906196348435 0.6287157271170617 0.30632603660693813 0.8477503420062554 0.8385651320617564 0.06449698888930588 0.45843232846441306 0.13747697047308371 0.17074253418510488 0.9774021333303403 0.7264293895852787 0.9888524196547077 0.6371087597014602 0.9343654857205133 0.5105594810826752 0.38741903631512464 
0.8796702775251688 0.11614518019748121 0.025987920490448735 0.4228474524967025 0.11055909205895043 0.265846872931638 0.8062025859367936 0.0358425980509971 0.30421437232032766 0.040433109565557124 0.16296489419607296 0.9749354250107781 0.7346806938901659 0.09801249459429962 0.49932704859268895 0.17534877841073604 0.3409635852052122 0.9578043020392655 0.8028950029681796 0.49185312517792545 0.4746720319392781 0.6550259215427228 0.6084295976353056 0.21850350782833328 0.686716874418438 0.47955211974955836 0.21204328361061364 0.11037644588196249 0.3436228732351173 0.6350462006227692 0.9436634394808707 0.6859016594190847 0.02905133271993221 0.484365677508391 0.8600976318849077 0.43311016457951834 0.8824193818847025 0.15739525433015522 0.3079499048739628 0.16909675989804884 0.2540409155338267 0.689062938858274 0.42380146764114857 0.2092216769107269 0.31500324148902614 0.5281456131423473 0.7884833882377302 0.1910526124123343 0.5500326036796803 0.2115722690296964 0.13293239256592537 0.3677747166121871 0.3828910446245689 0.23428499439945938 0.2050355354047041 0.9367732752933464 0.5363856106987976 0.6613188338645267 0.650033326928674 0.5013195448538715 0.11286025416395051 0.8266135896636803 0.03146486249835556 0.0767675166348748 0.7960803958971162 0.542172058674845 0.9253025558283711 0.14739561900979659 0.021707732010033953 0.6015306563689422 0.35683852890264467 0.15600377892121964 0.4411036519049736 0.0031103051273124738 0.7180483294062836 0.46560320163239555 0.5119874603918554 0.24186646195487038 0.7025717392845533 0.27101239200942573 0.5895151114538116 0.2217591961893729 0.7261149765179511 0.15789998669563343 0.46263341404379166 0.8269158261194408 0.7619918477841376 0.007790837721826871 0.8831030797776116 0.9559595013582293 0.5146863774957814 0.7297791964262228 0.2683740221518258 0.8718297191663158 0.3787327957992853 0.8631685584838076 0.42453430637111644 0.0485213325838566 0.6858730327891203 0.43998509912811046 0.8318442156412236 0.3065209162121256 0.33036639768616693 
0.622287418736789 0.762904975852764 0.9203204364471251 0.9804891468078801 0.536422527979465 0.2623017445586401 0.5542400683721199 0.7336768583112093 0.41485419980263927 0.8065986626323872 0.33689313183861824 0.25342588180020387 0.6259164480648806 0.28485344074800745 0.1199784365586134 0.13798434387158298 0.4228213933917052 0.7923576938554767 0.983685879864894 0.5140271787385053 0.2754777865435797 0.61409823723309 0.34885359685658857 0.6229243400973202 0.46215219014292785 0.6894742049083181 0.6351160873818411 0.4728740392740418 0.1927320873063917 0.7960928355728938 0.2646342513521692 0.46708871610960323 0.9113265765887524 0.6680070860866398 0.9526000936289741 0.28638386690003736 0.583166676031065 0.2725463185127608 0.6485603863231028 0.20393489277221244 0.7092440569536808 0.5099308737440837 0.1967046507916742 0.4406310519561548 0.531853276968303 0.6711501728920354 0.7686152618442526 0.13136927967983447 0.39517981632163246 0.09212326881087529 0.38857824284599474 0.9657950037335632 0.507301715214016 0.5759496401515257 0.4194051692434345 0.5914113572684766 0.4134502960663786 0.3200971199321667 0.7058184715566495 0.42264165785120655 0.33510129562289115 0.28166301586384024 0.18280332870189075 0.5863445309387859 0.1554444243855071 0.1413833827530765 0.14793162301480312 0.3524452060549049 0.23900615522044577 0.28585473367343317 0.06951436262363209 0.5477600219188109 0.46167692731760024 0.5174710639632536 0.756540963961251 0.13175587544421552 0.7971205022658331 0.5228688476188618 0.9517152625825288 0.32490010802151637 0.5565198066208922 0.48321074358437355 0.9254995162031556 0.4234201690025602 0.9108403442963684 0.5934290387967301 0.44692935252660504 0.7087070013753344 0.6382682832332164 0.6725875872558497 0.586064208547215 0.6652597148105118 0.1342077449659148 0.49381126115092255 0.16328936721409226 0.057983311204610755 0.5971974342525401 0.9397701961330067 0.30228702789756245 0.28781527737877655 0.9613340108424233 0.5984954578204069 0.46634594739502244 0.8440709673340975 
0.5842659339166081 0.9543418394523489 0.5617799273802393 0.6025617432046391 0.9195479150319875 0.9002025596867583 0.17757790168091314 0.18045396389848445 0.4031228955529915 0.8996197095730141 0.7970093292093786 0.5520006414389865 0.5659251725301964 0.7612229516855226 0.8647601715378773 0.6536921640095675 0.9480741579228401 0.6529690649240134 0.9222768236947482 0.7630362874391625 0.7348557882582045 0.6799426110719912 0.6343461250171918 0.022091487609146587 0.16066443761272775 0.07701822104131062 0.42154202876566116 0.5950420579407012 0.6343049487919203 0.8264734682839554 0.5860202125162037 0.5985175369957333 0.22753129601711686 0.30068053241802484 0.27224412945451704 0.12402258014938838 0.638424190500441 0.3512937848100778 0.33957727490748413 0.38375821322116166 0.992187499028025 0.11888983944187104 0.24027799465975497 0.2542785744546989 0.8741688801805827 0.7208051657616759 0.2942847533959435 0.26684220648182766 0.8033876360180331 0.8899556945064765 0.3312815023901904 0.3989803193305721 0.9495782417147368 0.583712307030359 0.5308840541889172 0.8587189956570369 0.2727356879919258 0.5594441188643697 0.4333901272727111 0.4210560236391572 0.4213743448185088 0.9221317594531873 0.5163873843941589 0.7757072028662835 0.6641894189436863 0.68949085250879 0.4366149819410483 0.77580143998194 0.8495360286350513 0.4913358620218574 0.021826170669720146 0.4391170350554874 0.88811797482033 0.6224526921899247 0.364000701363724 0.04661692235749848 0.5365043178035789 0.8020382681186161 0.9131074291947184 0.707453427516016 0.32256910802168237 0.15780370848936598 0.2997193542783112 0.3152781202092637 0.29867540061652387 0.14912726896946948 0.7251342953425176 0.9076059268203189 0.9344019871544951 0.37118213822407187 0.1492566739325607 0.0581466685847275 0.8678373320120486 0.4407529285154558 0.1793469102619729 0.08143174181348867 0.42294197271709766 0.45449448233330225 0.5168467252443044 0.5097270590388946 0.7702070898099186 0.831674673476807 0.054840103716259136 0.5141379926939957 
0.3588137580172368 0.7059830600052301 0.23531148684520442 0.6849758945412253 0.7807236626497142 0.41477530065670143 0.0732138096146151 0.860869594273491 0.5406088015138848 0.049163064908960386 0.333461323559227 0.5017699737891893 0.3002540588097036 0.4746477248844658 0.7073127504440725 0.43790894500995436 0.04204362124902117 0.7095355307508499 0.4762129097372959 0.6949391176150329 0.25336786221129315 0.057160779822218455 0.07401154310333513 0.4948936269360862 0.6722649744177177 0.927982542593503 0.16258054174610337 0.46327039895571653 0.3543371130286683 0.8589101789679764 0.3212014121685931 0.5344585786498613 0.8219583213791771 0.8397506466709216 0.2166973180761439 0.04088962201740076 0.8390174148800214 0.6904611800472124 0.1691347748521439 0.9957086410153388 0.9179157536192475 0.8979289665015351 0.3570222967303053 0.6344266748515344 0.010266695842693463 0.01728870994860021 0.5162841742562494 0.8679280047958743 0.958719524196743 0.47032832820987946 0.8788900413028556 0.41737063822682585 0.19455775623662575 0.33232176906741484 0.26987135579652444 0.0509327802427898 0.8541978146695086 0.4886672546581391 0.7355585685526124 0.5230212526624086 0.9574410229678638 0.3639979243663898 0.811223239068764 0.52847282298088 0.5413083480413367 0.2789715559814121 0.552912146133037 0.2274426935617092 0.6755134869143208 0.37373435984021164 0.756110632187574 0.6710890585655501 0.09910696601341273 0.9520934878866247 0.36297519915219845 0.7638261594547795 0.812550414508895 0.046721688574222986 0.9202262099027103 0.234832820032574 0.3354563702994653 0.9832566763177316 0.09139333464109645 0.45966726054150375 0.49745580255156807 0.7299467492143572 0.6499578781929745 0.8722529863298532 0.5681322878493279 0.8595333982453172 0.08403801827476798 0.13816476469750916 0.25168790663525875 0.021350658094100305 0.9286137411697677 0.2220577414259678 0.5729826695220209 0.9241599775989792 0.20789105398569996 0.9752466657834469 0.9638162518013105 0.9640283919895658 0.9182551133651136 0.893856570621548 
0.9217963550386167 0.7965970723651264 0.31136505063668385 0.2536299153526337 0.4588660609286217 0.34937609079209475 0.8472934579601283 0.9143894256169653 0.18253746195650478 0.9972405746156668 0.07921609344785996 0.7053202519843011 0.26010672553913006 0.8830923476188832 0.40400805910719617 0.45164854820250167 0.704601775033131 0.6556899516442828 0.567205184324697 0.38395802975318716 0.3909401427649507 0.21793986191556547 0.046140018329184684 0.883300040586689 0.9617514607760514 0.4314795935135777 0.9516641546915962 0.25841669612955165 0.31454562511626916 0.10717343249861278 0.8867194502041847 0.5258107094818797 0.6753839588642788 0.026180106948498594 0.2791229353528274 0.9690597388639667 0.3589373185539959 0.18254083745482774 0.06628154927549601 0.5906867876750029 0.19079200853711775 0.91577536674848 0.2511538348314247 0.4380627447003832 0.22509039592202695 0.11076264490102161 0.7716284108459229 0.1842307829088824 0.829070192248885 0.18222166115668414 0.32958953934439605 0.2752066055386583 0.4257924157285369 0.13239733015779898 0.31129222544203683 0.41722261821781903 0.18732514095022712 0.5730952415476833 0.6972708211409978 0.17054806040463255 0.7528761356828885 0.23699785509924698 0.9078564241336675 0.9409312464875857 0.6098448652508189 0.16823454581673414 0.8594802436793383 0.7153741378401623 0.12048968668984572 0.4784659883749066 0.11729117680523993 0.47586334296779453 0.30288402154802685 0.2822832642709582 0.9212182750511538 0.342846321992746 0.38206074688496516 0.1845106936291755 0.03545908295674138 0.1496546550097546 0.8157210765652226 0.4820405099549975 0.8254778224585384 0.3297411038746991 0.9686108672820102 0.06411675265581063 0.7019476284942887 0.7105110523420357 0.8606436205907851 0.8475244484759757 0.08767360660305268 0.050692733620935626 0.44607597021807266 0.4669297792493291 0.36138452375956165 0.9326049391132445 0.6945183367538594 0.85565537358885 0.8113825303520242 0.9263449618751796 0.33928959685034366 0.29846421537406587 0.4923922056854313 
0.7798335420230024 0.5346015115494619 0.515464978487756 0.020551363684915502 0.7568175339796018 0.8202849621431183 0.3450401003342406 0.764337820492723 0.1443452796630197 0.7312466610042262 0.6337374439859832 0.5933280259444443 0.38189377061228935 0.10857199871820467 0.01235519711521016 0.3320346792428004 0.22624031760731933 0.6701652247506042 0.3399889817714735 0.3326724372180432 0.8593359447712762 0.6810459769985456 0.7315986763611908 0.12855183793703817 0.8708519241554382 0.6268968502467517 0.2496922660337635 0.9589037339644041 0.6027340213397435 0.6391834881374513 0.5370124429707304 0.06628769762032038 0.43001447049733943 0.04614545037791251 0.8946512666115953 0.059339470533406824 0.17029441697705416 0.8311823995149923 0.5688065516443694 0.30646413461860167 0.9637245039393474 0.9686369829818245 0.29943369926808017 0.2801834899354694 0.6459750394742221 0.576701904570079 0.5053347889151947 0.0071970787194099994 0.848601192445257 0.5781558284379561 0.8327664795827646 0.2958185846975172 0.8043727800307461 0.0918588669678434 0.17843687544730247 0.21030028039332616 0.10955239776857328 0.09141521467419766 0.7128010607913626 0.34066019157735516 0.50076950152457 0.9621368267400098 0.8758533735834563 0.7769780135019664 0.04105231035158452 0.4634188115999759 0.7951544816467551 0.3387273080288655 0.9777431507175139 0.260863801342644 0.08461943355199242 0.5325633130294696 0.934437476961932 0.630968862363028 0.5226326246524091 0.9160870157508442 0.029750089622004983 0.16091521201606485 0.6793441860205721 0.03045781465798325 0.9776269828790233 0.2669778616800962 0.16554189072143255 0.8313287619589991 0.21708465557326018 0.13729815034769555 0.8092811917005195 0.5954295349538287 0.643673105510893 0.2682395733218733 0.7314953808304793 0.6092246421528896 0.5706226487297807 0.10827321856508809 0.9880969717561794 0.5564247856447075 0.1345489835648208 0.44931579350543027 0.17362008129436624 0.4127320659853291 0.4787546149852778 0.11267028511977184 0.1402433500980006 
0.8290504936994075 0.6965792237715376 0.16519501735290587 0.7760710463730705 0.43849486374151514 0.6979153622445673 0.3710542475281249 0.6078401619272013 0.25788326225139946 0.8739198775276212 0.8353794524225947 0.8457591418921898 0.3709991479801852 0.6781159580261661 0.8283972706897921 0.37197439488684725 0.3787612131097846 0.016493955325072585 0.5920564843171472 0.08919821692866858 0.6805403827248322 0.6448843110679985 0.5259133914841703 0.9923222054284936 0.5112151023611781 0.6087887250681587 0.5250107030988906 0.30337833429809546 0.8001575053707009 0.12902083076810666 0.3997123378450965 0.018999675583542763 0.3411947274542445 0.23592117821335035 0.24593309470588687 0.2595709641482775 0.4666908442682426 0.8855752149827089 0.35392798372768186 0.8805491774315621 0.4042471992268586 0.2533777384690874 0.7447834241423473 0.9089726745378518 0.7611195504888799 0.3934505694408549 0.7059785936412293 0.501966904980913 0.12930283649728647 0.476834559738297 0.7608619601516068 0.28909342447031183 0.6333352494808548 0.37704146809157046 0.3516143750197288 0.15667846310567357 0.353225472746197 0.458464569996783 0.14124708223533444 0.7597957473930533 0.9465079365948162 0.04131170315731936 0.2474090078161284 0.3063681850129164 0.4662830442696074 0.16613773445028313 0.9092547781661547 0.44789096788471816 0.3262003826026668 0.9617183863637425 0.05835118368193437 0.43256891142213283 0.598694899152287 0.13192047859647504 0.015576341889358747 0.029198940706116505 0.3493873339596436 0.8203884555933069 0.06819259315034099 0.1139181935035053 0.5084119423108794 0.38628078178762926 0.9947506003358414 0.22697729847581538 0.8585357664825746 0.42995379394740396 0.7002084602396703 0.8529871386667606 0.08028595909872116 0.7444050085800619 0.9996268672832389 0.6556266747939294 0.43590233563325076 0.7364513954441456 0.2460269064331284 0.40658853925013927 0.5967845232942618 0.2610328767073755 0.9364748261275296 0.888577971031591 0.3232045726525028 0.7426946725776856 0.6501834446173342 
0.623469944407668 0.1904072135668703 0.9202167258211894 0.6733976365596225 0.8792680814555698 0.3218216046554704 0.5435222282076049 0.4480567880780799 0.04329848660762614 0.13796011793664242 0.8764113144729924 0.544704563866743 0.11411018215842239 0.4586811043958454 0.9143302838617382 0.20484231238868378 0.7608432646014227 0.90684420876096 0.8133004361406908 0.6238992544897858 0.1263648867049333 0.4529883893706116 0.1442950645692389 0.053219273333005246 0.3092618782089377 0.1135142920929354 0.2367666786310334 0.4879400981174774 0.2498186496367092 0.268954204609704 0.0556913695601603 0.17736826025951535 0.5663971179452223 0.8042645833310716 0.7684103717923236 0.44542865838748613 0.7411754152718846 0.28967716097617746 0.4007136811024161 0.340994876249785 0.9379902632735785 0.7184971138163676 0.7560242639354399 0.45138270933286995 0.6500679748981865 0.8973372217602763 0.07720572043743268 0.8070159829367501 0.32171699996049274 0.4446294747816185 0.7674298452377051 0.1812499565015474 0.06559511175270294 0.49485657447144227 0.854705287373094 0.24656498564385698 0.6059524028668656 0.6287977084283682 0.7840818659782043 0.9868186893145384 0.6182910341395051 0.2541839304343656 0.5522099125156816 0.48497208689851323 0.7071713001248158 0.6465111133065846 0.16084709453896362 0.7066816894598217 0.36643300471511675 0.12635865614834185 0.8590703300627639 0.18237573812178764 0.3998898466363098 0.397414870630062 0.6683636469537516 0.5539096195358116 0.44559374824936404 0.8985109205770709 0.24041732249047998 0.7966220594633934 0.4430552114387646 0.1333167446125606 0.18438728311620367 0.5535901198837545 0.11034465765031021 0.33295422507525674 0.618528699188012 0.42610774977873866 0.7567889312023907 0.4796342924489112 0.8505304426354868 0.2528889197794345 0.04701608998816553 0.6735262095465014 0.7715424148581065 0.38022333867346825 0.6533907524480372 0.06041869546506029 0.7608009444424269 0.6762890493624549 0.08642122679229147 0.05433458582659223 0.6409220607452181 0.5750525931849213 
0.3765088947299181 0.3871115651049001 0.4983460709802523 0.7486646447380046 0.6954492911672906 0.596063890720997 0.9890812336990361 0.10198465535444845 0.45238305818307856 0.9211908129261164 0.08680846189397595 0.6588454996845963 0.8764348716737832 0.22499952971105708 0.09857256164954864 0.5668950436349783 0.6070663982526255 0.4708619010199947 0.1727001713519195 0.886328461521916 0.3532092870808946 0.4999254072547229 0.454248210876373 0.04755161010003739 0.25353288786212946 0.3319510016116738 0.5052009488349533 0.8637921300460781 0.17274033215411844 0.9820514135306427 0.5997976461979252 0.766901981279386 0.8025760737713828 0.833155719154113 0.02978711987869731 0.4980710241416454 0.10126659935590343 0.9331267431764847 0.6100691490881917 0.8298985224334097 0.30140552072105353 0.1958102240472296 0.1620973128687727 0.02224544400971351 0.44383281348017567 0.376895695849915 0.8705812746286099 0.6215172115303965 0.1538475238091237 0.8683207269669532 0.05844119230750999 0.901503379885934 0.16613214313014546 0.8090519565148521 0.10563049153249804 0.7136265595963892 0.9260557195949792 0.7454275394386255 0.9493575862132354 0.7654305238244201 0.4475620527204808 0.09084758059832676 0.8464012681059284 0.3783628370418711 0.49163849675100635 0.9324501786307159 0.8526280861141564 0.5893848769824417 0.044399576956810405 0.31151310660366094 0.7804848409319053 0.05566804810877901 0.020109037923236706 0.9646715012329583 0.23215254455086853 0.5976119568277892 0.09200272303466972 0.6546449244677484 0.674212966394642 0.37264657776597676 0.562564290123869 0.6420173616477846 0.4190925858024098 0.6107571190554558 0.224943770752142 0.5537162253906025 0.8092127651709295 0.5623970649553899 0.23849938822120387 0.8891877868097392 0.8616894578923351 0.8104309613826122 0.14398106401960253 0.6718675496662437 0.12458913116874604 0.3799755978802899 0.2776634190678867 0.30139630442278276 0.5201624831507696 0.1434843165579449 0.008775488643909313 0.49113369782734984 0.5061407479848717 0.8631463605548335 
0.42086292417312654 0.285873096555203 0.9730942342232368 0.8386488390072354 0.28525767107690336 0.5462625522461609 0.9140160726029518 0.5355764149022141 0.4336195574510068 0.9592888761523618 0.05170886416557352 0.7581997297041514 0.9265524580840138 0.5882827756697402 0.29021550076712477 0.9001303229374351 0.736587871267462 0.21125521056106067 0.06844972496630997 0.5572965366329058 0.016366674769924705 0.2129760306896309 0.43734602159859404 0.3199211815633546 0.4066396845709399 0.5940579288554326 0.9562625168713095 0.8736585083765274 0.25374654955374765 0.739410396051365 0.3100344790327132 0.6347744078153609 0.052521073384548256 0.9487794786191752 0.8849903338200469 0.02023065426423476 0.9081110344986544 0.3736910335857986 0.41040582960034977 0.11076010947585047 0.31898854920341435 0.9279668913257723 0.17128663130148192 0.9034967427425008 0.12346723822505867 0.9430842973799898 0.22903262588980577 0.5704507737746385 0.023330383618669903 0.8250013039279074 0.5484527725115494 0.14009559256897397 0.7007142370652081 0.7356717636526887 0.9227618395415778 0.8015601910528591 0.17817935413914676 0.370325449928557 0.11260578423945222 0.5943967171304885 0.7369414742376903 0.23069024629228574 0.6708254393767509 0.5916200108303623 0.4045234879303855 0.404685179303674 0.5115832391350976 0.8096918776500086 0.7506281095572644 0.7547140306531259 0.400640965171179 0.9863501675291261 0.4888205792531414 0.5895416068134653 0.1929910969505425 0.05565841931922211 0.171649477304459 0.34883571094300325 0.8247081774001178 0.6193793274235327 0.08812303496924889 0.564809687301854 0.7778007285037889 0.9647509386710483 0.32499690301163475 0.3820018563471618 0.9800081948288181 0.2109717952357062 0.8371192634812848 0.21407989947751405 0.8759017521699206 0.9801773871967739 0.28711713364750424 0.6956595857478125 0.7113113865382603 0.5834168754845626 0.3609977814942795 0.9474789485347795 0.9100164819648299 0.08811589875176151 0.24377768411087086 0.314870507152491 0.7898001247877324 0.7907372536666438 
0.9480403806257871 0.16232590833749527 0.7643826244771033 0.5568286469201205 0.22836436196615761 0.01104825815245769 0.608412747259821 0.21037232532216132 0.8858503568892108 0.2917401680095242 0.32461003116389664 0.6553288038154462 0.3695169827588468 0.25147566394021204 0.6453149468482233 0.6944929263056089 0.7151225250563046 0.10135701505911487 0.22511831462972365 0.5742571076536959 0.027009962526495013 0.320758039779963 0.13690495988569484 0.12860371268933835 0.9048608346592312 0.14430764456393763 0.3705610981510833 0.021624058421240155 0.4447835688754441 0.6493687960211598 0.6517585251033797 0.9950893083433309 0.7029498700538441 0.5346194041922486 0.04764332708624042 0.21607407761080688 0.8000325097883152 0.6344435372727997 0.5069201555821905 0.7893605952794267 0.16598692302250284 0.4491927889155113 0.6937380000289528 0.2503279866244825 0.7772576638841449 0.48797894048848434 0.2179468077461958 0.38625730205614983 0.6222802125329315 0.018511285760576146 0.6387084428716535 0.04745990884848916 0.046187653647994376 0.8975926557340079 0.4241102586906833 0.7043632876841316 0.07192023070407505 0.5162404855992004 0.38634228247120617 0.3826648233390322 0.27247617711060024 0.5350005665415116 0.9801644621593706 0.2658905943718829 0.277829603330946 0.00787245056430097 0.25180359326003043 0.6931732104000627 0.5400815387245738 0.6973675269621171 0.15608391879194994 0.49978856969437313 0.5516987941967065 0.8685718509482675 0.42150941074861736 0.6489688158265767 0.31423445137991335 0.6166878600670117 0.1361798003430177 0.8857344817014514 0.4887128105559787 0.8303800017337638 0.7518366456779769 0.7069660262483551 0.32390063226172217 0.2223519212734093 0.2589994090081562 0.28666147306315226 0.15497593354382255 0.4689329922373753 0.8730659727476495 0.12130272588557622 0.992644063327219 0.39205501869594306 0.6894158978227358 0.06748071735317807 0.4815403685018569 0.28985031468801437 0.414968705326079 0.13240541772384962 0.2967976253694492 0.9762047331295928 0.46431391522559107 
0.8476533627972775 0.868945220577769 0.8249501330663785 0.4204429692986601 0.3074823527602294 0.1798468944883993 0.13098308632689393 0.10327795819210506 0.9852689105209758 0.2561717725460779 0.3984473521582105 0.2636055525387071 0.2551012044585985 0.9019336981782099 0.7452198311435305 0.22568911938060132 0.14028632042853317 0.1579218352848457 0.4732910677319122 0.6387454078805291 0.18650753125488118 0.9533855402261588 0.9962328279273217 0.8656370201471557 0.5145937839424305 0.42412478277775056 0.737797133110169 0.4365453425420516 0.0463400352592539 0.4111770371466923 0.31979217560547313 0.4260528607418872 0.6165437175055788 0.7255800942302828 0.3427327136187761 0.5739780432395222 0.18539926977774257 0.8672299298315904 0.3293639053829115 0.46346980584713526 0.6476190427354217 0.0196412163351527 0.9490084822259395 0.09428256015980996 0.7981256473381864 0.30745752256690384 0.4697899874629612 0.7448172551687711 0.8588871059664525 0.4998472628018663 0.49613508198608336 0.1034276300008331 0.5974431847761168 0.5704601652518115 0.4405603171421486 0.26765485216260243 0.47489732542205554 0.7834087739674912 0.851600022691496 0.3594681773124606 0.4496253958590771 0.2647777427287449 0.6738334828444573 0.185252877464666 0.03267920932483792 0.34045356931583004 0.7513212573662383 0.5812010539734594 0.9418391049032604 0.27360563059773557 0.4023621389090122 0.8279249471625926 0.23349637870094342 0.4189392957016339 0.6913335091480961 0.7925262591792834 0.10502306124756422 0.22588022298536437 0.10247958453084094 0.2620193719041959 0.6550134095508692 0.9691385774472032 0.6808639443702904 0.2689200946570479 0.764757016280235 0.5626403798825588 0.3473034238227536 0.07797571537866033 0.9215315081218888 0.32160316994233196 0.01695318043234062 0.9024264453284213 0.24277789701403985 0.07757093677452032 0.3553129903543757 0.34885214075667625 0.17994920065524878 0.6225135966096496 0.4907779336991499 0.05157214094787799 0.3817991923905537 0.060634691232253846 0.7694563738834728 
0.9507305679126371 0.9791044164216934 0.28031996306115836 0.6858316312938352 0.19261310302710588 0.1162898212310518 0.423698971936077 0.4519435612722479 0.6864053566751173 0.9001541677702262 0.927580689078343 0.2781191057588571 0.9385172698259142 0.06186865016865828 0.4370630185360682 0.3298818198763932 0.9037618460247496 0.5356480644426311 0.6519053514305491 0.4985218082096803 0.24209044492689147 0.8459710720402739 0.9557926130959572 0.5769097385420305 0.5502512915001677 0.6649519776373726 0.4999075362483997 0.16748951739448747 0.849651650770026 0.15244376091886813 0.2647679903597401 0.6659858831349658 0.8548681209491978 0.7236779346132016 0.5080743633535266 0.9622151963510168 0.3216639464000237 0.7027239174071118 0.9213514899392652 0.07848857859494252 0.07903583481913046 0.9147767753829271 0.764458767525896 0.8264487431060462 0.1867270896368597 0.6462592023293207 0.22458089570568596 0.7047829607038333 0.027807970312155317 0.5589494809243258 0.2551006999211093 0.68310636906498 0.04533817428565201 0.08282467989326803 0.18979511174754116 0.22141157859245497 0.2627901948294675 0.5320337566127125 0.49908361029043957 0.8598315229004174 0.6898268237620706 0.6893930206129012 0.15759815743126093 0.980064802933657 0.20818977666677807 0.8870218622585313 0.22613248497469185 0.8876637285087078 0.5568706023627913 0.6742140689966813 0.41606763173796024 0.19348716438417513 0.5913989379689883 0.008366071288814725 0.600659585257813 0.5829716497241423 0.07899763533914184 0.5307602274582585 0.46511433721747486 0.4534760932897376 0.9095178983225087 0.8962279367203849 0.7461641983042401 0.5728065044906301 0.7514818978377726 0.3141937093001359 0.8018987592480842 0.8661291628641736 0.8632243002574956 0.42832781351886773 0.5738926123211391 0.6738321188287661 0.9810381862182417 0.41301220908458625 0.35621112819577994 0.33949607588901454 0.9493950411053227 0.6970504350117878 0.1696647399282334 0.7849880040684571 0.03272235847121663 0.03852884207679441 0.9547210060306841 0.8087916983100104 
0.6538866883935021 0.769463447435228 0.24277325119908333 0.2040414422112108 0.6414435964768613 0.9048428638215869 0.4430028046195096 0.4343264468322814 0.9358809577087858 0.4679044544131623 0.3908619756326446 0.5281851342556091 0.1044946419055609 0.82195746112298 0.3442432741092146 0.05731016252717058 0.945201378056599 0.01879340839372201 0.1846893525625798 0.5488363590971654 0.9215062742128127 0.1747006262990054 0.18353774860035554 0.44868565089264767 0.16400728837140988 0.6673262212164676 0.32668341359166875 0.8007941307808621 0.2982326158297879 0.5101055773443984 0.8566804797201322 0.1361485493715544 0.4142716254823131 0.6057059316460279 0.38719469495698233 0.6759105773890515 0.08057295930668529 0.7059069072369616 0.672356389186012 0.8445091886794764 0.42244869934249707 0.8125679353455288 0.8723529748172969 0.261432559956436 0.9352367536644471 0.44280867915103006 0.9466395486181884 0.8345531163971178 0.8827379315391318 0.6512550712327898 0.6274342557201562 0.31317010487210317 0.8434580929124691 0.9531807476713988 0.2787052632063419 0.6755564334666969 0.21035032658705488 0.3245991814514191 0.8296443566266365 0.023647601105102445 0.22416541266529966 0.26883958982636336 0.8866617156894697 0.23661206815485925 0.42618345085711573 0.5338651996701074 0.07422527398089995 0.4953943878676029 0.2530134009176357 0.44056978447220163 0.6324074139549046 0.9319234882989146 0.8643113682675889 0.9395142284800585 0.30779035582762604 0.07953048288644693 0.1936825979426101 0.4520424760018148 0.6144209724099926 0.5052217156285691 0.6436958812842254 0.2714414892405014 0.6127949774556298 0.9898041588535218 0.8078536839220967 0.8007872663836583 0.45738897124365063 0.8884624037502857 0.09552501226728782 0.18634580630789999 0.09704851670159498 0.8522373342926023 0.16026198142809278 0.9688731636985235 0.6775788037108853 0.9199587169265702 0.5875854238884296 0.21988671016448103 0.39217426523816856 0.10053516603812174 0.3852389850993926 0.25595566447965334 0.7528620096883323 
0.9571066296762843 0.4370745422924087 0.43128514337886126 0.5149231410355697 0.3774160703338446 0.23114697462383005 0.6973455607708479 0.7762951620960916 0.6688587047032531 0.8532590593747544 0.588024531917229 0.09524786067269986 0.352018343680061 0.4491127857628262 0.03975612452494981 0.8221637075763709 0.5431026637054589 0.10138576411489353 0.9883080790779913 0.22504341433516772 0.6135397668994681 0.3424672947174229 0.9973845955334941 0.4033957609689838 0.894101039540088 0.3292510887632095 0.2689286645887309 0.4314605096267946 0.5512662368904904 0.04329668312195312 0.2609845435535225 0.6492516349538291 0.4051480529333733 0.7478356103941861 0.7962076010191567 0.5286978046802724 0.3811460405061998 0.332511688787244 0.9825851425810576 0.0821955009033688 0.4915999798587517 0.10740981166772101 0.48765853319134334 0.35217958924295945 0.7290655095537855 0.06984124475562936 0.7085136224634327 0.12285543170180613 0.01943550294956986 0.6605171103427636 0.16203256137759015 0.6290248161798733 0.43098288741957047 0.5501567341414084 0.9833773157795834 0.7019781853210816 0.16450185858444177 0.05324069235335949 0.7654011496386766 0.4887139260030283 0.46007598985233145 0.35253295784522565 0.09766130441017429 0.6082846446342598 0.44760339967135987 0.8361604038604176 0.07093263932252536 0.36785979155226955 0.1410606766744371 0.49845221397935957 0.7015980911772477 0.45113260753255136 0.23058872104479133 0.5364055143829625 0.7000191302417242 0.31298875605017573 0.06183562009600929 0.9855576978511053 0.2579654574374629 0.36689282047480243 0.20859245341179933 0.48221025462270073 0.5624095271981769 0.9352382183160514 0.34261033424756115 0.013362479780207104 0.5564689245580285 0.8479446887653744 0.6835881782352258 0.8213778889316008 0.06300516419604685 0.5208770885388692 0.17267198885921897 0.4673202472457858 0.7623647342902399 0.32319787706421177 0.6544086216440401 0.42505968214392875 0.6729744671129352 0.05155505430412177 0.7749479642514002 0.477664180397423 0.7219799522342365 
0.36904603225934474 0.008389378593213093 0.6182551391206319 0.712842463673431 0.1381872857482478 0.20851984192840978 0.11942011017294596 0.4977857807732473 0.6796587602492385 0.3497923626161239 0.928439750067641 0.44521190696877766 0.12238046738152897 0.6692454526636983 0.5700002312000284 0.35568020138319856 0.17551855815181316 0.2501653888540524 0.2937261952928888 0.8247717245092463 0.8493067780025398 0.19864499674321234 0.29078361558169596 0.6771366063906579 0.8791082299905171 0.955794003749378 0.42196580125716165 0.5032495367012183 0.8659317136956294 0.9612076123864188 0.049089941303884554 0.5188741005007363 0.43665567363879365 0.8374017530201998 0.242195896109777 0.9369641579012447 0.04850220397116256 0.5002745336713035 0.8469522864843698 0.7927062737289509 0.41484202155873995 0.11725544088725359 0.6398398491136207 0.6946344588606339 0.9132013109207712 0.6326553108404761 0.07309564907037358 0.28822429742202293 0.7513793004089805 0.6588312666970376 0.9170655688104109 0.3131008687912852 0.113875596368894 0.8921330480501639 0.4445273039744866 0.010970779923168084 0.988364165752258 0.8541981758451846 0.6697091772754954 0.29861566489832236 0.8807414841457586 0.6820150792851644 0.5541451729684768 0.5587606582499449 0.16731351185931842 0.894275063652632 0.02216971726943351 0.12135842446779033 0.4039770079368884 0.5468853876788253 0.09077047578681219 0.5088052419652535 0.5147886842029439 0.9571173718865036 0.4116812892607076 0.30083277990438995 0.41610487695351617 0.22371480898151375 0.048011009122822856 0.03829564835664723 0.29751417659509627 0.09938997734857735 0.3633690339203578 0.9298007549499663 0.4770733914400199 0.38248548087883405 0.4883612251117836 0.25110222939228866 0.47589733939535306 0.8861823369063448 0.9474949472809524 0.23441100874224008 0.7159413861319199 0.2164857353652484 0.6789453889286152 0.9332456856763285 0.034855017996496485 0.04432203588089623 0.8602997876984949 0.018177154185788247 0.03307341330558944 0.2924314860924295 0.6653099647427452 
0.7097965498284253 0.12085380447763239 0.674343262960184 0.3198476802078708 0.08746238043721544 0.016751395687280124 0.32057073258108937 0.5996717406258236 0.825859129390958 0.3640697694766366 0.8062662914966703 0.21827919312269517 0.0061267024387164915 0.07810899811551864 0.7549271065557464 0.390027625587998 0.024296077598583987 0.014574066599659652 0.33434412527946444 0.2218793956126165 0.7769931641595182 0.5582951791924747 0.3838387917185311 0.4865325026785733 0.24285013659911558 0.7714507607072281 0.6159329176311771 0.7603672299024826 0.2977888788951447 0.8117727471352613 0.3209493010150367 0.8715515556144987 0.10331888618423835 0.43375296692086807 0.2696418674341806 0.0968122076375253 0.6892177026705733 0.5196585971296054 0.056805809408963515 0.5697929536836075 0.4154390947731085 0.7522035697334033 0.9579194915091465 0.9195847566083873 0.8428203272396774 0.7370117300683332 0.48122209991327125 0.24492754335405054 0.16813672177418992 0.1105470486511082 0.18923185029067768 0.14774666696440342 0.7862782534508965 0.4279750072707885 0.03656151542078301 0.6991014685897536 0.7307990265073385 0.9154580464515163 0.4249895740688452 0.891287583129351 0.7138179604627515 0.49267758298123565 0.41209678781743875 0.3417481820388215 0.23722464654697717 0.8239137652786123 0.026379512610602562 0.7576140725044663 0.7793712770937284 0.5398034104488233 0.4396027702204943 0.9441060000627197 0.3749084508456577 0.18693553608656188 0.8262575052526033 0.9456971799200393 1.4842725234431864E-4 0.9087328304423834 0.10033820621738454 0.24584746260748136 0.052316888937823824 0.3386067435112563 0.361258384852037 0.819285185184898 0.868279927095269 0.6951629136741239 0.8035731058399299 0.230226747742315 0.6668103957139081 0.6092723846256016 0.04927952642249145 0.9005873460352264 0.5181835450889957 0.7841133201163315 0.3468174970711546 0.1341515136255722 0.9458690722534799 0.1918274511084076 0.8474942103036547 0.4758351277651883 0.5473127037886133 0.14422339883920576 0.2179083982908806 
0.5095851271508067 0.5842777030186767 0.23999962934964691 0.1760509989331498 0.2541272159308624 0.8379422737388494 0.8872471944963928 0.1491108028091045 0.9026858084950764 0.14992660556811666 0.8275330386353592 0.14261540451989974 0.7709643887268667 0.9776484884882253 0.84665243426363 0.08338896209977864 0.9670889264937019 0.980884469950074 0.647179060728516 0.35291006903905753 0.47788058268008216 0.1261387299558171 0.6792297664762957 0.9630045709972873 0.07773841527008862 0.04639931885666426 0.5784004818002226 0.8424097412984772 0.7095764145243668 0.4260877268769494 0.7436184514311472 5.976571805016428E-4 0.7616738972221145 0.9944839915669836 0.5026953949083424 0.9143473693543105 0.9699613960431234 0.3056365190643423 0.2596549022579112 0.342906815459607 0.5121795796512362 0.8587423945196544 0.9954468530099227 0.9191873173542217 0.22851109729303043 0.2702099329989156 0.5024501645672993 0.2872845966411647 0.802701264266658 0.14709264587124093 0.7732047728585724 0.26072222625148256 0.1683455024463808 0.03130946537915469 0.0351025912200833 0.4611648032453154 0.36693757410362016 0.28504349844202115 0.025306057421424888 0.16394968393063192 0.5774347764124362 0.06993735739614426 0.2339522256068972 0.6246376732139358 0.06151184884685634 0.6361802503619598 0.008739519123228234 0.9898848442083505 0.32433869392581927 0.9823082869765969 0.6102826952979815 0.006288936473854978 0.8776308056476992 0.5725162372457301 0.5351615790027744 0.132465376317074 0.6098862398349963 0.20550499075220519 0.682377241211248 0.33245974622348595 0.9181362152602083 0.31736656969504395 0.22896910802236958 0.7827049364890591 0.4362009691772013 0.9707338332157124 0.4018350194340079 0.9600828067053039 0.4063560705644055 0.5333997554717205 0.6853356910893674 0.09525150599552223 0.48447928397336204 0.5818368210865436 0.3880538165590002 0.7732517961900743 0.04316110242122573 0.8785241309100759 0.9071174192601417 0.5425065562905665 0.842927029064868 0.3667162815120687 0.4747856062427096 0.7536796379032622 
0.27863323168597454 0.0022647036591594283 0.6495347216535197 0.4914617655941286 0.2762612584519343 0.8993282178396033 0.322671065494866 0.38520244194804265 0.31475210289754196 0.7052872046635499 0.5515361860818628 0.2884090612991449 0.9781830885454618 0.33990091176250237 0.10165930704376303 0.302484901923342 0.9055059382928804 0.23472531655168605 0.18275076816810365 0.3866805858772985 0.9982279906032506 0.9910347483225523 0.4972192219577697 0.8440837532244935 0.27083276080644425 0.9379671834364494 0.3849347477388594 0.30257456183422426 0.7843998485443969 0.4173397953191148 0.544248517504318 0.4200252033848071 0.6907987834976459 0.7475579367067747 0.5368194522823228 0.4346235863333049 0.5329948736424109 0.7262670614531598 0.04258667306338759 0.854053097907829 0.59641957448177 0.21987397673655773 0.6212660988104668 0.03136139809416383 0.2870217842472721 0.8577899871083858 0.5580383237598641 0.11070335196897929 0.2235306383452077 0.10022555882512052 0.22363412409155448 0.8427443119088593 0.9746243494875405 0.9805921740539523 0.2628129891579868 0.06736159770086025 0.14611362661748306 0.6370438926820798 0.8402690193163618 0.2403839668789145 0.8289126678498366 0.3424177962123094 0.11928770883632933 0.24145980840892178 0.25548419988273996 0.11780880458198217 0.12601616582397546 0.8417470398238035 0.8806094509501633 0.17224086914000714 0.16099846493362235 0.6329684843533258 0.0017481638850984638 0.3493736408881255 0.9596666506976723 0.9234788009320275 0.4649173332170926 0.4819094850888418 0.8747671397483785 0.09234028810356676 0.9639317904518382 0.055343067190160466 0.45624161101080385 0.1590061407536596 0.9340138130774194 0.8649215414725075 0.9378697320204386 0.8740618705638846 0.0651926699173353 0.4131606429205692 0.30502113768725714 0.46557689345142617 0.0165796750009507 0.63998801745205 0.8141524086140394 0.2859696043126705 0.999517519961773 0.20356190765099802 0.062224041887258386 0.9958913610806887 0.5157676693583857 0.9370400358926442 0.9630016740240736 
0.5944858535951947 0.6863129473702625 0.2950495385585782 0.2772447458510858 0.5594822603183619 0.28701696417282807 0.573336175186927 0.6650746011257872 0.5959318626913062 0.8192955801758156 0.4360467250451442 0.5813536234632012 0.00899964461164715 0.6871204064892085 0.8173259329212803 0.7186956663599069 0.5140736163259504 0.8260876784505132 0.5773849698221941 0.11776695561182038 0.3765567027916501 0.9359502788055105 0.3879259474413258 0.510826423346087 0.1340567761958965 0.804924752967021 0.19778266361017993 0.9419925946384303 0.7952929498698571 0.08224907420001726 0.2868649910517359 0.21651393971847643 0.33288703982994117 0.2548810392244746 0.3549705757097441 0.2713580468446599 0.49131092712937985 0.5148913704792452 0.3117540652351417 0.13072477506868307 0.4166122866262999 0.6660193784126286 0.43512071935220153 0.7126267171737222 0.3811425770516519 0.2110233195301986 0.3475379170990448 0.3864702534265776 0.9243624326013586 0.28587170298557874 0.8232343988107562 0.8466088238681592 0.05990184977474278 0.16248561613477697 0.7839839572582263 0.5631295622950568 0.8094035449580145 0.6286562984393416 0.9671462920605213 0.7986732860146526 0.21527944585365277 0.6719265741886761 0.6568652932423326 0.9388768712534651 0.1573624583827391 0.5436381060198263 0.5578002791112867 0.6740665478852472 0.7525361700623672 0.46001131614364676 0.3604936574704537 0.3911186966719863 0.8517778853860525 0.9090383348130304 0.7984536686354251 0.9869450974711672 0.16565095747022585 0.7751334130428094 0.029258802321343813 0.19884769810112635 0.847134523365862 0.2678997448015429 0.9854657341805575 0.46202723333663864 0.9397801284887725 0.9470351970347235 0.9358303425067744 0.2508193936489197 0.9595363220717045 0.9723136681735085 0.8733270412260148 0.8377709787499308 0.02018376275828737 0.07574313588997761 0.503678684331896 0.41345768933733484 0.3351751533790285 0.6524150485117524 0.5465858861546499 0.5499840771986386 0.6327756517126188 0.7972241859321922 0.7815708208844226 0.062395776805740266 
0.006751129007940615 0.07655187062302127 0.7713276354950411 0.42477226044126415 0.9914530847699669 0.31716977367752186 0.47769756568128974 0.0315585253209022 0.1980008304472941 0.7190352084809775 0.04886387510150869 0.7597322521933397 0.6773626235144308 0.15431341945209798 0.9010883986953706 0.48369260208640996 0.2864897190702853 0.6018759138656138 0.8442871054729205 0.4222956613472485 0.7252581411677134 0.284688198796082 0.4079702067182597 0.4562506181743562 0.584824938548372 0.2269144935476901 0.7125473489853255 0.8441298776466977 0.702107438177989 0.6282305392115077 0.2007259802229987 0.0865354372146172 0.3504447700991007 0.664750368964215 0.2947072173526766 0.040502488793447866 0.3131036888735852 0.06913640650155295 0.5771099796507511 0.7700111602313999 0.6841410346310398 0.11687656704467264 0.4070311717913301 0.03351522014854702 0.3202378288428078 0.4513761472904205 0.9773754407358841 0.7556867523990463 0.22747062036502186 0.5174262372996932 0.6955657253682593 0.3081138787527673 0.8085882050427864 0.24077433254411007 0.42697116043010763 0.9324786239410852 0.8192123778408278 0.9909373344045124 0.5788665211743464 0.17978022087210777 0.08548393357479656 0.7950606675155872 0.22246317460116116 0.8104586599357463 0.5210258365599952 0.23254155591511305 0.4515285813172729 0.20281247066222063 0.7476978624458402 0.4332985886061125 0.9583377126045068 0.9508934524583795 0.6724676349966006 0.12698380729416792 0.3711761893665957 0.998100207991949 0.7018256364080073 0.7460972772901097 0.24234193047616137 0.3489816247047671 0.8855222299854018 0.1728786042596434 0.7015682914635968 0.1293003008657675 0.1730070964715248 0.8380137488482381 0.6970182576678371 0.5231949036682243 0.13880291878022444 0.5286093642582183 0.4344760649930067 0.9063198910579588 0.9871461014010834 0.4435511856403056 0.09508900063355818 0.31512955534299303 0.38157976214372114 0.7462771684397291 0.16298857728209448 0.24501283272683538 0.6617852347593486 0.10815961386914097 0.7263472691569185 
0.6537742935122419 0.30779001707829 0.5101051172981514 0.9289029820585475 0.9619956149735787 0.09079155186247001 0.8756452500292544 0.3437104738473753 0.7881010302240159 0.21018813694586147 0.9457042131548167 0.41685536239714904 0.13348936927968535 0.858009480034208 0.17339490656404977 0.28609261715694057 0.7418844076500961 0.07479089543227602 0.3166077494661409 0.21784864553493477 0.39709380652307513 0.4650374065031009 0.8368215817597391 0.6257518424124988 0.9946569041142073 0.32577432702551123 0.17447958133802277 0.13483376534006164 0.3253996770562684 0.008284015175230253 0.4644372434201143 0.4377691875436317 0.6427051336095672 0.8809784949488966 0.2794033540867332 0.7530000582298157 0.25034575500888356 0.09046055644971773 0.5449596838589661 0.5393149111436315 0.544014124141635 0.4281764588436111 0.8566581610860013 0.7474317165235026 0.1672267987368291 0.0016655657949264935 0.047859075774694615 0.2963589112382886 0.20106545498637451 0.082263654307277 0.006387685388379527 0.6507859199472 0.5972585556356661 0.4552179544821817 0.4187507265642919 0.3454808220891731 0.3853957476272446 0.833523900120751 0.3325682144042038 0.05364943202321193 0.5376061961852902 0.4133949956455627 0.17104033864407342 0.3284369576065066 0.7578215618245875 0.4471284017321202 0.10768263428029601 0.4602294981280407 0.8777823870044411 0.7882313451314188 0.515227952959376 0.9871394259280253 0.9882371990312082 0.7240237507631683 0.5395059199215975 0.27203546687326896 0.9113211419130292 0.05253527012887371 0.7172768310965318 0.21670430806744645 0.12889129324446058 0.28336550517415215 0.9564089069229106 0.6222076048925164 0.8216339662545896 0.046278470914129066 0.12745355003688652 0.9242248733894517 0.24504890757547193 0.2766533424015778 0.97758622643239 0.03314877418022466 0.2709542472123406 0.5135156761504137 0.07104536380049309 0.7909262703293841 0.5569448601636288 0.6937241391368164 0.9846149582459615 0.12011592635317336 0.47127630851360913 0.15499027041651925 0.042361605208149355 
0.2853575086523784 0.5969350498710198 0.17075921592940757 0.46074993530837205 0.813427135134545 0.3661953728287407 0.2315822014609925 0.8934246977216749 0.5252776559578605 0.13575167674831135 0.4604619438206997 0.014362232568310707 0.8219405477367354 0.8025546788959065 0.0037544631078304525 0.9587574686347042 0.8358918635584601 0.8817686285178534 0.97456215235735 0.15292698943013805 0.3104618332594965 0.8208057955122464 0.5421542895520371 0.3076730264778913 0.10209388366895178 0.2071835911671639 0.058803230716529264 0.12396664426424842 0.01373571156748421 0.4429888488093383 0.6945254240261148 0.5654731265009985 0.26237110281619935 0.40502270041317134 0.9105686369638367 0.3893561400005374 0.25967856544689216 0.5872384441831136 0.24408773913687998 0.9949616223718931 0.5420284777342392 0.23087432938326802 0.2993035507135774 0.549396201618738 0.6245078312639525 0.9877216491189181 0.5445402067752272 0.779679084417117 0.2733875551176691 0.7332580304392309 0.4998884512676325 0.7830607006677144 0.19242461213144058 0.248914600592805 0.1668439046801642 0.7002647429597956 0.4006168199364456 0.626535114731351 0.09912039375547754 0.12082500483960312 0.6425058570064769 0.41312357429853164 0.34294082659174263 0.6453523213672935 0.016849635641644656 0.7102668203536047 0.585458564185904 0.11648064972805472 0.6354621995484727 0.6701568037098687 0.24156513092014387 0.6484056714952214 0.5312460582181915 0.08892663464359352 0.8821902063079643 0.7343435064679462 0.22291776290761334 0.08805383245142973 0.3613028556358642 0.39190061967800816 0.313654809765855 0.19587277365215128 0.23417321061905183 0.5182609686503346 0.25186819563607654 0.4937778013685329 0.576056443735618 0.3702592712287721 0.9145313880907087 0.8754871980618292 0.10831162978099107 0.3588248628234445 0.9869732672441223 0.8827776400338255 0.6180702634510934 0.15294302513651092 0.20776906737493217 0.027387399547154856 0.017920328825661413 0.4251961647670027 0.4233399980206096 0.9863260632700784 0.0336735048080814 
0.43911046496695805 0.054098475834510884 0.5666171505913646 0.43113504186161156 0.07794771176065218 0.012452986916201114 0.8755396197205625 0.896896465339763 0.8452465937219634 0.33271248308096535 0.7037225635112111 0.17352412561957364 0.8293312538040812 0.7739077109431372 0.7309872965434463 0.8161167779250252 0.6171692582887782 0.4387722395588619 0.7986195548469979 0.9850582293942708 0.6673751159659848 0.015603616352030891 0.15679357708433583 0.018891586105011182 0.7078195069055868 0.5944510039176887 0.2921592471339596 0.14159403015939553 0.3102186434561395 0.13505254407156908 0.6681424183504938 0.5501319888807776 0.9122417923298882 0.5711717273497167 0.4692971434418173 0.3010284982823115 0.20708972394945357 0.22445951083724613 0.23108848472511434 0.8687760807202289 0.0011602798483324461 0.6706946209704986 0.05777270355617514 0.9842797606589708 0.3523316109103354 0.45223144064907816 0.9835815310504845 0.3781167672161718 0.04455779121280512 0.06271141288110427 0.9162054328600714 0.351250855332245 0.22888530519333927 0.44357050290597755 0.5166106962015031 0.6749405554637874 0.36666323385942934 0.8266016939742987 0.31551359869928697 0.18193270518448323 0.4476338006248197 0.13280806163866798 0.3443743693103891 0.5526336858638814 0.823754173155152 0.2950086477196292 0.5675621695774643 0.22280249300878596 0.8990143539840033 0.8101358400347541 0.4423824568192748 0.04341178206846619 0.3461803597756786 0.546889510835562 0.004945711985030132 0.30867360908761576 0.48330510743995236 0.3353898948958939 0.9823968987860325 0.03482931871981132 0.9360149370146493 0.1476974267842861 0.5658130860771329 0.920153644155877 0.7359921717087665 0.7415445342119116 0.6431166889253239 0.24572507624893458 0.5617392220842076 0.48435113423267184 0.4876396377516863 0.08686636790908664 0.9146237140543306 0.7005716160296048 0.12551413342931006 0.5967598302306062 0.6413673913260808 0.29264822109242583 0.8638536761886654 0.7965943969017684 0.3635569284289587 0.5743219371179907 0.7517278356402111 
0.2607027798369659 0.05541740297758235 0.22477860234976932 0.06722286734652227 0.32049820142404595 0.223004439327824 0.023084842511739923 0.2764193427848298 0.9276225233025693 0.8811344054552651 0.554594362079613 0.2939074740378972 0.11456820735563467 0.7987862327203585 0.3944823695537719 0.2894992473170328 0.07013916689935029 0.2893727785361224 0.368292133566817 0.7320860656299131 0.20746602256481694 0.15919334525755047 0.3653539755623735 0.8391292990986909 0.24389382981973207 0.295543137355453 0.8381643888975077 0.10906045662787556 0.2349111340696669 0.7753697701967698 0.5592883639094907 0.1539065966365576 0.708391438562379 0.3060304987553486 0.42707577469013613 0.620745698353341 0.5307690682762082 0.27161850576924607 0.112458405481491 0.5931360038236527 0.9033735575065586 0.6514899473535042 0.12132085787867108 0.4974517265131848 0.9349977553915404 0.6020383012893777 0.41041573736427195 0.20409756820778646 0.5658770673132214 0.5484259189099209 0.039519321360061865 0.7098627049374666 0.22860778485281552 0.2198971533908658 0.4685008494594981 0.5152144092329579 0.5231352515742529 0.18046149051526506 0.015205249089846218 0.5691203628089233 0.3557931662102275 0.05614884691853084 0.6785252930279064 0.444027979136132 0.44647736411832406 0.8407038175301974 7.662647665580558E-4 0.22491765623452709 0.843388885801927 0.3734989579392556 0.894382082636783 0.44888664923286903 0.10138293192099246 0.3266120993488544 0.39038923074403364 0.3155087460081367 0.07781115095270452 0.8124261440143344 0.07924053062677971 0.30325390587612056 0.8788472460070905 0.02877402765128001 0.6096682936640135 0.2735419874931283 0.34963685790351406 0.19556560858324945 0.9869818314443335 0.12154239939430878 0.6432139898192475 0.4049974791976009 0.5695320952068484 0.04162176801184003 0.8259786892937442 0.9068603187319941 0.6005316424054364 0.5153801973482292 0.1816314201876792 0.7811231013549397 0.8321457630893523 0.20150559709807525 0.860811475883052 0.911886721634934 0.29974588137947367 
0.009215358753023373 0.7902884303547032 0.6699645598604383 0.5864445081581896 0.770842739484914 0.30340684673285967 0.7357609040217753 0.981505891101327 0.09416520295650288 0.7825865451106634 0.049782372682869225 0.7809546140711975 0.17136060991956303 0.7150317354791036 0.9650317834964354 0.6046795407777512 0.5490162929625242 0.9544042594511081 0.2936976897042739 0.011558444249350375 0.12240512281994587 0.6540675806518195 0.08996703019022934 0.8826040496177711 0.9072817825621773 0.2916098974717768 0.6914522773602321 0.011859453757444682 0.46374140212927806 0.4397059441353458 0.7536386279511309 0.09574728921762066 0.19922197569813416 0.454979216646724 0.7549209413003319 0.8438034955200862 0.16152082831649306 0.05437715475673244 0.7242138108044249 0.24720326747358723 0.3477345462592435 0.8276741651741335 0.7487827132626703 0.1362129077199543 0.001320984340954956 0.09506814068039093 0.15685790390690002 0.03367864781082752 0.8470206914138128 0.16043869716018655 0.13775683405827588 0.9688892172995127 0.7221645594857098 0.9336855435065103 0.9616551400060237 0.35462601385210135 0.6029333314025352 0.09966844820630893 0.8918100153214777 0.2499411624799759 0.5954234823050707 0.4966206726320508 0.3439539582745095 0.33218038313720033 0.06391374065821431 0.1335099153998387 0.8407411832854667 0.6839938915704437 0.7025145725587845 0.32367219160615857 0.833542625563717 0.06455541762449779 0.02142286473427324 0.7411457282066031 0.5195498072594588 0.6789358994199813 0.4152929461256728 0.8164831173905581 0.36906190301313413 0.5855549181639818 0.7087733726330739 0.7804223620706027 0.19599426193587177 0.7888595811372999 0.833049470219578 0.836648091311992 0.25406070981808915 0.7528568329841616 0.07726891508287348 0.3989762479199409 0.021746348555662487 0.5408522848057792 0.1800183911417943 0.06364739085855164 0.05110097810172176 0.03772494269911708 0.3785568544212773 0.004014577230131988 0.49854819309223863 0.743293243629589 0.417301513849203 0.772763916778455 0.7303182541903878 
0.43880553383446286 0.4321003778404723 0.07590743384676091 0.7007779966593821 0.6233134704043266 0.5731151524298418 0.9431391254442129 0.6388440539740106 0.9926643649395334 0.20667683334680742 0.8607469338204383 0.9458305657309517 0.39107425551824615 0.5452659848959636 0.8561246401296392 0.10749616257210859 0.603720842403787 0.5948078202103917 0.22702996720312008 0.8537190750260626 0.07852098629905047 0.37976233686012084 0.5940853099969731 0.7611435192863576 0.16623540294872974 0.5090279850159877 0.22162090106157117 0.9873043706271202 0.7502681151768351 0.2794668263386568 0.09981024927588922 0.8491513341680375 0.2010063121082023 0.6788982115530632 0.1101500255437563 0.5145577818966668 0.4342717933124779 0.8466967526871059 0.7066881097194049 0.16231336775386396 0.4794321364386488 0.7776698352813118 0.9370045567393532 0.5842640850365449 0.36994639204283697 0.31367863129846074 0.2998026643330437 0.3526769383421683 0.35759634464137424 0.7575059207867959 0.8900679409172702 0.356570519800112 0.42138116802172987 0.7647628323857197 0.5181050821221338 0.7226675064065391 0.7930300047657638 0.5221658976888925 0.0053641621226532266 0.8797881818260435 0.7533130485388948 0.6043047776337308 0.6578191134146325 0.7999187506961772 0.26680895071908306 0.3160599435904867 0.9967629094749736 0.2548523701072931 0.11751227718364798 0.7741189734167138 0.18723548354796793 0.2642143523207281 0.08364179503376856 0.4585790295519807 0.24590214968748836 0.7788475936655586 0.3472367547744054 0.4744118500787389 0.03305157291625893 0.042948004865868805 0.7726047513358922 0.30260736685885203 0.23748413251301914 0.009003928703530284 0.9074115698516232 0.3921001106566352 0.04841292703556388 0.6415076358694173 0.5611798148290632 0.546122984298814 0.9751013075811158 0.9385707655813983 0.5981897399198408 0.6904203573769336 0.32281251816132583 0.02499020124204898 0.1452522865891941 0.275793137465408 0.16274812817748618 0.725974256910283 0.7595507247916057 0.26373655774528404 0.24400424996160064 
0.6176534626454802 0.22772421233613416 0.3714704208651147 0.18717527258482347 0.6686647072747434 0.5745926810648999 0.011451558144208174 0.4720869322540422 0.23651046121169317 0.5483995650154068 0.15049608317913366 0.18624226598649074 0.3630601488218028 0.3191747881188479 0.13492888638266154 0.23591468831900586 0.025473730915413273 0.37779777910895485 0.3807168262388547 0.7384250857062612 0.2419924102838683 0.6555659946389389 0.7113307903429457 0.6119905805902909 0.7064548234223801 0.6975518012842582 0.041121044173785926 0.95863457632338 0.9310617374084719 0.31775638939666984 0.04827943217127695 0.6959991415644002 0.7138256587344018 0.5400467214083454 0.4590010440612814 0.03550229468904875 0.11530651823136895 0.7860872547222 0.5732467306721385 0.8271344541688084 0.02864807066585906 0.20995549871334906 0.702547405702511 0.49528984235159645 0.8575941190118562 0.7988564459791808 0.261579704297495 0.2753798931132071 0.7790775622837456 0.36032788439303165 0.3577817461825328 0.7275779999426356 0.5601391831011707 0.798554909611189 0.8570671575682852 0.051895612670677105 0.923347647796495 0.1458887052396427 0.0687339201790409 0.55789189239482 0.010555540123861173 0.6603909458160252 0.38895424294430714 0.7526704810086234 0.5817410269916773 0.9262021087894351 0.4697704208665473 0.9024184175871149 0.20384821930402652 0.30075825938530376 0.739310897442101 0.5955941069321633 0.8339538420479455 0.24135753970542784 0.388979572796318 0.7899722045411016 0.8817514833483139 0.013304129094116046 0.26466066366520624 0.06110578581967907 0.048259180864929974 0.03283402551400216 0.451520393357529 0.28636647532525616 0.9808194735751177 0.6302537764310937 0.7257759168981374 0.7965762266800598 0.058876248099811845 0.3321990108104972 0.46184085576099454 0.10634468190995416 0.4306753454489698 0.8566476738355868 0.02954591107090676 0.6945784326259284 0.7579512860978904 0.8902977741580069 0.19367219918293477 0.9482297615167189 0.9938255373636277 0.37514735914222397 0.5368238068064727 
0.3052040660789548 0.9185565402460037 0.46959000207067125 0.4667052797804552 0.8364466805387878 0.08844510398691274 0.5690665038501493 0.9401588931922626 0.9925814251323926 0.3151135968052753 0.7722893084365049 0.6388552878412409 0.007074805102709525 0.7855405558451302 0.9612832713037798 0.9653204421596334 0.4957512655293934 0.4576472908945013 0.22696257924486007 0.8930211153517802 0.12158643144972237 0.42189636742812275 0.7166904218297355 0.4005192059467414 0.8040569895630059 0.5062763860207805 0.10624470000556796 0.9472457066972014 0.8439274107095952 0.5202114092282719 0.8058390295534339 0.2072329294734585 0.5072049520686754 0.1823473196862473 0.6983255382742759 0.0671833265997489 0.06609650671948697 0.9578316373578556 0.6774324095010288 0.7165421935776136 0.030151740091279078 0.7426173282262206 0.5048649100343777 0.5097803983839492 0.31737938420760436 0.5754851396958812 0.5373265294921209 0.16488270017087203 0.4129740497376414 0.3328930591431136 0.6879921713820557 0.7770758541260726 0.14239367972969608 0.6674258647582226 0.5816927678214402 0.07312027801063126 0.5458780242471455 0.28753105114664257 0.23904655914560302 0.30902743370910435 0.48310146246009844 0.04439248241947058 0.78199970101686 0.2228550107188877 0.019503758747772326 0.19120628702222708 0.5917143183885184 0.49427009040766756 0.34994500823414765 0.6107881438867764 0.06946987952949102 0.23386049007297138 6.815898610228466E-4 0.7693549834712834 0.6522192247152792 0.1277127090561141 0.3172625590221223 0.6474879253302278 0.016668074477538264 0.9965646185508453 0.7937963003061497 0.9236980540133013 0.4692225765821869 0.6938795998980299 0.6281400582867573 0.24167978962526304 0.36387696075025167 0.4634782398273469 0.9979153872805755 0.4792513718405026 0.016088766343293592 0.1049027283012115 0.9438551097213984 0.5551456941494662 0.9879168313523595 0.5534776710267828 0.41983793454885754 0.45922896324116846 0.8081729597192178 0.7720711359857783 0.02306958940509407 0.8723886120340274 0.16106521143776997 
0.17284746758231606 0.1532198123339814 0.954245742770171 0.32778697006711743 0.13984375981863872 0.8659268533978893 0.6200122363732412 0.19773300416583206 0.6069898744871687 0.48393219942876076 0.902225540803188 0.47949956656698434 0.06123182815049266 0.10821027628642299 0.7987593210568421 0.8365531038841665 0.7867067806980185 0.5318089504554485 0.3454213252257766 0.5484275485716887 0.6434571972107789 0.700718727426965 0.5196148242315214 0.9555379374080044 0.18296093594941465 0.7889394674152096 0.9809530028153385 0.384173698576067 0.44279275538268803 0.15901584705586647 0.5853804562569365 0.4679620402633975 0.3340221498820458 0.9646633062607197 0.07126999263979372 0.7830681621193185 0.13749120571958462 0.9187090046860423 0.30912649268875303 0.49662601690552965 0.2589052550954266 0.7974225890376694 0.030944128394349235 0.29136906160220244 0.05770857059179524 0.5814143027705558 0.010927327494160699 0.1741581184393628 0.26100563598422144 0.7436286385929558 0.2561993443927031 0.9020789742896825 0.20132846805478744 0.08656385822921553 0.8842870815178182 0.6670096303916552 0.8758388292972911 0.7036990032528879 0.013584573851996296 0.36803337205315845 0.614593555981759 0.0942115981593844 0.17366124221476986 0.27080035621564813 0.9667904080606126 0.19542108730351126 0.45419590440592306 0.10391721353867456 0.2664504096184993 0.11399394388794049 0.8544807912293201 0.6746704412315248 0.22278237913167698 0.6347988750513444 0.06985426302311748 0.8998660917890489 0.7955138933274174 0.16540135891526897 0.25708508640905103 0.7422948897391614 0.5617621793666441 0.6345385545146996 0.1576449062332277 0.9193081056545719 0.758557538047756 0.004517380777386726 0.3259696642603275 0.4072167592359546 0.19392086574630807 0.48032975949187984 0.10922508469534098 0.7037025376452524 0.259436022491791 0.20238098571720542 0.4557550878734413 0.530155660147726 0.3485465620904137 0.19507834781513023 0.591305927043174 0.15683202064932045 0.08734599393255138 0.6101289285427792 0.09528355021612478 
0.8159473736421348 0.918419124407022 0.8512471886674811 0.9337149045911107 0.7313040004994834 0.7433315277685376 0.9960739736187669 0.5612487841288781 0.13777125318550298 0.5256530337302049 0.04146101833238858 0.29144510810001667 0.28991793094248564 0.9839404989738755 0.8898754582032784 0.7527662645923224 0.8663214086769981 0.7628937706876097 0.6675317552969239 0.39709767583517075 0.36455449228120507 0.7609380230583505 0.9189584691547855 0.7082893216552508 0.05157794394030668 0.6137748818168464 0.45806050544177235 0.970123388048405 0.7852429945733568 0.685983396107955 0.17233761973585005 0.19627793352674816 0.5548346412875143 0.3065413420495321 0.8747723725673098 0.09926999808479497 0.9020633535135837 0.3991764456612321 0.37700208751795905 0.7810260044855696 0.47863911806616755 0.1047455963674947 0.31774056575933896 0.16464115878379393 0.8725385391126842 0.4703800274658526 0.9220177287508676 0.05505371084563393 0.4923019457617799 0.7889228312737069 0.7896557623815607 0.1157737453431551 0.737174369588495 0.712224468112001 0.015502937651319049 0.9794709953023011 0.9006307938196598 0.8331512853877484 0.4292973001547499 0.17213905333592605 0.25580215570140796 0.0337003980143632 0.5096340734251227 0.5237185470291331 0.5535459620984543 0.530404231764753 0.5743621645602904 0.26247506792962705 0.7273810753996187 0.21324633353283584 0.17883704152107482 0.04609238510270963 0.9442335056803158 0.5705995977098299 0.26380812896120187 0.7693550371287403 0.26591613418437576 0.22023696841601315 0.48632121700914943 0.10645847948274456 0.09349214458673316 0.8252535939165961 0.33986740855908537 0.3953995674413642 0.25514819617955475 0.9637787494244067 0.12411178249138488 0.6525634600698504 0.06793039309293292 0.34264471358258164 0.5598868354028317 0.2175377057591561 0.9546721424815025 0.5977187782738402 0.8617368398012468 0.7713479151234223 0.22966992982653744 0.8073940784493709 0.3781511167331949 0.24572377136600332 0.48013954491592903 0.7607732347763668 0.4887257627806645 
0.44666818800095565 0.5652115911893222 0.9683686786847581 0.30784501251572327 0.052515945756928084 0.6302020980423065 0.3806818625080489 0.8860745468934926 0.014833315062302677 0.691120225354596 0.7888134463018771 0.02331122541414632 0.28992285519144334 0.2828519367622321 0.9212944660117874 0.5118349135372392 0.2593629764472778 0.38418469252149945 0.3226188654947756 0.612759637891035 0.15992466775292802 0.3338153133402114 0.2014957895011903 0.8359831110763268 0.3363730653172129 0.7337091220188731 0.5706297243092675 0.6415847418418991 0.15507453005630556 0.39965558982996885 0.1648751060532737 0.509724715973311 0.9719947547194556 0.2635917938963803 0.8209019121942093 0.9364469923070836 0.63558696223314 0.7582420122641577 0.2290465817821843 0.3043918901313837 0.623233690017152 0.6218478971565654 0.22681308391887178 0.25027474440311714 0.05931931200791751 0.24924055787670318 0.09352679613579795 0.10508886883336477 0.5436751934938864 0.6852323758033393 0.6269336250149754 0.21834273598586962 0.931052387305728 0.10728283742791656 0.13507575847886621 0.49424257497476387 0.9454421165261024 0.1766191670146392 0.8605641043075091 0.265583588922658 0.7874769568009795 0.9504695897707678 0.7694073327179901 0.710214007316244 0.6988111886799743 0.14279284144907745 0.08284543262606525 0.37528832873856144 0.7842915896468506 0.3968648326848443 0.4081757752105375 0.016127067485073088 0.26089446187274834 0.8903094342820649 0.8333481293276461 0.8793524923716309 0.7018191421933019 0.03163709535767567 0.890144215756402 0.7640708534305046 0.7747917362081276 0.22708197779232275 0.8620467592604824 0.6802236673400645 0.4355470425583857 0.6975036698800775 0.5154416006502012 0.6898317040830615 0.5306207965937187 0.10734775506226946 0.9665859194480778 0.5647072332217048 0.2879984945961612 0.3311345232020362 0.06181433737474584 0.22331144760576072 0.2298308475612132 0.02767283776725904 0.34474397968405257 0.519785917780937 0.6283402662160148 0.81817187397101 0.4117785713175125 0.8497876686006736 
0.744206385179254 0.7716507264514804 0.21600068064551547 0.2106101249078559 0.7361336993089533 0.589521755324204 0.6695799822914602 0.8071727968361477 0.3109668711279203 0.35095087233233346 0.7161153376079215 0.03081465415535911 0.8956350011164439 0.9315736947165644 0.8968665022507035 0.6544277061982162 0.355369052944008 0.24037374256889854 0.3186211051686352 0.38531838905942917 0.05705104451543219 0.23300503302172382 0.8459811027527024 0.49628660407977643 0.38221794370655005 0.254568275091538 0.2871423714371193 0.11576325203109183 0.6311569531219519 0.6408258426242939 0.31978006100806233 0.8994490265709364 0.6276927463782123 0.6640683774776689 0.9124409084623236 0.22874848891525423 0.23368201906192343 0.8483209145866678 0.03688372640973436 0.785395414190305 0.4128938776330362 0.3983044724329381 0.5380973928697788 0.33701654375200674 0.4458694768029131 |
#install.packages("rjags")
library(rjags)
set.seed(20141020)
#############modelm - using point mass mixture prior for signals###############
# JAGS model string: each observed coefficient y[i] gets a latent mean beta[i]
# drawn from a point-mass mixture.  The Bernoulli indicator bin.beta[i] zeroes
# beta[i] out with probability pi.beta; otherwise beta[i] is the normal draw
# norm.beta[i] ~ N(beta0, sigma0^2).  dnorm() in JAGS is parameterized by
# precision, hence the 1/sigma^2 terms.
# NOTE(review): the text below is a runtime string passed to jags.model()
# further down -- its contents (including the in-string comments) must not
# be edited as R comments.
modelm <- "
model{
# likelihood
for (i in 1:length(y)){
y[i] ~ dnorm(beta[i], 1/sigma[i]^2)
beta[i] <- (1 - bin.beta[i])*norm.beta[i]
bin.beta[i] ~ dbern(pi.beta)
norm.beta[i] ~ dnorm(beta0, 1/sigma0^2)
sigma[i] ~ dunif(0, 100)
}
# prior distribution for the parameters ####
pi.beta ~ dbeta(5, 1)
beta0 ~ dnorm(0, 100)
sigma0 ~ dunif(0, 100)
}
"
# load("U:/R/RA/Data/RFI-newdata/resultpairedlogcbc/ks/Model1.Line.Diet.RFI.Concb.RINb.Conca.RINa.lneut.llymp.lmono.leosi.lbaso.Block.Blockorder/Model1_fit.RData")
# load("U:/R/RA/Data/RFI-newdata/resultpairedlogcbc/ks/Model1.Line.Diet.RFI.Concb.RINb.Conca.RINa.lneut.llymp.lmono.leosi.lbaso.Block.Blockorder/Model1_result.RData")
load("Model1_fit.RData")    # expected to provide `fit` (fit$coef is used below)
load("Model1_result.RData")
#hist(fit$coef[,6], nclass = 1000)
# For each of the 23 model coefficients: fit the point-mass-mixture model to
# the vector of per-gene estimates, then shrink each estimate toward the
# fitted prior mean using posterior means of the variance components.
new.beta <- list()
for(i in 1:23)
{
  data <- list(y = fit$coef[,i])
  m0 <- proc.time()  # NOTE(review): start time is recorded but never reported
  mm <- jags.model(textConnection(modelm), data,n.chains = 1) # mix point mass
  resm <- coda.samples(mm, c("beta","sigma","beta0","sigma0","pi.beta",
                             "bin.beta")
                       , 2000) # mix point mass
  # Posterior means of the per-observation sigma[i] draws.
  # NOTE(review): the column range 24563:36842 is hard-coded to the layout of
  # the coda output for this particular data size -- confirm it still matches
  # if the number of genes or monitored nodes changes.
  sigma.i <- apply(resm[[1]][,24563:36842], 2, mean)
  sigma.0 <- mean(resm[[1]][, "sigma0"])
  beta.0 <- mean(resm[[1]][,"beta0"])
  # Precision-weighted average of the raw estimate and the prior mean
  # (standard normal-normal shrinkage formula).
  new.beta[[i]] <- fit$coef[,i]*(1/sigma.i^2)/(1/sigma.i^2 + 1/sigma.0^2) +
    beta.0 * (1/sigma.0^2)/(1/sigma.i^2 + 1/sigma.0^2)
}
save(new.beta, file = "new.beta.RData")
source("quasiseq shrinkage functions.R")
myQL.fit
# Bind the 23 shrunken coefficient vectors into a genes x coefficients matrix.
# BUG FIX: the original seeded a loop with `matrix()` (a 1x1 NA matrix) and
# grew the result via cbind(); besides the O(n^2) growth pattern, the NA seed
# either fails on a row-count mismatch or leaves a spurious all-NA first
# column.  do.call(cbind, ...) binds the list in one step with no seed.
new.beta.matrix <- do.call(cbind, new.beta)
dim(new.beta.matrix)
str(new.beta)
length(new.beta[[2]])
load("new.beta.RData")
str(new.beta)
str(fit)
####
# Packages and local sources for the quasi-likelihood RNA-seq analysis.
require(Matrix)
#library(QuasiSeq)
library(edgeR)
require(reshape)
require(plyr)
library(fields)
library(reshape)
library(fdrtool)
source("QL.fit.R")
source("NBDev.R")
source("PoisDev.R")
source("QL.results.R")
#resultdir <- '/run/user/1000/gvfs/smb-share:server=cyfiles.iastate.edu,share=09/22/ntyet/R/RA/Data/RFI-newdata/resultpaired'
resultdir <- "U:/R/RA/Data/RFI-newdata/resultpairedlogcbc"
# Count table: first column is the gene ID, remaining columns are samples.
scount <- read.table("paired end uniquely mapped reads count table.txt",
                     header = T)
row.names(scount) <- scount[,1]
# dim(scount)
# str(scount)
# which(scount[,1] %in%"ENSSSCG00000007978")
# which(scount[,1] %in%"ENSSSCG00000014725")
#
# scount[which(scount[,1] %in%"ENSSSCG00000007978"), ]
# scount[which(scount[,1] %in%"ENSSSCG00000014725"), ]
# Drop two specific genes from the count table.
# NOTE(review): reason for excluding these two Ensembl IDs is not recorded
# here -- document it if known.
scount <- scount[-c(which(scount[,1] %in%"ENSSSCG00000007978"),
                    which(scount[,1] %in%"ENSSSCG00000014725")),]
cbc <- read.table('CBC data for pigs with RNA-seq data avaible.txt',
                  header =T)
metadata <- read.table("meta_data_RNA-seq_G9P2.txt",
                       header = T)
rfiadj <- read.csv("g8p2_g9p2-rfiadj-FINAL_Jan_2014_rfiadjusted.csv",
                   header = T)
##### cleaning data####
# Sort all three tables so rows line up by animal before cbind() below.
# NOTE(review): the merge relies purely on sort order, not on key matching --
# verify the three sources cover exactly the same animals.
cbc <- cbc[order(cbc$ear), ]
metadata <- metadata[order(metadata$idpig), ]
rfiadj <- rfiadj[order(rfiadj$idpig),]
fullidpig <- as.numeric(paste("20900", metadata$idpig, sep = ""))
covset <- cbind(metadata[, -4], rfiadj[rfiadj$idpig %in% fullidpig, c("rfi.ADJUSTED")],
                cbc[, c("iddam", "idsire", "Neutrophil", "Lymphocyte", "Monocyte",
                        "Eosinophil", "Basophil" )])
colnames(covset) <- c("idpig", "Line", "Diet", "Block", "Blockorder", "Concb",
                      "RINb", "Conca", "RINa", "RFI",
                      "iddam", "idsire", "neut",
                      "lymp", "mono","eosi", "baso")
#####set of covariates considered ####
# Log-transformed cell-count covariates appended alongside the raw ones.
covset <- cbind(covset, lneut = log(covset$neut), llymp = log(covset$lymp),
                lmono = log(covset$mono), leosi = log(covset$eosi),
                lbaso = log(covset$baso))
# Categorical design variables coded as factors; sire/dam levels are
# relabelled to small integers.
covset$Line <- as.factor(covset$Line)
covset$Diet <- as.factor(covset$Diet)
covset$Block <- as.factor(covset$Block)
covset$Blockorder <- as.factor(covset$Blockorder)
covset$iddam <- as.factor(covset$iddam)
covset$idsire <- as.factor(covset$idsire)
levels(covset$idsire) <- 1:11
levels(covset$iddam) <- 1:20
covset[, c("iddam", "idsire")]
#detach(covset)
# NOTE(review): attach() puts covset's columns in the search path (e.g. `Line`
# used below); fragile -- prefer explicit covset$... references.
attach(covset)
# counts <- as.matrix(scount[rowSums(scount[,-1]>0)>3&
# rowMeans(scount[,-1])>8 &
# rowSums(scount[,-1][,Line ==1] > 0) >0 &
# rowSums(scount[,-1][, Line ==2] >0) >0 ,-1])
# Filter: keep genes detected (count > 0) in more than 3 samples with mean
# count above 8; drop the gene-ID column.
counts <- as.matrix(scount[rowSums(scount[,-1]>0)>3&
                           rowMeans(scount[,-1])>8 ,-1])
| /EmpiricalBayes.R | no_license | nguyenty/rfi | R | false | false | 4,673 | r | #install.packages("rjags")
library(rjags)
set.seed(20141020)
#############modelm - using point mass mixture prior for signals###############
modelm <- "
model{
# likelihood
for (i in 1:length(y)){
y[i] ~ dnorm(beta[i], 1/sigma[i]^2)
beta[i] <- (1 - bin.beta[i])*norm.beta[i]
bin.beta[i] ~ dbern(pi.beta)
norm.beta[i] ~ dnorm(beta0, 1/sigma0^2)
sigma[i] ~ dunif(0, 100)
}
# prior distribution for the parameters ####
pi.beta ~ dbeta(5, 1)
beta0 ~ dnorm(0, 100)
sigma0 ~ dunif(0, 100)
}
"
# load("U:/R/RA/Data/RFI-newdata/resultpairedlogcbc/ks/Model1.Line.Diet.RFI.Concb.RINb.Conca.RINa.lneut.llymp.lmono.leosi.lbaso.Block.Blockorder/Model1_fit.RData")
# load("U:/R/RA/Data/RFI-newdata/resultpairedlogcbc/ks/Model1.Line.Diet.RFI.Concb.RINb.Conca.RINa.lneut.llymp.lmono.leosi.lbaso.Block.Blockorder/Model1_result.RData")
load("Model1_fit.RData")
load("Model1_result.RData")
#hist(fit$coef[,6], nclass = 1000)
new.beta <- list()
for(i in 1:23)
{
data <- list(y = fit$coef[,i])
m0 <- proc.time()
mm <- jags.model(textConnection(modelm), data,n.chains = 1) # mix point mass
resm <- coda.samples(mm, c("beta","sigma","beta0","sigma0","pi.beta",
"bin.beta")
, 2000) # mix point mass
sigma.i <- apply(resm[[1]][,24563:36842], 2, mean)
sigma.0 <- mean(resm[[1]][, "sigma0"])
beta.0 <- mean(resm[[1]][,"beta0"])
new.beta[[i]] <- fit$coef[,i]*(1/sigma.i^2)/(1/sigma.i^2 + 1/sigma.0^2) +
beta.0 * (1/sigma.0^2)/(1/sigma.i^2 + 1/sigma.0^2)
}
save(new.beta, file = "new.beta.RData")
source("quasiseq shrinkage functions.R")
myQL.fit
new.beta.matrix <- matrix()
for(i in 1:23){
new.beta.matrix <- cbind(new.beta.matrix, new.beta[[i]])
}
dim(new.beta.matrix)
str(new.beta)
length(new.beta[[2]])
load("new.beta.RData")
str(new.beta)
str(fit)
####
require(Matrix)
#library(QuasiSeq)
library(edgeR)
require(reshape)
require(plyr)
library(fields)
library(reshape)
library(fdrtool)
source("QL.fit.R")
source("NBDev.R")
source("PoisDev.R")
source("QL.results.R")
#resultdir <- '/run/user/1000/gvfs/smb-share:server=cyfiles.iastate.edu,share=09/22/ntyet/R/RA/Data/RFI-newdata/resultpaired'
resultdir <- "U:/R/RA/Data/RFI-newdata/resultpairedlogcbc"
scount <- read.table("paired end uniquely mapped reads count table.txt",
header = T)
row.names(scount) <- scount[,1]
# dim(scount)
# str(scount)
# which(scount[,1] %in%"ENSSSCG00000007978")
# which(scount[,1] %in%"ENSSSCG00000014725")
#
# scount[which(scount[,1] %in%"ENSSSCG00000007978"), ]
# scount[which(scount[,1] %in%"ENSSSCG00000014725"), ]
scount <- scount[-c(which(scount[,1] %in%"ENSSSCG00000007978"),
which(scount[,1] %in%"ENSSSCG00000014725")),]
cbc <- read.table('CBC data for pigs with RNA-seq data avaible.txt',
header =T)
metadata <- read.table("meta_data_RNA-seq_G9P2.txt",
header = T)
rfiadj <- read.csv("g8p2_g9p2-rfiadj-FINAL_Jan_2014_rfiadjusted.csv",
header = T)
##### cleaning data####
cbc <- cbc[order(cbc$ear), ]
metadata <- metadata[order(metadata$idpig), ]
rfiadj <- rfiadj[order(rfiadj$idpig),]
fullidpig <- as.numeric(paste("20900", metadata$idpig, sep = ""))
covset <- cbind(metadata[, -4], rfiadj[rfiadj$idpig %in% fullidpig, c("rfi.ADJUSTED")],
cbc[, c("iddam", "idsire", "Neutrophil", "Lymphocyte", "Monocyte",
"Eosinophil", "Basophil" )])
colnames(covset) <- c("idpig", "Line", "Diet", "Block", "Blockorder", "Concb",
"RINb", "Conca", "RINa", "RFI",
"iddam", "idsire", "neut",
"lymp", "mono","eosi", "baso")
#####set of covariates considered ####
covset <- cbind(covset, lneut = log(covset$neut), llymp = log(covset$lymp),
lmono = log(covset$mono), leosi = log(covset$eosi),
lbaso = log(covset$baso))
covset$Line <- as.factor(covset$Line)
covset$Diet <- as.factor(covset$Diet)
covset$Block <- as.factor(covset$Block)
covset$Blockorder <- as.factor(covset$Blockorder)
covset$iddam <- as.factor(covset$iddam)
covset$idsire <- as.factor(covset$idsire)
levels(covset$idsire) <- 1:11
levels(covset$iddam) <- 1:20
covset[, c("iddam", "idsire")]
#detach(covset)
attach(covset)
# counts <- as.matrix(scount[rowSums(scount[,-1]>0)>3&
# rowMeans(scount[,-1])>8 &
# rowSums(scount[,-1][,Line ==1] > 0) >0 &
# rowSums(scount[,-1][, Line ==2] >0) >0 ,-1])
counts <- as.matrix(scount[rowSums(scount[,-1]>0)>3&
rowMeans(scount[,-1])>8 ,-1])
|
kbmf1mkl1mkl_semisupervised_classification_variational_test <- function(Kx, Kz, state) {
  ## Prediction (test-time) step for kernelized Bayesian matrix factorization
  ## with multiple kernel learning on both domains: projects the test kernels
  ## through the trained variational means in `state` and combines them into
  ## the predicted score matrix F.
  ##
  ## Args:
  ##   Kx:    array whose slices Kx[,,m] are the Px x-side kernels (Nx columns)
  ##   Kz:    array whose slices Kz[,,n] are the Pz z-side kernels (Nz columns)
  ##   state: trained model; uses state$Ax$mu / state$Az$mu (projection
  ##          matrices with R columns) and state$ex$mu / state$ez$mu
  ##          (per-kernel combination weights)
  ##
  ## Returns: list(Gx, Hx, Gz, Hz, F), each element a list with a `mu`
  ##          component; F$mu = t(Hx$mu) %*% Hz$mu is the Nx x Nz prediction.
  Nx <- dim(Kx)[2]
  Px <- dim(Kx)[3]
  Nz <- dim(Kz)[2]
  Pz <- dim(Kz)[3]
  R <- dim(state$Ax$mu)[2]
  ## Per-kernel projections Gx[,,m] = t(Ax) %*% Kx[,,m], then the weighted
  ## combination Hx = sum_m ex[m] * Gx[,,m].  seq_len() guards the
  ## (degenerate) zero-kernel case, where 1:Px would iterate over c(1, 0).
  Gx <- list(mu = array(0, c(R, Nx, Px)))
  for (m in seq_len(Px)) {
    Gx$mu[, , m] <- crossprod(state$Ax$mu, Kx[, , m])
  }
  Hx <- list(mu = matrix(0, R, Nx))
  for (m in seq_len(Px)) {
    Hx$mu <- Hx$mu + state$ex$mu[m] * Gx$mu[, , m]
  }
  ## Same construction on the z side.
  Gz <- list(mu = array(0, c(R, Nz, Pz)))
  for (n in seq_len(Pz)) {
    Gz$mu[, , n] <- crossprod(state$Az$mu, Kz[, , n])
  }
  Hz <- list(mu = matrix(0, R, Nz))
  for (n in seq_len(Pz)) {
    Hz$mu <- Hz$mu + state$ez$mu[n] * Gz$mu[, , n]
  }
  ## Predicted scores.  (Local name avoids shadowing base R's F/FALSE.)
  Fmu <- crossprod(Hx$mu, Hz$mu)
  prediction <- list(Gx = Gx, Hx = Hx, Gz = Gz, Hz = Hz, F = list(mu = Fmu))
  ## BUG FIX: the original ended with `prediction <- list(...)`, so the value
  ## was returned invisibly; return it visibly and explicitly.
  return(prediction)
}
| /GRGMF/kbmf2k/kbmf1mkl1mkl/kbmf1mkl1mkl_semisupervised_classification_variational_test.R | no_license | RondoChow/GRGMF | R | false | false | 754 | r | kbmf1mkl1mkl_semisupervised_classification_variational_test <- function(Kx, Kz, state) {
Nx <- dim(Kx)[2]
Px <- dim(Kx)[3]
Nz <- dim(Kz)[2]
Pz <- dim(Kz)[3]
R <- dim(state$Ax$mu)[2]
Gx <- list(mu = array(0, c(R, Nx, Px)))
for (m in 1:Px) {
Gx$mu[,,m] <- crossprod(state$Ax$mu, Kx[,,m])
}
Hx <- list(mu = matrix(0, R, Nx))
for (m in 1:Px) {
Hx$mu <- Hx$mu + state$ex$mu[m] * Gx$mu[,,m]
}
Gz <- list(mu = array(0, c(R, Nz, Pz)))
for (n in 1:Pz) {
Gz$mu[,,n] <- crossprod(state$Az$mu, Kz[,,n])
}
Hz <- list(mu = matrix(0, R, Nz))
for (n in 1:Pz) {
Hz$mu <- Hz$mu + state$ez$mu[n] * Gz$mu[,,n]
}
F <- list(mu = crossprod(Hx$mu, Hz$mu))
prediction <- list(Gx = Gx, Hx = Hx, Gz = Gz, Hz = Hz, F = F)
}
|
#' @title Compute METE species area relationship (SAR)
#'
#' @description Uses raw data or state variables to calculate METE SAR
#' and EAR (endemics area relationship) as well as computing the observed
#' SAR or EAR from data, if provided
#'
#' @details Currently only doublings of area are supported. Predictions
#' and comparison to data can be made via several options. If \code{spp}
#' and \code{abund} are not provided then only theoretical predictions
#' are returned without emperical SAR or EAR results. In this case areas
#' can either be specified by providing \code{Amin} and \code{A0} from
#' which a vector of doubling areas is computed, or by providing \code{row},
#' \code{col} and \code{A0} in which case \code{row} and \code{col} are
#' taken to be the number of desired rows and columns used to construct
#' a grid across the landscape. If data are provided in the form of
#' \code{spp} and \code{abund} then either \code{row} and \code{col} or
#' \code{x} and \code{y} must be provided for each data entry (i.e. the
#' length of \code{row} and \code{col} or \code{x} and \code{y} must equal
#' the length of \code{spp} and \code{abund}). If \code{x} and \code{y}
#' are provided then the landscape is gridded either by specifying
#' \code{Amin} (the size of the smallest grid cell) or by providing the
#' number of desired rows and columns via the \code{row} and \code{col}
#' arguments.
#'
#' SARs and EARs can be predicted either iteratively or non-iteratively.
#' In the non-iterative case the SAD and SSAD (which are used to calculate
#' the SAR or EAR prediction) are derived from state variables at one
#' anchor scale. In the iterative approach state variables are re-calculated
#' at each scale. Currently downscaling and upscaling are done differently (
#' downscaling is only implemented in the non-iterative approach, whereas
#' upscaling is only implemented in the iterative approach). The reason is
#' largely historical (downscaling was originally done non-iteratively while
#' upscaling was first proposed in an iterative framework). Future implementations
#' in \code{meteR} will allow for both iterative and non-iterative approaches
#' to upscaling and downscaling. While iterative and non-iterative methods lead to
#' slightly different predictions these are small in comparison to typical ranges of
#' state variables (see Harte 2011).
#'
#'
#' @param spp vector of species identities
#' @param abund numeric vector of abundances associated with each record
#' @param row identity of row in a gridded landscape associated with each record, or desired number of rows to divide the landscape into
#' @param col identity of column in a gridded landscape associated with each record, or desired number of columns to divide the landscape into
#' @param x the x-coordinate of an individual if recorded
#' @param y the y-coordinate of an individual if recorded
#' @param S0 total number of species
#' @param N0 total abundance
#' @param Amin the smallest area, either the anchor area for upscaling or the desired area to downscale to
#' @param A0 the largest area, either the area to upscale to or the total area from which to downscale
#' @param upscale logical, should upscaling or downscaling be carried out
#' @param EAR logical, should the EAR or SAR be computed
#'
#' @export
#'
#' @examples
#' \dontrun{
#' data(anbo)
#'
#' ## using row and col from anbo dataset
#' anbo.sar1 <- meteSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' plot(anbo.sar1)
#'
#' ## using simulated x, y data
#' anbo.sar2 <- meteSAR(anbo$spp, anbo$count, x=anbo$x, y=anbo$y, row=4, col=4)
#' plot(anbo.sar2)
#'
#' ## using just state variable
#' thr.sar <- meteSAR(Amin=1, A0=16, S0=50, N0=500)
#' }
#' @return an object of class \code{meteRelat} with elements
#' \describe{
#' \item{\code{pred}}{predicted relationship; an object of class \code{sar}}
#' \item{\code{obs}}{observed relationship; an object of class\code{sar}}
#' }
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso sad, meteESF, metePi
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
meteSAR <- function(spp, abund, row, col, x, y, S0 = NULL, N0 = NULL,
                    Amin, A0, upscale=FALSE, EAR=FALSE) {
  ## Figure out the vector of sizes in units of cells; right now only
  ## doublings of area are supported.  Not needed if upscale is TRUE
  ## (upscaleSAR derives its own scales directly from Amin and A0).
  if(!upscale) {
    ## .findAreas() resolves the several allowed ways of specifying areas
    ## (row/col ids, x/y coordinates, Amin/A0 -- see Details); missing()
    ## arguments are forwarded as NULL so it can tell which mode applies.
    areaInfo <- .findAreas(
      spp=if(missing(spp)) NULL else spp,
      abund=if(missing(abund)) NULL else abund,
      row=if(missing(row)) NULL else row,
      col=if(missing(col)) NULL else col,
      x=if(missing(x)) NULL else x,
      y=if(missing(y)) NULL else y,
      Amin=if(missing(Amin)) NULL else Amin,
      A0=if(missing(A0)) NULL else A0)
    ## Unpack the resolved gridding; these overwrite whatever form the
    ## caller originally supplied.
    areas <- areaInfo$areas
    row <- areaInfo$row
    col <- areaInfo$col
    nrow <- areaInfo$nrow
    ncol <- areaInfo$ncol
    Amin <- areaInfo$Amin
    A0 <- areaInfo$A0
  }
  if(upscale & EAR) stop('upscaling EAR not currently supported')
  ## The ESF state variables: take them from the raw data when given,
  ## otherwise S0 and N0 must have been supplied directly.
  if(!missing(spp) & !missing(abund)) {
    S0 <- length(unique(spp))
    N0 <- sum(abund)
  }
  if(is.null(S0) | is.null(N0)) stop('must provide spp and abund data or state variables S0 and N0')
  thisESF <- meteESF(S0=S0, N0=N0)
  ## Calculate the empirical SAR/EAR only when raw data were supplied.
  if(!missing(spp) & !missing(abund)) {
    eSAR <- empiricalSAR(spp, abund, row=row, col=col, Amin=Amin, A0=A0, EAR=EAR)
  } else {
    eSAR <- NULL
  }
  ## Calculate the theoretical SAR: iterative upscaling or non-iterative
  ## downscaling, depending on `upscale`.
  if(upscale) {
    thrSAR <- upscaleSAR(thisESF, Amin, A0, EAR)
  } else {
    thrSAR <- downscaleSAR(thisESF, areas*Amin, A0, EAR)
  }
  ## Package observed and predicted relationships together.
  out <- list(obs=eSAR, pred=thrSAR)
  class(out) <- 'meteRelat'
  return(out)
}
#================================================================
#' @title Empirical SAR or EAR
#'
#' @description computes observed SAR or EAR from raw data
#'
#' @details Currently only doublings of area are supported. There are
#' several options for specifying areas. Either \code{row} and \code{col} or
#' \code{x} and \code{y} must be provided for each data entry (i.e. the
#' length of \code{row} and \code{col} or \code{x} and \code{y} must equal
#' the length of \code{spp} and \code{abund}). If \code{x} and \code{y}
#' are provided then the landscape is gridded either by specifying
#' \code{Amin} (the size of the smallest grid cell) or by providing the
#' number or desired rows and columns via the \code{row} and \code{col}
#' arguments. If only \code{row} and \code{col} are provided these are taken
#' to be the row and column identities of each data entry
#'
#'
#'
#' @param spp vector of species identities
#' @param abund numeric vector of abundances associated with each record
#' @param row identity of row in a gridded landscape associated with each record, or desired number of rows to divide the landscape into
#' @param col identity of column in a gridded landscape associated with each record, or desired number of columns to divide the landscape into
#' @param x the x-coordinate of an individual if recorded
#' @param y the y-coordinate of an individual if recorded
#' @param Amin the smallest area, either the anchor area for upscaling or the desired area to downscale to
#' @param A0 the largest area, either the area to upscale to or the total area from which to downscale
#' @param EAR logical, should the EAR or SAR be computed
#'
#' @export
#'
#' @examples
#' data(anbo)
#' anbo.obs.sar <- empiricalSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' plot(anbo.obs.sar)
#' anbo.obs.ear <- empiricalSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16, EAR=TRUE)
#' plot(anbo.obs.ear)
#'
#' ## empirical SAR from simulated x, y data
#' anbo$x <- runif(nrow(anbo), 0, 1) + anbo$column
#' anbo$y <- runif(nrow(anbo), 0, 1) + anbo$row
#' meteSAR(anbo$spp, anbo$count, x=anbo$x, y=anbo$y, row=4, col=4)
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, downscaleSAR, upscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
empiricalSAR <- function(spp, abund, row, col, x, y, Amin, A0, EAR=FALSE) {
  ## Figure out the vector of sizes in units of cells; right now only
  ## doublings of area are supported.  .findAreas() resolves the different
  ## ways areas can be specified (row/col ids, x/y coordinates, Amin/A0);
  ## missing() arguments are forwarded as NULL.
  areaInfo <- .findAreas(
    spp=if(missing(spp)) NULL else spp,
    abund=if(missing(abund)) NULL else abund,
    row=if(missing(row)) NULL else row,
    col=if(missing(col)) NULL else col,
    x=if(missing(x)) NULL else x,
    y=if(missing(y)) NULL else y,
    Amin=if(missing(Amin)) NULL else Amin,
    A0=if(missing(A0)) NULL else A0)
  areas <- areaInfo$areas
  row <- areaInfo$row
  col <- areaInfo$col
  nrow <- areaInfo$nrow
  ncol <- areaInfo$ncol
  Amin <- areaInfo$Amin
  A0 <- areaInfo$A0
  ## Loop over areas (in cell units): .getNeighbors() groups cells into
  ## blocks of size `a`, and .getSppInGroups() tallies richness (or, when
  ## EAR=TRUE, endemics) for those groups.  NOTE(review): assumed to return
  ## one value per area -- confirm against the helpers' definitions.
  out <- lapply(areas, function(a) {
    nspp <- .getSppInGroups(spp, abund, row, col, .getNeighbors(a, nrow, ncol), EAR)
    data.frame(A=a*Amin, S=nspp)  # report area in the caller's units
  })
  out <- do.call(rbind, out)
  ## make output of class `sar' and tell it about empirical v. theoretical
  ## and ear v. sar (plot/print methods dispatch on these attributes)
  attr(out, 'source') <- 'empirical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
#================================================================
#' @title Downscale the species area relationship (SAR) or endemics area relationship (EAR)
#'
#' @description Compute METE SAR by downscaling from some larger area \code{A0} to a smaller areas.
#'
#' @details Downscaling is done non-iteratively (i.e. the SAD and SSAD are calculated based on state variables at the anchor scale A0) thus unlike the upscaling SAR function, downscaling can be computed for any arbitrary scale
#' \eqn{\leq A_0}.
#'
#' @param x an object of class meteESF
#' @param A numerical vector of areas (<= \code{A0}) for which the METE prediction is desired
#' @param A0 total study area
#' @param EAR logical. TRUE computes the endemics area relationship
#'
#' @export
#'
#' @examples
#' data(anbo)
#' anbo.esf <- meteESF(spp=anbo$spp, abund=anbo$count)
#' anbo.thr.downscale <- downscaleSAR(anbo.esf, 2^(seq(-3, 4, length=7)), 16)
#' plot(anbo.thr.downscale)
#'
#' ## theoretical SARs from state variables only
#' thr.downscale <- downscaleSAR(meteESF(S0=40, N0=400), 2^seq(-1,4,by=1), 16)
#' thr.downscaleEAR <- downscaleSAR(meteESF(S0=40, N0=400), 2^seq(-1, 4, by=1), 16, EAR=TRUE)
#' plot(thr.downscale, ylim=c(0, 40), col='red')
#' plot(thr.downscaleEAR, add=TRUE, col='blue')
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, empiricalSAR, upscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
downscaleSAR <- function(x, A, A0, EAR=FALSE) {
  ## Non-iterative downscaling: SAD and SSAD are evaluated from the state
  ## variables at the anchor scale A0, so any area A <= A0 is allowed.
  ## candidate per-species abundances 1..N0 at the anchor scale
  n0 <- 1:x$state.var['N0']
  ## difference between EAR and SAR is for EAR we get Pi(n0) [fun .getPin0]
  ## and for SAR we get 1 - Pi(0) [1 - .getPi0]
  if(EAR) {
    piFun <- function(a) .getPin0(n0, a, A0)
  } else {
    piFun <- function(a) 1 - .getPi0(n0, a, A0)
  }
  ## function to get species number at scale `a': expected richness is
  ## S0 * sum over n0 of Pi(presence | n0) * Phi(n0), Phi being the METE SAD
  getspp <- function(a) {
    probs <- piFun(a) *
      with(x,
           metePhi(n0, La[1], La[2], Z,
                   state.var['S0'], state.var['N0'],
                   ## when E0 was not supplied, fall back to N0*10^3
                   ## (presumably "effectively infinite" energy; confirm
                   ## against the meteESF default)
                   ifelse(is.na(state.var['E0']), state.var['N0']*10^3, state.var['E0'])))
    return(x$state.var['S0'] * sum(probs))
  }
  ## loop over A
  nspp <- sapply(A, getspp)
  ## should return matrix with column for area and column for spp
  out <- data.frame(A=A, S=nspp)
  ## tag output as theoretical and as ear v. sar; class `sar' drives plotting
  attr(out, 'source') <- 'theoretical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
#================================================================
#' @title upscale SAR
#'
#' @description Based on information at an anchor scale (\code{A0})
#' calculate predicted species area relationship at larger scales
#'
#' @details Currently only doublings of area are supported and only
#' the SAR (not EAR) is supported. Upscaling works by iteratively
#' solving for the constraints (\eqn{S} and \eqn{N} at larger scales)
#' that would lead to the observed data at the anchor scale. See
#' references for more details on this approach.
#'
#'
#' @param x an object of class meteESF
#' @param A0 the anchor scale at which community data are available.
#' @param Aup the largest area to which to upscale
#' @param EAR logical. TRUE computes the endemics area relationship; currently not supported
#'
#' @export
#'
#' @examples
#' ## combine SAR for scales at which we have data with upscaled SAR
#' data(anbo)
#' anbo.sar <- meteSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' anbo.sar
#' plot(anbo.sar, xlim=c(1, 2^10), ylim=c(3, 50), log='xy')
#'
#' ## get upscaled SAR and add to plot
#' anbo.esf <- meteESF(spp=anbo$spp, abund=anbo$count) # need ESF for upscaling
#' anbo.sarUP <- upscaleSAR(anbo.esf, 16, 2^10)
#' plot(anbo.sarUP, add=TRUE, col='blue')
#'
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, empiricalSAR, downscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
upscaleSAR <- function(x, A0, Aup, EAR=FALSE) {
  ## Iteratively upscale the SAR from anchor area A0 to (at least) Aup by
  ## successive doublings of area (see Harte 2011).
  ## x:   a meteESF object (state variables read from x$state.var)
  ## A0:  anchor area at which data are available
  ## Aup: largest area to upscale to
  ## EAR: accepted for interface consistency; only affects the output
  ##      `type' attribute (upscaled EARs are not implemented)
  ## doubling factors needed to reach Aup; computed once and reused below
  doublings <- 2^(0:ceiling(log(Aup/A0)/log(2)))
  ## vector of areas starting with anchor area A0
  Aups <- A0 * doublings
  ## abundance at each area: N0 doubles with each doubling of area
  N0s <- x$state.var['N0'] * doublings
  ## species richness at each area; anchor value taken from state variables
  S0s <- numeric(length(Aups))
  S0s[1] <- x$state.var['S0']
  ## recursively solve the constraint function (see `.solveUpscale') up to
  ## Aup; seq_along(...)[-1] is empty when Aup <= A0, avoiding the
  ## backwards 2:1 iteration that 2:length(Aups) would produce
  for(i in seq_along(Aups)[-1]) {
    S0s[i] <- .solveUpscale(S0s[i-1], N0s[i-1])
  }
  ## return a data.frame with a column for area and a column for richness
  out <- data.frame(A=Aups, S=S0s)
  attr(out, 'source') <- 'theoretical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
| /R/sar.R | no_license | cmerow/meteR | R | false | false | 15,404 | r | #' @title Compute METE species area relationship (SAR)
#'
#' @description Uses raw data or state variables to calculate METE SAR
#' and EAR (endemics area relatiohsip) as well as compute the observed
#' SAR or EAR from data, if provided
#'
#' @details Currently only doublings of area are supported. Predictions
#' and comparison to data can be made via several options. If \code{spp}
#' and \code{abund} are not provided then only theoretical predictions
#' are returned without emperical SAR or EAR results. In this case areas
#' can either be specified by providing \code{Amin} and \code{A0} from
#' which a vector of doubling areas is computed, or my providing \code{row},
#' \code{col} and \code{A0} in which case \code{row} and \code{col} are
#' taken to be the number of desired rows and columns used to construct
#' a grid across the landscape. If data are provided in the form of
#' \code{spp} and \code{abund} then either \code{row} and \code{col} or
#' \code{x} and \code{y} must be provided for each data entry (i.e. the
#' length of \code{row} and \code{col} or \code{x} and \code{y} must equal
#' the length of \code{spp} and \code{abund}). If \code{x} and \code{y}
#' are provided then the landscape is gridded either by specifying
#' \code{Amin} (the size of the smallest grid cell) or by providing the
#' number or desired rows and columns via the \code{row} and \code{col}
#' arguments.
#'
#' SARs and EARs can be predicted either interatively or non-iteratively.
#' In the non-iterative case the SAD and SSAD (which are used to calculate
#' the SAR or EAR prediction) are derived from state variables at one
#' anchor scale. In the iterative approach state variables are re-calculated
#' at each scale. Currently downscaling and upscaling are done differently (
#' downscaling is only implemented in the non-iterative approach, whereas
#' upscaling is only implemented in the iterative approach). The reason is
#' largely historical (downscaling as originally done non-iteratively while
#' upscaling was first proposed in an iterative framework). Future implementations
#' in \code{meteR} will allow for both iterative and non-iterative approaches
#' to upscaling and downscaling. While iterative and non-iterative methods lead to
#' slightly different predictions these are small in comparison to typical ranges of
#' state variables (see Harte 2011).
#'
#'
#' @param spp vector of species identities
#' @param abund numberic vector abundances associated with each record
#' @param row identity of row in a gridded landscape associated with each record, or desired number of rows to divide the landcape into
#' @param col identity of column in a gridded landscape associated with each recod, or desired number of columns to divide the landcape into
#' @param x the x-coordinate of an individual if recorded
#' @param y the y-coordinate of an individual if recorded
#' @param S0 total number of species
#' @param N0 total abundance
#' @param Amin the smallest area, either the anchor area for upscaling or the desired area to downscale to
#' @param A0 the largest area, either the area to upscale to or the total area from which to downscale
#' @param upscale logical, should upscaling or downscaling be carried out
#' @param EAR logical, should the EAR or SAR be computed
#'
#' @export
#'
#' @examples
#' \dontrun{
#' data(anbo)
#'
#' ## using row and col from anbo dataset
#' anbo.sar1 <- meteSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' plot(anbo.sar1)
#'
#' ## using simulated x, y data
#' anbo.sar2 <- meteSAR(anbo$spp, anbo$count, x=anbo$x, y=anbo$y, row=4, col=4)
#' plot(anbo.sar2)
#'
#' ## using just state variable
#' thr.sar <- meteSAR(Amin=1, A0=16, S0=50, N0=500)
#' }
#' @return an object of class \code{meteRelat} with elements
#' \describe{
#' \item{\code{pred}}{predicted relationship; an object of class \code{sar}}
#' \item{\code{obs}}{observed relationship; an object of class\code{sar}}
#' }
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso sad, meteESF, metePi
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
meteSAR <- function(spp, abund, row, col, x, y, S0 = NULL, N0 = NULL,
                    Amin, A0, upscale=FALSE, EAR=FALSE) {
  ## Compute the METE SAR/EAR prediction and, when raw data are supplied,
  ## the matching empirical relationship (see roxygen docs above for the
  ## argument combinations). Returns a list of class `meteRelat' with
  ## elements `obs' (empirical sar, or NULL) and `pred' (theoretical sar).
  ## figure out vector of sizes in units of cells; right now only doublings
  ## supported; not needed if upscale is TRUE
  if(!upscale) {
    areaInfo <- .findAreas(
      spp=if(missing(spp)) NULL else spp,
      abund=if(missing(abund)) NULL else abund,
      row=if(missing(row)) NULL else row,
      col=if(missing(col)) NULL else col,
      x=if(missing(x)) NULL else x,
      y=if(missing(y)) NULL else y,
      Amin=if(missing(Amin)) NULL else Amin,
      A0=if(missing(A0)) NULL else A0)
    areas <- areaInfo$areas
    row <- areaInfo$row
    col <- areaInfo$col
    nrow <- areaInfo$nrow
    ncol <- areaInfo$ncol
    Amin <- areaInfo$Amin
    A0 <- areaInfo$A0
  }
  ## scalar conditions: use short-circuiting `&&'/`||' (not elementwise
  ## `&'/`|', which error on length > 1 inputs in `if' since R 4.3)
  if(upscale && EAR) stop('upscaling EAR not currently supported')
  ## state variables come from the data when provided, else from S0/N0
  if(!missing(spp) && !missing(abund)) {
    S0 <- length(unique(spp))
    N0 <- sum(abund)
  }
  if(is.null(S0) || is.null(N0)) stop('must provide spp and abund data or state variables S0 and N0')
  thisESF <- meteESF(S0=S0, N0=N0)
  ## empirical SAR is only possible when raw data were given
  if(!missing(spp) && !missing(abund)) {
    eSAR <- empiricalSAR(spp, abund, row=row, col=col, Amin=Amin, A0=A0, EAR=EAR)
  } else {
    eSAR <- NULL
  }
  ## theoretical prediction: iterative upscaling or non-iterative downscaling
  if(upscale) {
    thrSAR <- upscaleSAR(thisESF, Amin, A0, EAR)
  } else {
    thrSAR <- downscaleSAR(thisESF, areas*Amin, A0, EAR)
  }
  out <- list(obs=eSAR, pred=thrSAR)
  class(out) <- 'meteRelat'
  return(out)
}
#================================================================
#' @title Empirical SAR or EAR
#'
#' @description computes observed SAR or EAR from raw data
#'
#' @details Currently only doublings of area are supported. There are
#' several options for specifying areas. Either \code{row} and \code{col} or
#' \code{x} and \code{y} must be provided for each data entry (i.e. the
#' length of \code{row} and \code{col} or \code{x} and \code{y} must equal
#' the length of \code{spp} and \code{abund}). If \code{x} and \code{y}
#' are provided then the landscape is gridded either by specifying
#' \code{Amin} (the size of the smallest grid cell) or by providing the
#' number or desired rows and columns via the \code{row} and \code{col}
#' arguments. If only \code{row} and \code{col} are provided these are taken
#' to be the row and column identities of each data entry
#'
#'
#'
#' @param spp vector of species identities
#' @param abund numberic vector abundances associated with each record
#' @param row identity of row in a gridded landscape associated with each record, or desired number of rows to divide the landcape into
#' @param col identity of column in a gridded landscape associated with each recod, or desired number of columns to divide the landcape into
#' @param x the x-coordinate of an individual if recorded
#' @param y the y-coordinate of an individual if recorded
#' @param Amin the smallest area, either the anchor area for upscaling or the desired area to downscale to
#' @param A0 the largest area, either the area to upscale to or the total area from which to downscale
#' @param EAR logical, should the EAR or SAR be computed
#'
#' @export
#'
#' @examples
#' data(anbo)
#' anbo.obs.sar <- empiricalSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' plot(anbo.obs.sar)
#' anbo.obs.ear <- empiricalSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16, EAR=TRUE)
#' plot(anbo.obs.ear)
#'
#' ## empirical SAR from simulated x, y data
#' anbo$x <- runif(nrow(anbo), 0, 1) + anbo$column
#' anbo$y <- runif(nrow(anbo), 0, 1) + anbo$row
#' meteSAR(anbo$spp, anbo$count, x=anbo$x, y=anbo$y, row=4, col=4)
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, downscaleSAR, upscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
empiricalSAR <- function(spp, abund, row, col, x, y, Amin, A0, EAR=FALSE) {
  ## Compute the observed SAR (or EAR when EAR=TRUE) from raw data; see the
  ## roxygen docs above for the supported argument combinations.
  ## figure out vector of sizes in units of cells; right now only doublings supported
  ## each argument is forwarded as NULL when missing so .findAreas can
  ## decide which gridding strategy applies
  areaInfo <- .findAreas(
    spp=if(missing(spp)) NULL else spp,
    abund=if(missing(abund)) NULL else abund,
    row=if(missing(row)) NULL else row,
    col=if(missing(col)) NULL else col,
    x=if(missing(x)) NULL else x,
    y=if(missing(y)) NULL else y,
    Amin=if(missing(Amin)) NULL else Amin,
    A0=if(missing(A0)) NULL else A0)
  ## unpack the (possibly re-derived) gridding information
  areas <- areaInfo$areas
  row <- areaInfo$row
  col <- areaInfo$col
  nrow <- areaInfo$nrow
  ncol <- areaInfo$ncol
  Amin <- areaInfo$Amin
  A0 <- areaInfo$A0
  ## loop over areas: at each cell aggregation `a' count species (or
  ## endemics) within the groups of neighboring cells
  out <- lapply(areas, function(a) {
    nspp <- .getSppInGroups(spp, abund, row, col, .getNeighbors(a, nrow, ncol), EAR)
    data.frame(A=a*Amin, S=nspp)
  })
  out <- do.call(rbind, out)
  ## make output of class `sar' and tell it about empirical v. theoretical and ear v. sar
  attr(out, 'source') <- 'empirical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
#================================================================
#' @title Downscale the species area relationship (SAR) or endemics area relationship (EAR)
#'
#' @description Compute METE SAR by downscaling from some larger area \code{A0} to a smaller areas.
#'
#' @details Downscaling is done non-iteratively (i.e. the SAD and SSAD are calculated based on state variables at the anchor scale A0) thus unlike the upscaling SAR function, downscaling can be computed for any arbitrary scale
#' \eqn{\leq A_0}.
#'
#' @param x an object of class meteESF
#' @param A numerical vector of areas (<= \code{A0}) for which the METE prediction is desired
#' @param A0 total study area
#' @param EAR logical. TRUE computes the endemics area relationship
#'
#' @export
#'
#' @examples
#' data(anbo)
#' anbo.esf <- meteESF(spp=anbo$spp, abund=anbo$count)
#' anbo.thr.downscale <- downscaleSAR(anbo.esf, 2^(seq(-3, 4, length=7)), 16)
#' plot(anbo.thr.downscale)
#'
#' ## theoretical SARs from state variables only
#' thr.downscale <- downscaleSAR(meteESF(S0=40, N0=400), 2^seq(-1,4,by=1), 16)
#' thr.downscaleEAR <- downscaleSAR(meteESF(S0=40, N0=400), 2^seq(-1, 4, by=1), 16, EAR=TRUE)
#' plot(thr.downscale, ylim=c(0, 40), col='red')
#' plot(thr.downscaleEAR, add=TRUE, col='blue')
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, empiricalSAR, upscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
downscaleSAR <- function(x, A, A0, EAR=FALSE) {
  ## Non-iterative downscaling: SAD and SSAD are evaluated from the state
  ## variables at the anchor scale A0, so any area A <= A0 is allowed.
  ## candidate per-species abundances 1..N0 at the anchor scale
  n0 <- 1:x$state.var['N0']
  ## difference between EAR and SAR is for EAR we get Pi(n0) [fun .getPin0]
  ## and for SAR we get 1 - Pi(0) [1 - .getPi0]
  if(EAR) {
    piFun <- function(a) .getPin0(n0, a, A0)
  } else {
    piFun <- function(a) 1 - .getPi0(n0, a, A0)
  }
  ## function to get species number at scale `a': expected richness is
  ## S0 * sum over n0 of Pi(presence | n0) * Phi(n0), Phi being the METE SAD
  getspp <- function(a) {
    probs <- piFun(a) *
      with(x,
           metePhi(n0, La[1], La[2], Z,
                   state.var['S0'], state.var['N0'],
                   ## when E0 was not supplied, fall back to N0*10^3
                   ## (presumably "effectively infinite" energy; confirm
                   ## against the meteESF default)
                   ifelse(is.na(state.var['E0']), state.var['N0']*10^3, state.var['E0'])))
    return(x$state.var['S0'] * sum(probs))
  }
  ## loop over A
  nspp <- sapply(A, getspp)
  ## should return matrix with column for area and column for spp
  out <- data.frame(A=A, S=nspp)
  ## tag output as theoretical and as ear v. sar; class `sar' drives plotting
  attr(out, 'source') <- 'theoretical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
#================================================================
#' @title upscale SAR
#'
#' @description Based on information at an anchor scale (\code{A0})
#' calculate predicted species area relationship at larger scales
#'
#' @details Currently only doublings of area are supported and only
#' the SAR (not EAR) is supported. Upscaling works by iteratively
#' solving for the constraints (\eqn{S} and \eqn{N} at larger scales)
#' that would lead to the observed data at the anchor scale. See
#' references for more details on this approach.
#'
#'
#' @param x an object of class meteESF
#' @param A0 the anchor scale at which community data are available.
#' @param Aup the largest area to which to upscale
#' @param EAR logical. TRUE computes the endemics area relationship; currently not supported
#'
#' @export
#'
#' @examples
#' ## combine SAR for scales at which we have data with upscaled SAR
#' data(anbo)
#' anbo.sar <- meteSAR(anbo$spp, anbo$count, anbo$row, anbo$col, Amin=1, A0=16)
#' anbo.sar
#' plot(anbo.sar, xlim=c(1, 2^10), ylim=c(3, 50), log='xy')
#'
#' ## get upscaled SAR and add to plot
#' anbo.esf <- meteESF(spp=anbo$spp, abund=anbo$count) # need ESF for upscaling
#' anbo.sarUP <- upscaleSAR(anbo.esf, 16, 2^10)
#' plot(anbo.sarUP, add=TRUE, col='blue')
#'
#'
#' @return an object of class \code{sar} inheriting from \code{data.frame} with
#' columns \code{A} and \code{S} giving area and species richness, respectively
#'
#' @author Andy Rominger <ajrominger@@gmail.com>, Cory Merow
#' @seealso meteESF, meteSAR, empiricalSAR, downscaleSAR
#' @references Harte, J. 2011. Maximum entropy and ecology: a theory of abundance, distribution, and energetics. Oxford University Press.
# @aliases - a list of additional topic names that will be mapped to
# this documentation when the user looks them up from the command
# line.
# @family sar
upscaleSAR <- function(x, A0, Aup, EAR=FALSE) {
  ## Iteratively upscale the SAR from anchor area A0 to (at least) Aup by
  ## successive doublings of area (see Harte 2011).
  ## x:   a meteESF object (state variables read from x$state.var)
  ## A0:  anchor area at which data are available
  ## Aup: largest area to upscale to
  ## EAR: accepted for interface consistency; only affects the output
  ##      `type' attribute (upscaled EARs are not implemented)
  ## doubling factors needed to reach Aup; computed once and reused below
  doublings <- 2^(0:ceiling(log(Aup/A0)/log(2)))
  ## vector of areas starting with anchor area A0
  Aups <- A0 * doublings
  ## abundance at each area: N0 doubles with each doubling of area
  N0s <- x$state.var['N0'] * doublings
  ## species richness at each area; anchor value taken from state variables
  S0s <- numeric(length(Aups))
  S0s[1] <- x$state.var['S0']
  ## recursively solve the constraint function (see `.solveUpscale') up to
  ## Aup; seq_along(...)[-1] is empty when Aup <= A0, avoiding the
  ## backwards 2:1 iteration that 2:length(Aups) would produce
  for(i in seq_along(Aups)[-1]) {
    S0s[i] <- .solveUpscale(S0s[i-1], N0s[i-1])
  }
  ## return a data.frame with a column for area and a column for richness
  out <- data.frame(A=Aups, S=S0s)
  attr(out, 'source') <- 'theoretical'
  attr(out, 'type') <- ifelse(EAR, 'ear', 'sar')
  class(out) <- 'sar'
  return(out)
}
|
\name{plot.warn}
\alias{plot.warn}
\title{
Plot for posterior weaning parameters
}
\description{
\code{plot.warn} is a plotting utility for quick visualization of the result of weaning age reconstruction from an object of class \code{"warn"}. The measured and modeled bone collagen nitrogen isotope ratios (d15Ns) are plotted against estimated ages.
}
\usage{
\method{plot}{warn}(x, hline.female = TRUE, hline.adult = FALSE,
adult.mean = NA, adult.sd = 0,
is.legend = TRUE, is.female = TRUE, plot = TRUE, \dots)
}
\arguments{
\item{x}{an object of class \code{"warn"} generated by \code{\link{warn}}.}
\item{hline.female, hline.adult}{logical; if \code{TRUE}, horizontal dotted lines indicating one standard deviation ranges of adult females and all adults are plotted on the figure; the default is \code{TRUE} for female and \code{FALSE} for adult.}
\item{adult.mean}{the mean d15N of all adults, an optional single value.}
\item{adult.sd}{the one standard deviation of all adult d15N, an optional single value. Utilized later in \code{\link{plot.warn}}.}
\item{is.legend}{logical; if \code{TRUE} (the default), legend is plotted on the figure.}
\item{is.female}{logical; if \code{TRUE} (the default), female mean d15N is plotted on the figure.}
\item{plot}{logical; if \code{TRUE} (the default), a figure is plotted, otherwise a list of d15N changes in modeled bone and modeled diet is returned.}
\item{\dots}{additional arguments passed to \code{\link{plot}}.}
}
\details{
The following three results on d15N are plotted against age, the original measured ratios of non-adult bone collagen (Measured d15N), modeled ratios of non-adult bone collagen (Modeled d15N), and modeled ratios of collagen synthesized entirely from consumed foods in the last half year (Modeled diet). These results are calculated in \code{\link{warn}}. Mean d15Ns of adult females and all adults can also be plotted.
}
\references{
Tsutaya, T., and Yoneda, M. (2013). Quantitative reconstruction of weaning ages in archaeological human populations using bone collagen nitrogen isotope ratios and approximate Bayesian computation. \emph{PLoS ONE} \bold{8}, e72327.
}
\seealso{
\code{\link{WARN}}, \code{\link{warn}}, \code{\link{warnCI}}, \code{\link{warnProb}}, \code{\link{summary.warn}}
}
\examples{
## See ?warn for examples.
}
\keyword{math}
| /man/plot.warn.Rd | no_license | cran/WARN | R | false | false | 2,352 | rd | \name{plot.warn}
\alias{plot.warn}
\title{
Plot for posterior weaning parameters
}
\description{
\code{plot.warn} is a plotting utile for quick visualization of the result of weaning age reconstruction from an object of class \code{"warn"}. The measured and modeled bone collagen nitrogen isotope ratios (d15Ns) are plotted ageinst estimated ages.
}
\usage{
\method{plot}{warn}(x, hline.female = TRUE, hline.adult = FALSE,
adult.mean = NA, adult.sd = 0,
is.legend = TRUE, is.female = TRUE, plot = TRUE, \dots)
}
\arguments{
\item{x}{an object of class \code{"warn"} generated by \code{\link{warn}}.}
\item{hline.female, hline.adult}{logical; if \code{TRUE}, horizontal dotted lines indicating one standard deviation ranges of adult females and all adults are plotted on the figure; the default is \code{TRUE} for female and \code{FALSE} for adult.}
\item{adult.mean}{the mean d15N of all adults, an optional single value.}
\item{adult.sd}{the one standard deviation of all adult d15N, an optional single value. Utilized later in \code{\link{plot.warn}}.}
\item{is.legend}{logical; if \code{TRUE} (the default), legend is plotted on the figure.}
\item{is.female}{logical; if \code{TRUE} (the default), female mean d15N is plotted on the figure.}
\item{plot}{logical; if \code{TRUE} (the default), a figure is plotted, otherwise a list of d15N changes in modeled bone and modeled diet is returned.}
\item{\dots}{additional arguments passed to \code{\link{plot}}.}
}
\details{
The following three results on d15N are plotted against age, the original measured ratios of non-adult bone collagen (Measured d15N), modeled ratios of non-adult bone collagen (Modeled d15N), and modeled ratios of collagen synthesized entirely from consumed foods in the last half year (Modeled diet). These results are calculated in \code{\link{warn}}. Mean d15Ns of adult females and all adults can also be plotted.
}
\references{
Tsutaya, T., and Yoneda, M. (2013). Quantitative reconstruction of weaning ages in archaeological human populations using bone collagen nitrogen isotope ratios and approximate Bayesian computation. \emph{PLoS ONE} \bold{8}, e72327.
}
\seealso{
\code{\link{WARN}}, \code{\link{warn}}, \code{\link{warnCI}}, \code{\link{warnProb}}, \code{\link{summary.warn}}
}
\examples{
## See ?warn for examples.
}
\keyword{math}
|
## Optimizer settings --------------------------------------------------
start_softmax = 0.3  # starting value for the softmax temperature
start_lazy_sticky = -1  # starting value (logit scale) for laziness/stickiness
model_selection = 0:2  # models to fit: 0 = PID, 1 = lazy, 2 = Bayes
participant_selection = 1:1  # participants to fit (currently only the first)
fnscale = -1  # fnscale = -1 makes optim() maximize the likelihood
################################
# functions
################################
## Logistic (inverse-logit) transform: maps a real value onto (0, 1).
## NOTE(review): despite the name, this is the inverse logit; the forward
## logit lives in `logit_reverse` below.
logit <- function (a){
  ea <- exp(a)
  ea / (ea + 1)
}
## Logit transform: maps a probability in (0, 1) back onto the real line.
logit_reverse <- function (y){
  odds <- y / (1 - y)
  log(odds)
}
## Row-wise softmax: each row of `raw` is turned into a probability
## distribution, with `temperature` controlling how peaked it is.
softmax <- function (raw, temperature){
  scaled <- exp(raw / temperature)
  scaled / rowSums(scaled)
}
## Laziness transform: shrinks all response probabilities toward zero and
## reallocates the removed mass onto the third column ("stay"). The raw
## parameter is mapped through `logit` onto (0, 1) first.
lazyness <- function (raw, lazyness_param){
  w <- logit (lazyness_param)
  shrunk <- raw*(1-w)
  sweep(shrunk, 2, c(0,0,1)*w,"+")
}
## Stickiness transform: blends the current probabilities with the previous
## frame's probabilities, weighted by the logit-mapped stickiness parameter.
stickiness <- function (raw, raw_previous, stickiness_param){
  w <- logit(stickiness_param)
  raw*(1-w)+raw_previous*w
}
################################
# setting up df
################################
## df.mc_likelihood holds, per frame, a one-hot coding of the real response
## (cols 1-3) and each model's predicted response probabilities:
## PID (cols 4-6), lazy (cols 7-9), Bayes (cols 10-12).
df.mc_likelihood <- data.frame(matrix("", nrow = (length(df.results_V2[,2])), ncol = 12))
colnames (df.mc_likelihood) <- c("real_up", "real_down","real_stay","PID_up","PID_down","PID_stay","lazy_up","lazy_down","lazy_stay", "bayes_up","bayes_down","bayes_stay")
# intervention:
# 1 = up, 3=down, 0=stay
df.mc_likelihood[,1] = as.data.frame(ifelse (df.results_V2[,3]==1, 1, 0))
df.mc_likelihood[,2]= as.data.frame(ifelse (df.results_V2[,3]==3, 1, 0))
df.mc_likelihood[,3] = as.data.frame(ifelse (df.results_V2[,3]==0, 1, 0))
## PID model: deterministic response based on the sign of the difference
## between columns 2 and 5 of df.results_V2
df.mc_likelihood[,4] = ifelse ((df.results_V2[,2]-df.results_V2[,5]) < 0, 1,0) #below (press up)
df.mc_likelihood[,5] = ifelse ((df.results_V2[,2]-df.results_V2[,5]) > 0, 1,0) #above (press down)
df.mc_likelihood[,6] = ifelse ((df.results_V2[,2]-df.results_V2[,5]) == 0, 1,0)
## lazy model: always predicts "stay"
df.mc_likelihood[,7] =as.data.frame(rep(0,176000))
df.mc_likelihood[,8] = as.data.frame(rep(0,176000))
df.mc_likelihood[,9] = as.data.frame(rep(1,176000))
## Bayes model: posterior-weighted mixture of the optimal responses
df.mc_likelihood[,10] = as.data.frame(df.bayes_V2[,5] * df.bayes_opt_response_V2[,1]+df.bayes_V2[,6] * df.bayes_opt_response_V2[,1]+df.bayes_V2[,7] * df.bayes_opt_response_V2[,4]+df.bayes_V2[,8] * df.bayes_opt_response_V2[,4])
df.mc_likelihood[,11] = as.data.frame(df.bayes_V2[,5] * df.bayes_opt_response_V2[,2]+df.bayes_V2[,6] * df.bayes_opt_response_V2[,2]+df.bayes_V2[,7] * df.bayes_opt_response_V2[,5]+df.bayes_V2[,8] * df.bayes_opt_response_V2[,5])
df.mc_likelihood[,12] = as.data.frame(df.bayes_V2[,5] * df.bayes_opt_response_V2[,3]+df.bayes_V2[,6] * df.bayes_opt_response_V2[,3]+df.bayes_V2[,7] * df.bayes_opt_response_V2[,6]+df.bayes_V2[,8] * df.bayes_opt_response_V2[,6])
#### IMPORTANT: in the opt_response dataframe, moving up and moving down is confused!!!
## Results table: one row per participant, 86 columns =
## 2 baseline columns + 4 sub-model variants x 3 probability models x 7
## columns each (softmax, lazyness, stickiness, their logit versions,
## MLE, BIC).
df.mc_fitted_combined_V2 <- data.frame(matrix("", nrow = 1, ncol = 86))
## coerce every column from character ("") to numeric
for (i in 1:86){
  df.mc_fitted_combined_V2[,i] = as.numeric(df.mc_fitted_combined_V2[,i])
}
colnames (df.mc_fitted_combined_V2) <- c("baseline_MLE",
"baseline_BIC",
"PID_softmax",
"PID_lazyness",
"PID_stickiness",
"PID_lazyness_logit",
"PID_stickiness_logit",
"PID_MLE",
"PID_BIC",
"lazy_softmax",
"lazy_lazyness",
"lazy_stickiness",
"lazy_lazyness_logit",
"lazy_stickiness_logit",
"lazy_MLE",
"lazy_BIC",
"bayes_softmax",
"bayes_lazyness",
"bayes_stickiness",
"bayes_lazyness_logit",
"bayes_stickiness_logit",
"bayes_MLE",
"bayes_BIC",
"02_PID_softmax",
"02_PID_lazyness",
"02_PID_stickiness",
"02_PID_lazyness_logit",
"02_PID_stickiness_logit",
"02_PID_MLE",
"02_PID_BIC",
"02_lazy_softmax",
"02_lazy_lazyness",
"02_lazy_stickiness",
"02_lazy_lazyness_logit",
"02_lazy_stickiness_logit",
"02_lazy_MLE",
"02_lazy_BIC",
"02_bayes_softmax",
"02_bayes_lazyness",
"02_bayes_stickiness",
"02_bayes_lazyness_logit",
"02_bayes_stickiness_logit",
"02_bayes_MLE",
"02_bayes_BIC",
"03_PID_softmax",
"03_PID_lazyness",
"03_PID_stickiness",
"03_PID_lazyness_logit",
"03_PID_stickiness_logit",
"03_PID_MLE",
"03_PID_BIC",
"03_lazy_softmax",
"03_lazy_lazyness",
"03_lazy_stickiness",
"03_lazy_lazyness_logit",
"03_lazy_stickiness_logit",
"03_lazy_MLE",
"03_lazy_BIC",
"03_bayes_softmax",
"03_bayes_lazyness",
"03_bayes_stickiness",
"03_bayes_lazyness_logit",
"03_bayes_stickiness_logit",
"03_bayes_MLE",
"03_bayes_BIC",
"04_PID_softmax",
"04_PID_lazyness",
"04_PID_stickiness",
"04_PID_lazyness_logit",
"04_PID_stickiness_logit",
"04_PID_MLE",
"04_PID_BIC",
"04_lazy_softmax",
"04_lazy_lazyness",
"04_lazy_stickiness",
"04_lazy_lazyness_logit",
"04_lazy_stickiness_logit",
"04_lazy_MLE",
"04_lazy_BIC",
"04_bayes_softmax",
"04_bayes_lazyness",
"04_bayes_stickiness",
"04_bayes_lazyness_logit",
"04_bayes_stickiness_logit",
"04_bayes_MLE",
"04_bayes_BIC")
###########################
#baseline
###########################
## chance model log likelihood: log(1/3) per frame over 4000 frames x 34
## participants; BIC with one "parameter" term
df.mc_fitted_combined_V2[,1] = log (1/3)*4000*34
df.mc_fitted_combined_V2[,2] = -2*df.mc_fitted_combined_V2[,1] + log(4000)
###########################
# optimizing the models
###########################
## Model-fitting loop key (was a Python-style ''' docstring, which R parses
## as stray string literals rather than a comment):
##   j indexes participants (set to 1:44 in the full analysis)
##   k indexes the model:
##     0 is PID
##     1 is lazy
##     2 is Bayes
## Fit each model (k) for each participant (j) by maximizing the log
## likelihood of the observed responses under softmax / laziness /
## stickiness transformations of the model's response probabilities.
## NOTE(review): the inner loop is hard-coded to j in 1:1, so
## `participant_selection` defined at the top is currently unused.
for (k in model_selection){
  ## columns of df.mc_likelihood holding model k's probabilities
  l = (4+k*3): (6+k*3)
  print ("k // the model")
  print (k)
  for (j in 1:1){ #participants
    start = 1
    end = length(df.results_V2[,1])
    ## model 1: softmax + laziness + stickiness (3 free parameters)
    fn <- function(param) {
      data = df.mc_likelihood[start:end, l]
      ## previous-frame probabilities (first frame duplicated) for stickiness
      data_previous = rbind (df.mc_likelihood[start, l],df.mc_likelihood[start:(end-1), l])
      p = softmax (data,param[1])
      p = lazyness (p, param[2])
      p = stickiness(p, data_previous, param[3])
      ## probability assigned to the actually chosen response, per frame
      p = (df.mc_likelihood[start:end, 1:3] * p)
      return (sum (log (rowSums(p))))
    }
    ## model 2: softmax + laziness (2 free parameters)
    fn_02 <- function(param) {
      data = df.mc_likelihood[start:end, l]
      p = softmax (data,param[1])
      p = lazyness (p, param[2])
      p = (df.mc_likelihood[start:end, 1:3] * p)
      return (sum (log (rowSums(p))))
    }
    ## model 3: softmax + stickiness (2 free parameters)
    fn_03 <- function(param) {
      data = df.mc_likelihood[start:end, l]
      data_previous = rbind (df.mc_likelihood[start, l],df.mc_likelihood[start:(end-1), l])
      p = softmax (data,param[1])
      p = stickiness(p, data_previous, param[2])
      p = (df.mc_likelihood[start:end, 1:3] * p)
      return (sum (log (rowSums(p))))
    }
    ## model 4: softmax only (1 free parameter)
    fn_04 <- function(param) {
      data = df.mc_likelihood[start:end, l]
      p = softmax (data,param[1])
      p = (df.mc_likelihood[start:end, 1:3] * p)
      return (sum (log (rowSums(p))))
    }
    ## model 1 // softmax, lazyness and stickiness
    ## fnscale = -1 (set at the top of the script) makes optim() maximize
    opt_param = optim((c(start_softmax,start_lazy_sticky,start_lazy_sticky)), fn, control = list(fnscale=fnscale))
    df.mc_fitted_combined_V2[j,(3+k*7):(5+k*7)] = t(data.frame(opt_param[1]))
    df.mc_fitted_combined_V2[j,(8+k*7)] = opt_param[2] #MLE
    print ("model 1 complete")
    #model 2 // softmax and lazyness
    opt_param = optim((c(start_softmax,start_lazy_sticky)), fn_02, control = list(fnscale=fnscale))
    df.mc_fitted_combined_V2[j,(3+k*7+21):(4+k*7+21)] = t(data.frame(opt_param[1]))
    df.mc_fitted_combined_V2[j,(8+k*7+21)] = opt_param[2] #MLE
    print ("model 2 complete")
    #model 3 // softmax and stickyness
    opt_param = optim((c(start_softmax,start_lazy_sticky)), fn_03, control = list(fnscale=fnscale))
    df.mc_fitted_combined_V2[j,c((3+k*7+42),(5+k*7+42))] = t(data.frame(opt_param[1]))
    df.mc_fitted_combined_V2[j,(8+k*7+42)] = opt_param[2] #MLE
    print ("model 3 complete")
    #model 4 // only softmax; Brent requires box constraints on the single parameter
    opt_param = optim((c(start_softmax)), fn_04, method="Brent", lower=0, upper=1000, control = list(fnscale=fnscale))
    df.mc_fitted_combined_V2[j,(3+k*7+63)] = t(data.frame(opt_param[1]))
    df.mc_fitted_combined_V2[j,(8+k*7+63)] = opt_param[2] #MLE
    print ("model 4 complete")
  }
  ## convert fitted laziness/stickiness parameters to the probability scale
  for (m in 0:3){
    df.mc_fitted_combined_V2[,(6+k*7+21*m):(7+k*7+21*m)] = logit (df.mc_fitted_combined_V2[,(4+k*7+21*m):(5+k*7+21*m)])
  }
}
####################################
# computing BIC score
####################################
# BIC per model: -2 * max log-likelihood + (free-parameter count) * log(4000).
# k indexes the data source (0 = PID, 1 = lazy, 2 = Bayes); m indexes the
# model (0..3); pun is the parameter count used as the BIC penalty multiplier
# (3, 2, 2, 1 parameters, matching fn, fn_02, fn_03, fn_04 above).
for (k in 0:2){
# NOTE(review): the m = 0 line hard-codes 3*log(4000) instead of pun*log(4000).
# The value is the same because pun = 3, but the inconsistency is worth fixing.
m= 0; pun = 3; df.mc_fitted_combined_V2[,(9+k*7+21*m)] = -2*df.mc_fitted_combined_V2[,(8+k*7+21*m)]+3*log(4000)
m= 1; pun = 2; df.mc_fitted_combined_V2[,(9+k*7+21*m)] = -2*df.mc_fitted_combined_V2[,(8+k*7+21*m)]+pun*log(4000)
m= 2; pun = 2; df.mc_fitted_combined_V2[,(9+k*7+21*m)] = -2*df.mc_fitted_combined_V2[,(8+k*7+21*m)]+pun*log(4000)
m= 3; pun = 1; df.mc_fitted_combined_V2[,(9+k*7+21*m)] = -2*df.mc_fitted_combined_V2[,(8+k*7+21*m)]+pun*log(4000)
}
####################################
# computing accuracy
# a bit of a weird measure... maybe take out?
####################################
frames = 4000*34
a=1:frames
print (sum (df.mc_likelihood[a,1:3] *df.mc_likelihood[a,4:6])/frames)
print (sum (df.mc_likelihood[a,1:3] *df.mc_likelihood[a,7:9])/frames)
print (sum (df.mc_likelihood[a,1:3] *df.mc_likelihood[a,10:12])/frames)
rm(a, frames)
| /mc_model_fitting_combined_V2.R | no_license | jonasmago/med-cause | R | false | false | 11,011 | r | start_softmax = 0.3
start_lazy_sticky = -1
model_selection = 0:2
participant_selection = 1:1
fnscale = -1
################################
# functions
################################
logit <- function(a) {
  # Inverse-logit (sigmoid): maps real values into (0, 1). Vectorized.
  # Written as 1 / (1 + exp(-a)) instead of exp(a) / (exp(a) + 1) so that
  # large positive inputs do not overflow exp() into Inf / Inf = NaN.
  # (Name kept for compatibility, although this is the inverse of a logit.)
  1 / (1 + exp(-a))
}
logit_reverse <- function(y) {
  # Log-odds transform: the inverse of logit() above. Vectorized.
  odds <- y / (1 - y)
  log(odds)
}
softmax <- function(raw, temperature) {
  # Row-wise softmax over a matrix / data frame of scores.
  # `temperature` rescales the scores before exponentiation.
  # Compute exp() once instead of twice per call.
  scaled <- exp(raw / temperature)
  scaled / rowSums(scaled)
}
lazyness <- function(raw, lazyness_param) {
  # Mix the raw policy with a fixed "stay" column pattern c(0, 0, 1).
  # The mixing weight is the lazyness parameter squashed through logit().
  w <- logit(lazyness_param)
  shrunk <- raw * (1 - w)
  sweep(shrunk, 2, c(0, 0, 1) * w, "+")
}
stickiness <- function(raw, raw_previous, stickiness_param) {
  # Blend the current values with the previous step's values; the blend
  # weight is the stickiness parameter squashed through logit().
  w <- logit(stickiness_param)
  (1 - w) * raw + w * raw_previous
}
################################
# setting up df
################################
df.mc_likelihood <- data.frame(matrix("", nrow = (length(df.results_V2[,2])), ncol = 12))
colnames (df.mc_likelihood) <- c("real_up", "real_down","real_stay","PID_up","PID_down","PID_stay","lazy_up","lazy_down","lazy_stay", "bayes_up","bayes_down","bayes_stay")
# intervention:
# 1 = up, 3=down, 0=stay
df.mc_likelihood[,1] = as.data.frame(ifelse (df.results_V2[,3]==1, 1, 0))
df.mc_likelihood[,2]= as.data.frame(ifelse (df.results_V2[,3]==3, 1, 0))
df.mc_likelihood[,3] = as.data.frame(ifelse (df.results_V2[,3]==0, 1, 0))
df.mc_likelihood[,4] = ifelse ((df.results_V2[,2]-df.results_V2[,5]) < 0, 1,0) #below (press up)
df.mc_likelihood[,5] = ifelse ((df.results_V2[,2]-df.results_V2[,5]) > 0, 1,0) #above (press down)
df.mc_likelihood[,6] = ifelse ((df.results_V2[,2]-df.results_V2[,5]) == 0, 1,0)
df.mc_likelihood[,7] =as.data.frame(rep(0,176000))
df.mc_likelihood[,8] = as.data.frame(rep(0,176000))
df.mc_likelihood[,9] = as.data.frame(rep(1,176000))
df.mc_likelihood[,10] = as.data.frame(df.bayes_V2[,5] * df.bayes_opt_response_V2[,1]+df.bayes_V2[,6] * df.bayes_opt_response_V2[,1]+df.bayes_V2[,7] * df.bayes_opt_response_V2[,4]+df.bayes_V2[,8] * df.bayes_opt_response_V2[,4])
df.mc_likelihood[,11] = as.data.frame(df.bayes_V2[,5] * df.bayes_opt_response_V2[,2]+df.bayes_V2[,6] * df.bayes_opt_response_V2[,2]+df.bayes_V2[,7] * df.bayes_opt_response_V2[,5]+df.bayes_V2[,8] * df.bayes_opt_response_V2[,5])
df.mc_likelihood[,12] = as.data.frame(df.bayes_V2[,5] * df.bayes_opt_response_V2[,3]+df.bayes_V2[,6] * df.bayes_opt_response_V2[,3]+df.bayes_V2[,7] * df.bayes_opt_response_V2[,6]+df.bayes_V2[,8] * df.bayes_opt_response_V2[,6])
#### IMPORTANT: in the opt_response dataframe, moving up and moving down is confused!!!
df.mc_fitted_combined_V2 <- data.frame(matrix("", nrow = 1, ncol = 86))
for (i in 1:86){
df.mc_fitted_combined_V2[,i] = as.numeric(df.mc_fitted_combined_V2[,i])
}
colnames (df.mc_fitted_combined_V2) <- c("baseline_MLE",
"baseline_BIC",
"PID_softmax",
"PID_lazyness",
"PID_stickiness",
"PID_lazyness_logit",
"PID_stickiness_logit",
"PID_MLE",
"PID_BIC",
"lazy_softmax",
"lazy_lazyness",
"lazy_stickiness",
"lazy_lazyness_logit",
"lazy_stickiness_logit",
"lazy_MLE",
"lazy_BIC",
"bayes_softmax",
"bayes_lazyness",
"bayes_stickiness",
"bayes_lazyness_logit",
"bayes_stickiness_logit",
"bayes_MLE",
"bayes_BIC",
"02_PID_softmax",
"02_PID_lazyness",
"02_PID_stickiness",
"02_PID_lazyness_logit",
"02_PID_stickiness_logit",
"02_PID_MLE",
"02_PID_BIC",
"02_lazy_softmax",
"02_lazy_lazyness",
"02_lazy_stickiness",
"02_lazy_lazyness_logit",
"02_lazy_stickiness_logit",
"02_lazy_MLE",
"02_lazy_BIC",
"02_bayes_softmax",
"02_bayes_lazyness",
"02_bayes_stickiness",
"02_bayes_lazyness_logit",
"02_bayes_stickiness_logit",
"02_bayes_MLE",
"02_bayes_BIC",
"03_PID_softmax",
"03_PID_lazyness",
"03_PID_stickiness",
"03_PID_lazyness_logit",
"03_PID_stickiness_logit",
"03_PID_MLE",
"03_PID_BIC",
"03_lazy_softmax",
"03_lazy_lazyness",
"03_lazy_stickiness",
"03_lazy_lazyness_logit",
"03_lazy_stickiness_logit",
"03_lazy_MLE",
"03_lazy_BIC",
"03_bayes_softmax",
"03_bayes_lazyness",
"03_bayes_stickiness",
"03_bayes_lazyness_logit",
"03_bayes_stickiness_logit",
"03_bayes_MLE",
"03_bayes_BIC",
"04_PID_softmax",
"04_PID_lazyness",
"04_PID_stickiness",
"04_PID_lazyness_logit",
"04_PID_stickiness_logit",
"04_PID_MLE",
"04_PID_BIC",
"04_lazy_softmax",
"04_lazy_lazyness",
"04_lazy_stickiness",
"04_lazy_lazyness_logit",
"04_lazy_stickiness_logit",
"04_lazy_MLE",
"04_lazy_BIC",
"04_bayes_softmax",
"04_bayes_lazyness",
"04_bayes_stickiness",
"04_bayes_lazyness_logit",
"04_bayes_stickiness_logit",
"04_bayes_MLE",
"04_bayes_BIC")
###########################
#baseline
###########################
df.mc_fitted_combined_V2[,1] = log (1/3)*4000*34
df.mc_fitted_combined_V2[,2] = -2*df.mc_fitted_combined_V2[,1] + log(4000)
###########################
# optimizing the models
###########################
# R has no triple-quoted strings: the original '''...''' block only parsed by
# accident (as adjacent string literals evaluated and discarded) and would
# stop parsing if the text ever contained an apostrophe. Plain comments are
# the idiomatic form.
# j is the participants, set to 1:44
# k is the model
#   0 is PID
#   1 is lazy
#   2 is Bayes
for (k in model_selection){
l = (4+k*3): (6+k*3)
print ("k // the model")
print (k)
for (j in 1:1){ #participants
start = 1
end = length(df.results_V2[,1])
# Log-likelihood functions for the four candidate models, maximized via
# optim(..., control = list(fnscale = -1)) below. All read start, end, l and
# df.mc_likelihood from the enclosing loop scope.
# Model 1: softmax temperature + lazyness + stickiness.
# param = c(temperature, lazyness, stickiness).
fn <- function(param) {
data = df.mc_likelihood[start:end, l]
# Previous frame's model columns: row `start` duplicated, then rows start..end-1.
data_previous = rbind (df.mc_likelihood[start, l],df.mc_likelihood[start:(end-1), l])
p = softmax (data,param[1])
p = lazyness (p, param[2])
p = stickiness(p, data_previous, param[3])
# Columns 1:3 are 0/1 indicators of the real action, so rowSums picks the
# probability assigned to the action actually taken in each frame.
p = (df.mc_likelihood[start:end, 1:3] * p)
return (sum (log (rowSums(p))))
}
# Model 2: softmax + lazyness (no stickiness). param = c(temperature, lazyness).
fn_02 <- function(param) {
data = df.mc_likelihood[start:end, l]
p = softmax (data,param[1])
p = lazyness (p, param[2])
p = (df.mc_likelihood[start:end, 1:3] * p)
return (sum (log (rowSums(p))))
}
# Model 3: softmax + stickiness (no lazyness). param = c(temperature, stickiness).
fn_03 <- function(param) {
data = df.mc_likelihood[start:end, l]
data_previous = rbind (df.mc_likelihood[start, l],df.mc_likelihood[start:(end-1), l])
p = softmax (data,param[1])
p = stickiness(p, data_previous, param[2])
p = (df.mc_likelihood[start:end, 1:3] * p)
return (sum (log (rowSums(p))))
}
# Model 4: softmax only. param = c(temperature).
fn_04 <- function(param) {
data = df.mc_likelihood[start:end, l]
p = softmax (data,param[1])
p = (df.mc_likelihood[start:end, 1:3] * p)
return (sum (log (rowSums(p))))
}
opt_param = optim((c(start_softmax,start_lazy_sticky,start_lazy_sticky)), fn, control = list(fnscale=fnscale))
df.mc_fitted_combined_V2[j,(3+k*7):(5+k*7)] = t(data.frame(opt_param[1]))
df.mc_fitted_combined_V2[j,(8+k*7)] = opt_param[2] #MLE
print ("model 1 complete")
#model 2 // softmax and lazyness
opt_param = optim((c(start_softmax,start_lazy_sticky)), fn_02, control = list(fnscale=fnscale))
df.mc_fitted_combined_V2[j,(3+k*7+21):(4+k*7+21)] = t(data.frame(opt_param[1]))
df.mc_fitted_combined_V2[j,(8+k*7+21)] = opt_param[2] #MLE
print ("model 2 complete")
#model 3 // softmax and stickyness
opt_param = optim((c(start_softmax,start_lazy_sticky)), fn_03, control = list(fnscale=fnscale))
df.mc_fitted_combined_V2[j,c((3+k*7+42),(5+k*7+42))] = t(data.frame(opt_param[1]))
df.mc_fitted_combined_V2[j,(8+k*7+42)] = opt_param[2] #MLE
print ("model 3 complete")
#model 4 // only softmax
opt_param = optim((c(start_softmax)), fn_04, method="Brent", lower=0, upper=1000, control = list(fnscale=fnscale))
df.mc_fitted_combined_V2[j,(3+k*7+63)] = t(data.frame(opt_param[1]))
df.mc_fitted_combined_V2[j,(8+k*7+63)] = opt_param[2] #MLE
print ("model 4 complete")
}
for (m in 0:3){
df.mc_fitted_combined_V2[,(6+k*7+21*m):(7+k*7+21*m)] = logit (df.mc_fitted_combined_V2[,(4+k*7+21*m):(5+k*7+21*m)])
}
}
####################################
# computing BIC score
####################################
# BIC = -2 * max log-likelihood + n_params * log(4000) (4000 frames, hard-coded
# as in the original). Model m (0..3) has 3/2/2/1 free parameters, matching
# fn, fn_02, fn_03, fn_04; k indexes the data source (PID / lazy / Bayes).
# The original repeated the same line four times and hard-coded the first
# penalty as 3*log(4000) instead of using `pun`; loop over a penalty vector.
n_params <- c(3, 2, 2, 1)
for (k in 0:2) {
  for (m in 0:3) {
    mle_col <- 8 + k * 7 + 21 * m
    bic_col <- 9 + k * 7 + 21 * m
    df.mc_fitted_combined_V2[, bic_col] <-
      -2 * df.mc_fitted_combined_V2[, mle_col] + n_params[m + 1] * log(4000)
  }
}
####################################
# computing accuracy
# a bit of a weird measure... maybe take out?
####################################
frames = 4000*34
a=1:frames
print (sum (df.mc_likelihood[a,1:3] *df.mc_likelihood[a,4:6])/frames)
print (sum (df.mc_likelihood[a,1:3] *df.mc_likelihood[a,7:9])/frames)
print (sum (df.mc_likelihood[a,1:3] *df.mc_likelihood[a,10:12])/frames)
rm(a, frames)
|
arrests <- USArrests
rownames(USArrests)
rownames(cleanCensus) <- cleanCensus$stateName
rownames(cleanCensus)
str(cleanCensus)
censusArrests <- merge(cleanCensus,arrests,by= "row.names", all= TRUE)
head(censusArrests)
censusArrestsFinal <-censusArrests[,-1]
head(censusArrestsFinal,10)
| /MergeArrests.R | no_license | fall2018-saltz/xiaoyan_hw06 | R | false | false | 290 | r |
arrests <- USArrests
rownames(USArrests)
rownames(cleanCensus) <- cleanCensus$stateName
rownames(cleanCensus)
str(cleanCensus)
censusArrests <- merge(cleanCensus,arrests,by= "row.names", all= TRUE)
head(censusArrests)
censusArrestsFinal <-censusArrests[,-1]
head(censusArrestsFinal,10)
|
library(analysisPipelines)
### Name: getTerm
### Title: Obtains the dependency term from the formula
### Aliases: getTerm
### ** Examples
library(analysisPipelines)
getTerm(y ~ x)
| /data/genthat_extracted_code/analysisPipelines/examples/getTerm.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 187 | r | library(analysisPipelines)
### Name: getTerm
### Title: Obtains the dependency term from the formula
### Aliases: getTerm
### ** Examples
library(analysisPipelines)
getTerm(y ~ x)
|
corr <- function(directory, threshold = 0) {
# Change working directory to where the file are located
setwd("C:/Users/vipl/rprog-001")
setwd(directory)
# get the list of files in the directory
files <- list.files()
nitrate <- "nitrate"
sulfate <- "sulfate"
# vector to hold the "cor"
corvec <- c()
for (file in files) {
# for each of the file in the "directory" get the number of
# complete observations. Had no choice but to read the completly
# to determine the count. Not sure if there is "memory" efficient
# approach.
data <- read.csv(file)
obscount <- nrow(data[complete.cases(data),])
# "cor" only if the no of complete observations above the threshold
if( obscount > threshold) {
cor <- cor(data[complete.cases(data),nitrate],
data[complete.cases(data),sulfate])
corvec <- append(corvec, cor)
}
}
return (corvec)
} | /corr.R | no_license | rajpurush/rprog-001 | R | false | false | 985 | r | corr <- function(directory, threshold = 0) {
# Change working directory to where the file are located
setwd("C:/Users/vipl/rprog-001")
setwd(directory)
# get the list of files in the directory
files <- list.files()
nitrate <- "nitrate"
sulfate <- "sulfate"
# vector to hold the "cor"
corvec <- c()
for (file in files) {
# for each of the file in the "directory" get the number of
# complete observations. Had no choice but to read the completly
# to determine the count. Not sure if there is "memory" efficient
# approach.
data <- read.csv(file)
obscount <- nrow(data[complete.cases(data),])
# "cor" only if the no of complete observations above the threshold
if( obscount > threshold) {
cor <- cor(data[complete.cases(data),nitrate],
data[complete.cases(data),sulfate])
corvec <- append(corvec, cor)
}
}
return (corvec)
} |
options(
blogdown.generator = "jekyll",
blogdown.method = "custom",
blogdown.subdir = "_posts"
)
start_my_server <- function() {
  # Launch the blogdown preview server, listening on all interfaces
  # (0.0.0.0) on port 4000.
  blogdown::serve_site(host = "0.0.0.0", port = 4000)
}
| /.Rprofile | permissive | deargle/deargle.github.io | R | false | false | 188 | rprofile | options(
blogdown.generator = "jekyll",
blogdown.method = "custom",
blogdown.subdir = "_posts"
)
# Start the blogdown preview server on all interfaces (0.0.0.0), port 4000.
start_my_server <- function(){
blogdown::serve_site(host='0.0.0.0',port=4000)
}
|
#Applied Machine Learning for Health Data
#Gene Data Clustering Analysis
#Name: Ivy Fong
#Date: December 20, 2018
#load packages to be used
library(cluster)
#set working directory
setwd("C:/Users/ivyfo/Dropbox/Master of Public Health/Master of Public Health - Courses/Fall 2018 - Courses/CHL7001 - Machine Learning/CHL7001 - Assignments/CHL7001 A3")
#set seed
set.seed(123)
#load data and create dataset d without missing values
d <- read.csv("Data_Cortex_Nuclear.csv", header=T, na.strings="?") #read csv data into R, specify variable names in header, return ? for missing values
d <- na.omit(d) #only keep observations with complete information
summary(d) #print summary of d dataset
#subset data
d.data <- d[,2:78] #create subset d.data gene expression variables
d.labels <- d[,82] #create subset d.labels with class variable
summary(d.data) #print summary of d.data dataset
summary(d.labels) #print summary of d.labels dataset
#calculate mean and variance of the gene expression variables for each observation
apply(d.data, 2, mean) #print mean of gene variables
apply(d.data, 2, var) #print variance of gene variables
#scale of features looks similar - will not scale data
#PAM
#run PAM algorithm for different number of clusters and compare the associated silhouette widths
# Average silhouette width for PAM with k = 2..10 clusters (higher is better).
# Preallocate the result vector instead of growing it inside the loop.
sil_width <- numeric(9)
for (i in seq_len(9)) {
  pam_fit <- pam(d.data, k = i + 1)            # PAM with i + 1 clusters
  sil_width[i] <- pam_fit$silinfo$avg.width    # average silhouette width
}
plot(2:10, sil_width, #plot sihouette width (higher is better)
xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(2:10, sil_width) #highest silhouette width with 3 clusters
#apply PAM to partition gene data into 3 clusters around medoids - a more robust version of K-means
pam <- pam(d.data, 3) #apply PAM to gene data with k = 3 clusters
pam$clusinfo #print PAM cluster information including cluster size = 169 220 163
pam$silinfo$clus.avg.widths #print cluster average silhouette widths = 0.1851879 0.3212409 0.2780063
pam$silinfo$avg.width #print average silhouette width = 0.2668203
#create tables to present the distribution of outcomes across clusters
table(pam$cluster,d.labels) #create table of clusters by class
table(pam$cluster,d$Genotype) #create table of clusters by genotype
table(pam$cluster,d$Treatment) #create table of clusters by treatment
table(pam$cluster,d$Behavior) #create table of clusters by behaviour
#create silhouette plot
si <- silhouette(pam) #use silhouette function to compute silhouette information for PAM clustering
plot(si) #generate silhouette plot
#K-means
#run K-means algorithm for different number of clusters and compare the associated silhouette widths
km2 <- kmeans(d.data, centers=2, nstart=20)
si.km2 <- silhouette(km2$cluster, dist(d.data))
summary(si.km2) #k = 2, silhouette width = 0.2752
km3 <- kmeans(d.data, centers=3, nstart=20)
si.km3 <- silhouette(km3$cluster, dist(d.data))
summary(si.km3) #k = 3, silhouette width = 0.27490
km4 <- kmeans(d.data, centers=4, nstart=20)
si.km4 <- silhouette(km4$cluster, dist(d.data))
summary(si.km4) #k = 4, silhouette width = 0.21944
km5 <- kmeans(d.data, centers=5, nstart=20)
si.km5 <- silhouette(km5$cluster, dist(d.data))
summary(si.km5) #k = 5, silhouette width = 0.20543
km6 <- kmeans(d.data, centers=6, nstart=20)
si.km6 <- silhouette(km6$cluster, dist(d.data))
summary(si.km6) #k = 6, silhouette width = 0.21206
km7 <- kmeans(d.data, centers=7, nstart=20)
si.km7 <- silhouette(km7$cluster, dist(d.data))
summary(si.km7) #k = 7, silhouette width = 0.22344
km8 <- kmeans(d.data, centers=8, nstart=20)
si.km8 <- silhouette(km8$cluster, dist(d.data))
summary(si.km8) #k = 8, silhouette width = 0.2123
km9 <- kmeans(d.data, centers=9, nstart=20)
si.km9 <- silhouette(km9$cluster, dist(d.data))
summary(si.km9) #k = 9, silhouette width = 0.21825
km10 <- kmeans(d.data, centers=10, nstart=20)
si.km10 <- silhouette(km10$cluster, dist(d.data))
summary(si.km10) #k = 10, silhouette width = 0.2085
#create sil_width2 vector with silhouette widths for K-means clustering with 2 to 10 clusters
sil_width2 <- c(0.2752, 0.27490, 0.21944, 0.20543, 0.21206, 0.22344, 0.2123, 0.21825, 0.2085)
#plot sihouette width (higher is better)
plot(2:10, sil_width2,
xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(2:10, sil_width2) #highest silhouette width with 2 clusters
#apply K-means to partition gene data into 2 clusters around centroids
km <- kmeans(d.data, centers=2, nstart=20) #perform K-means clustering 20 times, create 2 clusters, pick the best solution
km$iter #print number of iterations needed to find solution = 1
#create tables to present the distribution of outcomes across clusters
table(km$cluster,d.labels) #create table of clusters by class
table(km$cluster,d$Genotype) #create table of clusters by genotype
table(km$cluster,d$Treatment) #create table of clusters by treatment
table(km$cluster,d$Behavior) #create table of clusters by behaviour
#no distinct clusters by genotype, behaviour, treatment, or class
#create silhouette plot
si2 <- silhouette(km$cluster, dist(d.data)) #compute silhouette information, dist matrix is needed - dist outputs the distance between 1st and 2nd point and so on
summary(si2) #print silhouette information summary
#cluster size = 265 287
#cluster average silhouette widths = 0.2094640 0.3359831
#average silhouette width = 0.2752
plot(si2) #generate silhouette plot
#PCA
#perform PCA
pr.out <- prcomp(d.data, scale=F) #perform PCA on gene data, don't scale the data
pr.out$rotation[,1] #print pc1 loadings - except NR2A_N, ERK_N, pCAMKII_N, Bcatenin_N, loadings > 0.2, all other pc1 loadings are close to 0
pr.out$rotation[,2] #print pc2 loadings - except NR2A_N, ERK_N, pCAMKII_N loadings > 0.2, all other pc2 loadings are close to 0
#create biplot
biplot(pr.out, scale=0) #generate biplot, plotting together the points and the features based on the first 2 pc's
#calculate pve = proportion of variance explained by each component
pr.var <- pr.out$sdev^2
pve <- pr.var/sum(pr.var)
pve #print proportion of variance explained by each component
#plot proportion of variance explained
plot(pve, xlab="Principal Component", ylab="Proportion of
Variance Explained ", ylim=c(0,1) ,type="b")
#plot cumulative proportion of variance explained
plot(cumsum(pve), xlab="Principal Component", ylab="
Cumulative Proportion of Variance Explained ", ylim=c(0,1) ,
type="b")
#cumsum function gives cumulative sums of previous units - proportion of variance explained
cumsum(pve) #print cumulative proportion of variance explained - first 6 pc's explain about 92% of the data variation
#colour-code PAM clusters on plot of pc1 vs. pc2
plot(pr.out$x[,1:2], col=4-as.numeric(pam$cluster)) #generate plot of pc2 vs. pc2 with colour-coded PAM clusters
legend("bottomright", legend=levels(as.factor(pam$cluster)), text.col=4-(1:3), y.intersp=0.8) #add legend with cluster ID
| /Gene Data Clustering Analysis.R | no_license | ivyfong/Gene-Data-Clustering | R | false | false | 7,018 | r | #Applied Machine Learning for Health Data
#Gene Data Clustering Analysis
#Name: Ivy Fong
#Date: December 20, 2018
#load packages to be used
library(cluster)
#set working directory
setwd("C:/Users/ivyfo/Dropbox/Master of Public Health/Master of Public Health - Courses/Fall 2018 - Courses/CHL7001 - Machine Learning/CHL7001 - Assignments/CHL7001 A3")
#set seed
set.seed(123)
#load data and create dataset d without missing values
d <- read.csv("Data_Cortex_Nuclear.csv", header=T, na.strings="?") #read csv data into R, specify variable names in header, return ? for missing values
d <- na.omit(d) #only keep observations with complete information
summary(d) #print summary of d dataset
#subset data
d.data <- d[,2:78] #create subset d.data gene expression variables
d.labels <- d[,82] #create subset d.labels with class variable
summary(d.data) #print summary of d.data dataset
summary(d.labels) #print summary of d.labels dataset
#calculate mean and variance of the gene expression variables for each observation
apply(d.data, 2, mean) #print mean of gene variables
apply(d.data, 2, var) #print variance of gene variables
#scale of features looks similar - will not scale data
#PAM
#run PAM algorithm for different number of clusters and compare the associated silhouette widths
sil_width <- c() #create sil_width vector
for(i in 1:9){
pam_fit <- pam(d.data, k=i+1) #run PAM algorithm for 2-10 clusters
sil_width[i] <- pam_fit$silinfo$avg.width #extract average silhouette width from silinfo$avg.width to build vector
}
plot(2:10, sil_width, #plot sihouette width (higher is better)
xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(2:10, sil_width) #highest silhouette width with 3 clusters
#apply PAM to partition gene data into 3 clusters around medoids - a more robust version of K-means
pam <- pam(d.data, 3) #apply PAM to gene data with k = 3 clusters
pam$clusinfo #print PAM cluster information including cluster size = 169 220 163
pam$silinfo$clus.avg.widths #print cluster average silhouette widths = 0.1851879 0.3212409 0.2780063
pam$silinfo$avg.width #print average silhouette width = 0.2668203
#create tables to present the distribution of outcomes across clusters
table(pam$cluster,d.labels) #create table of clusters by class
table(pam$cluster,d$Genotype) #create table of clusters by genotype
table(pam$cluster,d$Treatment) #create table of clusters by treatment
table(pam$cluster,d$Behavior) #create table of clusters by behaviour
#create silhouette plot
si <- silhouette(pam) #use silhouette function to compute silhouette information for PAM clustering
plot(si) #generate silhouette plot
#K-means
#run K-means algorithm for different number of clusters and compare the associated silhouette widths
km2 <- kmeans(d.data, centers=2, nstart=20)
si.km2 <- silhouette(km2$cluster, dist(d.data))
summary(si.km2) #k = 2, silhouette width = 0.2752
km3 <- kmeans(d.data, centers=3, nstart=20)
si.km3 <- silhouette(km3$cluster, dist(d.data))
summary(si.km3) #k = 3, silhouette width = 0.27490
km4 <- kmeans(d.data, centers=4, nstart=20)
si.km4 <- silhouette(km4$cluster, dist(d.data))
summary(si.km4) #k = 4, silhouette width = 0.21944
km5 <- kmeans(d.data, centers=5, nstart=20)
si.km5 <- silhouette(km5$cluster, dist(d.data))
summary(si.km5) #k = 5, silhouette width = 0.20543
km6 <- kmeans(d.data, centers=6, nstart=20)
si.km6 <- silhouette(km6$cluster, dist(d.data))
summary(si.km6) #k = 6, silhouette width = 0.21206
km7 <- kmeans(d.data, centers=7, nstart=20)
si.km7 <- silhouette(km7$cluster, dist(d.data))
summary(si.km7) #k = 7, silhouette width = 0.22344
km8 <- kmeans(d.data, centers=8, nstart=20)
si.km8 <- silhouette(km8$cluster, dist(d.data))
summary(si.km8) #k = 8, silhouette width = 0.2123
km9 <- kmeans(d.data, centers=9, nstart=20)
si.km9 <- silhouette(km9$cluster, dist(d.data))
summary(si.km9) #k = 9, silhouette width = 0.21825
km10 <- kmeans(d.data, centers=10, nstart=20)
si.km10 <- silhouette(km10$cluster, dist(d.data))
summary(si.km10) #k = 10, silhouette width = 0.2085
#create sil_width2 vector with silhouette widths for K-means clustering with 2 to 10 clusters
sil_width2 <- c(0.2752, 0.27490, 0.21944, 0.20543, 0.21206, 0.22344, 0.2123, 0.21825, 0.2085)
#plot sihouette width (higher is better)
plot(2:10, sil_width2,
xlab = "Number of clusters",
ylab = "Silhouette Width")
lines(2:10, sil_width2) #highest silhouette width with 2 clusters
#apply K-means to partition gene data into 2 clusters around centroids
km <- kmeans(d.data, centers=2, nstart=20) #perform K-means clustering 20 times, create 2 clusters, pick the best solution
km$iter #print number of iterations needed to find solution = 1
#create tables to present the distribution of outcomes across clusters
table(km$cluster,d.labels) #create table of clusters by class
table(km$cluster,d$Genotype) #create table of clusters by genotype
table(km$cluster,d$Treatment) #create table of clusters by treatment
table(km$cluster,d$Behavior) #create table of clusters by behaviour
#no distinct clusters by genotype, behaviour, treatment, or class
#create silhouette plot
si2 <- silhouette(km$cluster, dist(d.data)) #compute silhouette information, dist matrix is needed - dist outputs the distance between 1st and 2nd point and so on
summary(si2) #print silhouette information summary
#cluster size = 265 287
#cluster average silhouette widths = 0.2094640 0.3359831
#average silhouette width = 0.2752
plot(si2) #generate silhouette plot
#PCA
#perform PCA
pr.out <- prcomp(d.data, scale=F) #perform PCA on gene data, don't scale the data
pr.out$rotation[,1] #print pc1 loadings - except NR2A_N, ERK_N, pCAMKII_N, Bcatenin_N, loadings > 0.2, all other pc1 loadings are close to 0
pr.out$rotation[,2] #print pc2 loadings - except NR2A_N, ERK_N, pCAMKII_N loadings > 0.2, all other pc2 loadings are close to 0
#create biplot
biplot(pr.out, scale=0) #generate biplot, plotting together the points and the features based on the first 2 pc's
#calculate pve = proportion of variance explained by each component
pr.var <- pr.out$sdev^2
pve <- pr.var/sum(pr.var)
pve #print proportion of variance explained by each component
#plot proportion of variance explained
plot(pve, xlab="Principal Component", ylab="Proportion of
Variance Explained ", ylim=c(0,1) ,type="b")
#plot cumulative proportion of variance explained
plot(cumsum(pve), xlab="Principal Component", ylab="
Cumulative Proportion of Variance Explained ", ylim=c(0,1) ,
type="b")
#cumsum function gives cumulative sums of previous units - proportion of variance explained
cumsum(pve) #print cumulative proportion of variance explained - first 6 pc's explain about 92% of the data variation
#colour-code PAM clusters on plot of pc1 vs. pc2
plot(pr.out$x[,1:2], col=4-as.numeric(pam$cluster)) #generate plot of pc2 vs. pc2 with colour-coded PAM clusters
legend("bottomright", legend=levels(as.factor(pam$cluster)), text.col=4-(1:3), y.intersp=0.8) #add legend with cluster ID
|
#Desktop
setwd('D:/Thanish/D/Thanish Folder/Compeditions/Facies')
train_prod = read.csv('facies_vectors.csv')
test_prod = read.csv('nofacies_data.csv')
#Converting the Facies column to factor
train_prod$Facies = as.factor(as.character(train_prod$Facies))
#Adding Facies to test_prod for merging
test_prod$Facies = NA
#Merging the train and test prod
train_test_prod = rbind(train_prod, test_prod)
#Feature engineering
train_test_prod$order = seq(1: nrow(train_test_prod))
#######
#Relpos next
train_test_prod_relpos = train_test_prod[,c('order', 'Well.Name','RELPOS')]
train_test_prod_relpos$order = train_test_prod_relpos$order + 1
names(train_test_prod_relpos) = c("order", 'Well.Name',"RELPOS_next")
train_test_prod = merge(train_test_prod, train_test_prod_relpos,
by.x = c('order','Well.Name'),
by.y = c('order','Well.Name'),
all.x = T)
train_test_prod$RELPOS_next = train_test_prod$RELPOS_next - train_test_prod$RELPOS
train_test_prod$order = NULL
######################################################################
#Multiclass F1
# Macro-averaged F1 score from a confusion matrix M (here built via
# table(predicted, actual)). Vectorized over the diagonal instead of the
# original element-by-element loop that grew `precision`/`recall` in place.
F1 <- function(M) {
  k <- min(dim(M))
  hits <- diag(M)                          # length min(dim(M)), also for non-square M
  precision <- hits / colSums(M)[seq_len(k)]   # M[i,i] over the column margin
  recall <- hits / rowSums(M)[seq_len(k)]      # M[i,i] over the row margin
  f1 <- 2 * (precision * recall) / (precision + recall)
  f1[is.na(f1)] <- 0                       # classes with empty margins contribute 0
  sum(f1) / max(dim(M))                    # average over the larger dimension
}
######################################################################
#Converting the NM to 1 and 0
train_test_prod$NM_M = train_test_prod$NM_M-1
#Fillin up Relpos next with 0
train_test_prod$RELPOS_next[is.na(train_test_prod$RELPOS_next)] = 0
#######
#Removing the rows with NA in PE
train_test_prod = train_test_prod[!is.na(train_test_prod$PE),]
#######
#Splitting up the train an test prod
train_prod = train_test_prod[!is.na(train_test_prod$Facies),]
test_prod = train_test_prod[is.na(train_test_prod$Facies),]
test_prod$Facies = NULL
#Splitting into train and test local
train_local = train_prod[!train_prod$Well.Name %in% c('SHRIMPLIN'),]
test_local = train_prod[train_prod$Well.Name %in% c('SHRIMPLIN'),]
#====================================================================================================
#Deep Learning
#H2o model
library(h2o)
start.h2o = h2o.init(nthreads = -1)
train_local_h2o = as.h2o(train_local[!colnames(train_local) %in% c('Well.Name')])
test_local_h2o = as.h2o(test_local)
test_prod_h2o = as.h2o(test_prod)
x.indep = colnames(train_local_h2o[,!colnames(train_local_h2o) %in% c('Facies')])
y.dep = 'Facies'
set.seed(100)
DL.local.model.h2o = h2o.deeplearning(y = y.dep, x=x.indep, training_frame = train_local_h2o,
overwrite_with_best_model = T,standardize = T,
hidden = c(100, 800))
DL.local.pred.h2o = h2o.predict(DL.local.model.h2o, type='class',newdata = test_local_h2o)
DL.local.pred.h2o = as.data.frame(DL.local.pred.h2o)
DL.local.pred.h2o$predict = factor(as.character(DL.local.pred.h2o$predict), levels = levels(test_local$Facies))
acc_table_DL_h2o = table(DL.local.pred.h2o$predict, test_local$Facies)
acc_table_DL_h2o
acc_DL_h2o = sum(diag(acc_table_DL_h2o))/nrow(test_local)
acc_DL_h2o
F1(acc_table_DL_h2o)
#On prod
DL.prod.pred.h2o = h2o.predict(DL.local.model.h2o, type='class',newdata = test_prod_h2o)
DL.prod.pred.h2o = as.data.frame(DL.prod.pred.h2o)
#====================================================================================================
sub = cbind(test_prod, Facies = DL.prod.pred.h2o$predict)
write.csv(sub, row.names= F, 'NN_predicted_facies_2.csv')
| /Mendacium/NN_sub_2.R | permissive | yohanesnuwara/2016-ml-contest | R | false | false | 3,696 | r | #Desktop
setwd('D:/Thanish/D/Thanish Folder/Compeditions/Facies')
train_prod = read.csv('facies_vectors.csv')
test_prod = read.csv('nofacies_data.csv')
#Converting the Facies column to factor
train_prod$Facies = as.factor(as.character(train_prod$Facies))
#Adding Facies to test_prod for merging
test_prod$Facies = NA
#Merging the train and test prod
train_test_prod = rbind(train_prod, test_prod)
#Feature engineering
train_test_prod$order = seq(1: nrow(train_test_prod))
#######
#Relpos next
train_test_prod_relpos = train_test_prod[,c('order', 'Well.Name','RELPOS')]
train_test_prod_relpos$order = train_test_prod_relpos$order + 1
names(train_test_prod_relpos) = c("order", 'Well.Name',"RELPOS_next")
train_test_prod = merge(train_test_prod, train_test_prod_relpos,
by.x = c('order','Well.Name'),
by.y = c('order','Well.Name'),
all.x = T)
train_test_prod$RELPOS_next = train_test_prod$RELPOS_next - train_test_prod$RELPOS
train_test_prod$order = NULL
######################################################################
#Multiclass F1
# Macro-averaged F1 score computed from a confusion matrix M (here built via
# table(predicted, actual)). Per-class scores whose denominator is zero come
# out NaN and are zeroed before averaging.
F1 = function(M)
{
precision = NULL
recall = NULL
# One pass per class along the diagonal of M.
for (i in 1:min(dim(M)))
{
precision[i] = M[i,i]/sum(M[,i])
recall[i] = M[i,i]/sum(M[i,])
}
F1 = 2*(precision*recall)/(precision+recall)
F1[is.na(F1)] = 0
return(sum(F1)/max(dim(M)))
}
######################################################################
# Recode NM_M (nonmarine/marine indicator) from {1,2} to {0,1}
train_test_prod$NM_M = train_test_prod$NM_M-1
# Fill the missing RELPOS_next values (rows without a predecessor) with 0
train_test_prod$RELPOS_next[is.na(train_test_prod$RELPOS_next)] = 0
#######
# Drop rows with missing PE (photoelectric effect) measurements
train_test_prod = train_test_prod[!is.na(train_test_prod$PE),]
#######
# Split back into train and test prod sets: test rows are the ones whose
# Facies placeholder is still NA
train_prod = train_test_prod[!is.na(train_test_prod$Facies),]
test_prod = train_test_prod[is.na(train_test_prod$Facies),]
test_prod$Facies = NULL
# Local validation split: hold out the SHRIMPLIN well entirely
train_local = train_prod[!train_prod$Well.Name %in% c('SHRIMPLIN'),]
test_local = train_prod[train_prod$Well.Name %in% c('SHRIMPLIN'),]
#====================================================================================================
# Deep learning with h2o. nthreads = -1 uses all available cores.
# NOTE(review): set.seed() does not control h2o's internal RNG, so the
# fit below is not fully reproducible across runs.
library(h2o)
start.h2o = h2o.init(nthreads = -1)
train_local_h2o = as.h2o(train_local[!colnames(train_local) %in% c('Well.Name')])
test_local_h2o = as.h2o(test_local)
test_prod_h2o = as.h2o(test_prod)
# Predictors: every column except the response
x.indep = colnames(train_local_h2o[,!colnames(train_local_h2o) %in% c('Facies')])
y.dep = 'Facies'
set.seed(100)
# Two hidden layers of 100 and 800 units
DL.local.model.h2o = h2o.deeplearning(y = y.dep, x=x.indep, training_frame = train_local_h2o,
                                      overwrite_with_best_model = T,standardize = T,
                                      hidden = c(100, 800))
# Evaluate on the held-out well
DL.local.pred.h2o = h2o.predict(DL.local.model.h2o, type='class',newdata = test_local_h2o)
DL.local.pred.h2o = as.data.frame(DL.local.pred.h2o)
# Re-level predictions so the confusion matrix is square even if some
# classes were never predicted
DL.local.pred.h2o$predict = factor(as.character(DL.local.pred.h2o$predict), levels = levels(test_local$Facies))
acc_table_DL_h2o = table(DL.local.pred.h2o$predict, test_local$Facies)
acc_table_DL_h2o
# Overall accuracy and macro F1 on the local holdout
acc_DL_h2o = sum(diag(acc_table_DL_h2o))/nrow(test_local)
acc_DL_h2o
F1(acc_table_DL_h2o)
# Predict on the production (unlabeled) set
DL.prod.pred.h2o = h2o.predict(DL.local.model.h2o, type='class',newdata = test_prod_h2o)
DL.prod.pred.h2o = as.data.frame(DL.prod.pred.h2o)
#====================================================================================================
# Attach predictions and write the submission file
sub = cbind(test_prod, Facies = DL.prod.pred.h2o$predict)
write.csv(sub, row.names= F, 'NN_predicted_facies_2.csv')
|
# Species accumulation (rarefaction/extrapolation) curves via iNEXT.
# Package vignette:
#https://cran.r-project.org/web/packages/iNEXT/vignettes/Introduction.html
# 'stage1' is presumably created by the sourced script -- verify there.
source("data/data_processing.R")
library(iNEXT)
# q = Hill number orders: 0 = species richness, 1 = Shannon, 2 = Simpson
out <- iNEXT(stage1, q = c(0,1,2), datatype = "abundance")
# type = 3: sample-coverage-based R/E curve, faceted and colored by site
ggiNEXT(out, type=3, facet.var = "site", color.var = "site")
| /data/accumulation_curves.R | no_license | szefer-piotr/sand_quarries | R | false | false | 315 | r | # Species accumulation curves
# Upload data and necessary packages
#https://cran.r-project.org/web/packages/iNEXT/vignettes/Introduction.html
source("data/data_processing.R")
library(iNEXT)
out <- iNEXT(stage1, q = c(0,1,2), datatype = "abundance")
ggiNEXT(out, type=3, facet.var = "site", color.var = "site")
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{theme}
\alias{theme}
\title{Set theme elements}
\usage{
theme(..., complete = FALSE)
}
\arguments{
\item{...}{a list of element name, element pairings that
modify the existing theme.}
\item{complete}{set this to TRUE if this is a complete
theme, such as the one returned \code{by theme_grey()}.
Complete themes behave differently when added to a ggplot
object.}
}
\description{
Use this function to modify theme settings.
}
\details{
Theme elements can inherit properties from other theme elements.
For example, \code{axis.title.x} inherits from \code{axis.title},
which in turn inherits from \code{text}. All text elements inherit
directly or indirectly from \code{text}; all lines inherit from
\code{line}, and all rectangular objects inherit from \code{rect}.
For more examples of modifying properties using inheritance, see
\code{\link{+.gg}} and \code{\link{\%+replace\%}}.
To see a graphical representation of the inheritance tree, see the
last example below.
}
\section{Theme elements}{
The individual theme elements are:
\tabular{ll}{ line \tab all line elements
(\code{element_line}) \cr rect \tab all rectangular
elements (\code{element_rect}) \cr text \tab all text
elements (\code{element_text}) \cr title \tab all title
elements: plot, axes, legends (\code{element_text};
inherits from \code{text}) \cr
axis.title \tab label of axes (\code{element_text};
inherits from \code{text}) \cr axis.title.x \tab x axis
label (\code{element_text}; inherits from
\code{axis.title}) \cr axis.title.y \tab y axis label
(\code{element_text}; inherits from \code{axis.title})
\cr axis.text \tab tick labels along axes
(\code{element_text}; inherits from \code{text}) \cr
axis.text.x \tab x axis tick labels (\code{element_text};
inherits from \code{axis.text}) \cr axis.text.y \tab y
axis tick labels (\code{element_text}; inherits from
\code{axis.text}) \cr axis.ticks \tab tick marks along
axes (\code{element_line}; inherits from \code{line}) \cr
axis.ticks.x \tab x axis tick marks (\code{element_line};
inherits from \code{axis.ticks}) \cr axis.ticks.y \tab y
axis tick marks (\code{element_line}; inherits from
\code{axis.ticks}) \cr axis.ticks.length \tab length of
tick marks (\code{unit}) \cr axis.ticks.margin \tab space
between tick mark and tick label (\code{unit}) \cr
axis.line \tab lines along axes (\code{element_line};
inherits from \code{line}) \cr axis.line.x \tab line
along x axis (\code{element_line}; inherits from
\code{axis.line}) \cr axis.line.y \tab line along y axis
(\code{element_line}; inherits from \code{axis.line}) \cr
legend.background \tab background of legend
(\code{element_rect}; inherits from \code{rect}) \cr
legend.margin \tab extra space added around legend
(\code{unit}) \cr legend.key \tab background underneath
legend keys (\code{element_rect}; inherits from
\code{rect}) \cr legend.key.size \tab size of legend keys
(\code{unit}; inherits from \code{legend.key.size}) \cr
legend.key.height \tab key background height
(\code{unit}; inherits from \code{legend.key.size}) \cr
legend.key.width \tab key background width (\code{unit};
inherits from \code{legend.key.size}) \cr legend.text
\tab legend item labels (\code{element_text}; inherits
from \code{text}) \cr legend.text.align \tab alignment of
legend labels (number from 0 (left) to 1 (right)) \cr
legend.title \tab title of legend (\code{element_text};
inherits from \code{title}) \cr legend.title.align \tab
alignment of legend title (number from 0 (left) to 1
(right)) \cr legend.position \tab the position of legends
("none", "left", "right", "bottom", "top", or two-element
numeric vector) \cr legend.direction \tab layout of items
in legends ("horizontal" or "vertical") \cr
legend.justification \tab anchor point for positioning
legend inside plot ("center" or two-element numeric
vector) \cr legend.box \tab arrangement of multiple
legends ("horizontal" or "vertical") \cr legend.box.just
\tab justification of each legend within the overall
bounding box, when there are multiple legends ("top",
"bottom", "left", or "right")\cr
panel.background \tab background of plotting area, drawn
underneath plot (\code{element_rect}; inherits from
\code{rect}) \cr panel.border \tab border around plotting
area, drawn on top of plot so that it covers tick marks
and grid lines. This should be used with \code{fill=NA}
(\code{element_rect}; inherits from \code{rect}) \cr
panel.margin \tab margin around facet panels
(\code{unit}) \cr panel.grid \tab grid lines
(\code{element_line}; inherits from \code{line}) \cr
panel.grid.major \tab major grid lines
(\code{element_line}; inherits from \code{panel.grid})
\cr panel.grid.minor \tab minor grid lines
(\code{element_line}; inherits from \code{panel.grid})
\cr panel.grid.major.x \tab vertical major grid lines
(\code{element_line}; inherits from
\code{panel.grid.major}) \cr panel.grid.major.y \tab
horizontal major grid lines (\code{element_line};
inherits from \code{panel.grid.major}) \cr
panel.grid.minor.x \tab vertical minor grid lines
(\code{element_line}; inherits from
\code{panel.grid.minor}) \cr panel.grid.minor.y \tab
horizontal minor grid lines (\code{element_line};
inherits from \code{panel.grid.minor}) \cr
plot.background \tab background of the entire plot
(\code{element_rect}; inherits from \code{rect}) \cr
plot.title \tab plot title (text appearance)
(\code{element_text}; inherits from \code{title}) \cr
plot.margin \tab margin around entire plot (\code{unit}
with the sizes of the top, right, bottom, and left
margins) \cr
strip.background \tab background of facet labels
(\code{element_rect}; inherits from \code{rect}) \cr
strip.text \tab facet labels (\code{element_text};
inherits from \code{text}) \cr strip.text.x \tab facet
labels along horizontal direction (\code{element_text};
inherits from \code{strip.text}) \cr strip.text.y \tab
facet labels along vertical direction
(\code{element_text}; inherits from \code{strip.text})
\cr }
}
\examples{
\donttest{
p <- qplot(mpg, wt, data = mtcars)
p
p + theme(panel.background = element_rect(colour = "pink"))
p + theme_bw()
# Scatter plot of gas mileage by vehicle weight
p <- ggplot(mtcars, aes(x = wt, y = mpg)) + geom_point()
# Calculate slope and intercept of line of best fit
coef(lm(mpg ~ wt, data = mtcars))
p + geom_abline(intercept = 37, slope = -5)
# Calculate correlation coefficient
with(mtcars, cor(wt, mpg, use = "everything", method = "pearson"))
#annotate the plot
p + geom_abline(intercept = 37, slope = -5) +
geom_text(data = data.frame(), aes(4.5, 30, label = "Pearson-R = -.87"))
# Change the axis labels
# Original plot
p
p + xlab("Vehicle Weight") + ylab("Miles per Gallon")
# Or
p + labs(x = "Vehicle Weight", y = "Miles per Gallon")
# Change title appearance
p <- p + labs(title = "Vehicle Weight-Gas Mileage Relationship")
# Set title to twice the base font size
p + theme(plot.title = element_text(size = rel(2)))
p + theme(plot.title = element_text(size = rel(2), colour = "blue"))
# Changing plot look with themes
DF <- data.frame(x = rnorm(400))
m <- ggplot(DF, aes(x = x)) + geom_histogram()
# Default is theme_grey()
m
# Compare with
m + theme_bw()
# Manipulate Axis Attributes
library(grid) # for unit
m + theme(axis.line = element_line(size = 3, colour = "red", linetype = "dotted"))
m + theme(axis.text = element_text(colour = "blue"))
m + theme(axis.text.y = element_blank())
m + theme(axis.ticks = element_line(size = 2))
m + theme(axis.title.y = element_text(size = rel(1.5), angle = 90))
m + theme(axis.title.x = element_blank())
m + theme(axis.ticks.length = unit(.85, "cm"))
# Legend Attributes
z <- ggplot(mtcars, aes(wt, mpg, colour = factor(cyl))) + geom_point()
z
z + theme(legend.position = "none")
z + theme(legend.position = "bottom")
# Or use relative coordinates between 0 and 1
z + theme(legend.position = c(.5, .5))
z + theme(legend.background = element_rect(colour = "black"))
# Legend margin controls extra space around outside of legend:
z + theme(legend.background = element_rect(), legend.margin = unit(1, "cm"))
z + theme(legend.background = element_rect(), legend.margin = unit(0, "cm"))
# Or to just the keys
z + theme(legend.key = element_rect(colour = "black"))
z + theme(legend.key = element_rect(fill = "yellow"))
z + theme(legend.key.size = unit(2.5, "cm"))
z + theme(legend.text = element_text(size = 20, colour = "red", angle = 45))
z + theme(legend.title = element_text(face = "italic"))
# To change the title of the legend use the name argument
# in one of the scale options
z + scale_colour_brewer(name = "My Legend")
z + scale_colour_grey(name = "Number of \\nCylinders")
# Panel and Plot Attributes
z + theme(panel.background = element_rect(fill = "black"))
z + theme(panel.border = element_rect(linetype = "dashed", colour = "black"))
z + theme(panel.grid.major = element_line(colour = "blue"))
z + theme(panel.grid.minor = element_line(colour = "red", linetype = "dotted"))
z + theme(panel.grid.major = element_line(size = 2))
z + theme(panel.grid.major.y = element_blank(), panel.grid.minor.y = element_blank())
z + theme(plot.background = element_rect())
z + theme(plot.background = element_rect(fill = "green"))
# Faceting Attributes
set.seed(4940)
dsmall <- diamonds[sample(nrow(diamonds), 1000), ]
k <- ggplot(dsmall, aes(carat, ..density..)) + geom_histogram(binwidth = 0.2) +
facet_grid(. ~ cut)
k + theme(strip.background = element_rect(colour = "purple", fill = "pink",
size = 3, linetype = "dashed"))
k + theme(strip.text.x = element_text(colour = "red", angle = 45, size = 10,
hjust = 0.5, vjust = 0.5))
k + theme(panel.margin = unit(5, "lines"))
k + theme(panel.margin = unit(0, "lines"))
# Modify a theme and save it
mytheme <- theme_grey() + theme(plot.title = element_text(colour = "red"))
p + mytheme
## Run this to generate a graph of the element inheritance tree
build_element_graph <- function(tree) {
require(igraph)
require(plyr)
inheritdf <- function(name, item) {
if (length(item$inherit) == 0)
data.frame()
else
data.frame(child = name, parent = item$inherit)
}
edges <- rbind.fill(mapply(inheritdf, names(tree), tree))
# Explicitly add vertices (since not all are in edge list)
vertices <- data.frame(name = names(tree))
graph.data.frame(edges, vertices = vertices)
}
g <- build_element_graph(ggplot2:::.element_tree)
V(g)$label <- V(g)$name
set.seed(324)
par(mar=c(0,0,0,0)) # Remove unnecessary margins
plot(g, layout=layout.fruchterman.reingold, vertex.size=4, vertex.label.dist=.25)
}
}
\seealso{
\code{\link{+.gg}}
\code{\link{\%+replace\%}}
\code{\link{rel}}
}
| /man/theme.Rd | no_license | kevinushey/ggplot2 | R | false | false | 10,901 | rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{theme}
\alias{theme}
\title{Set theme elements}
\usage{
theme(..., complete = FALSE)
}
\arguments{
\item{...}{a list of element name, element pairings that
modify the existing theme.}
\item{complete}{set this to TRUE if this is a complete
theme, such as the one returned \code{by theme_grey()}.
Complete themes behave differently when added to a ggplot
object.}
}
\description{
Use this function to modify theme settings.
}
\details{
Theme elements can inherit properties from other theme elements.
For example, \code{axis.title.x} inherits from \code{axis.title},
which in turn inherits from \code{text}. All text elements inherit
directly or indirectly from \code{text}; all lines inherit from
\code{line}, and all rectangular objects inherit from \code{rect}.
For more examples of modifying properties using inheritance, see
\code{\link{+.gg}} and \code{\link{\%+replace\%}}.
To see a graphical representation of the inheritance tree, see the
last example below.
}
\section{Theme elements}{
The individual theme elements are:
\tabular{ll}{ line \tab all line elements
(\code{element_line}) \cr rect \tab all rectangular
elements (\code{element_rect}) \cr text \tab all text
elements (\code{element_text}) \cr title \tab all title
elements: plot, axes, legends (\code{element_text};
inherits from \code{text}) \cr
axis.title \tab label of axes (\code{element_text};
inherits from \code{text}) \cr axis.title.x \tab x axis
label (\code{element_text}; inherits from
\code{axis.title}) \cr axis.title.y \tab y axis label
(\code{element_text}; inherits from \code{axis.title})
\cr axis.text \tab tick labels along axes
(\code{element_text}; inherits from \code{text}) \cr
axis.text.x \tab x axis tick labels (\code{element_text};
inherits from \code{axis.text}) \cr axis.text.y \tab y
axis tick labels (\code{element_text}; inherits from
\code{axis.text}) \cr axis.ticks \tab tick marks along
axes (\code{element_line}; inherits from \code{line}) \cr
axis.ticks.x \tab x axis tick marks (\code{element_line};
inherits from \code{axis.ticks}) \cr axis.ticks.y \tab y
axis tick marks (\code{element_line}; inherits from
\code{axis.ticks}) \cr axis.ticks.length \tab length of
tick marks (\code{unit}) \cr axis.ticks.margin \tab space
between tick mark and tick label (\code{unit}) \cr
axis.line \tab lines along axes (\code{element_line};
inherits from \code{line}) \cr axis.line.x \tab line
along x axis (\code{element_line}; inherits from
\code{axis.line}) \cr axis.line.y \tab line along y axis
(\code{element_line}; inherits from \code{axis.line}) \cr
legend.background \tab background of legend
(\code{element_rect}; inherits from \code{rect}) \cr
legend.margin \tab extra space added around legend
(\code{unit}) \cr legend.key \tab background underneath
legend keys (\code{element_rect}; inherits from
\code{rect}) \cr legend.key.size \tab size of legend keys
(\code{unit}; inherits from \code{legend.key.size}) \cr
legend.key.height \tab key background height
(\code{unit}; inherits from \code{legend.key.size}) \cr
legend.key.width \tab key background width (\code{unit};
inherits from \code{legend.key.size}) \cr legend.text
\tab legend item labels (\code{element_text}; inherits
from \code{text}) \cr legend.text.align \tab alignment of
legend labels (number from 0 (left) to 1 (right)) \cr
legend.title \tab title of legend (\code{element_text};
inherits from \code{title}) \cr legend.title.align \tab
alignment of legend title (number from 0 (left) to 1
(right)) \cr legend.position \tab the position of legends
("none", "left", "right", "bottom", "top", or two-element
numeric vector) \cr legend.direction \tab layout of items
in legends ("horizontal" or "vertical") \cr
legend.justification \tab anchor point for positioning
legend inside plot ("center" or two-element numeric
vector) \cr legend.box \tab arrangement of multiple
legends ("horizontal" or "vertical") \cr legend.box.just
\tab justification of each legend within the overall
bounding box, when there are multiple legends ("top",
"bottom", "left", or "right")\cr
panel.background \tab background of plotting area, drawn
underneath plot (\code{element_rect}; inherits from
\code{rect}) \cr panel.border \tab border around plotting
area, drawn on top of plot so that it covers tick marks
and grid lines. This should be used with \code{fill=NA}
(\code{element_rect}; inherits from \code{rect}) \cr
panel.margin \tab margin around facet panels
(\code{unit}) \cr panel.grid \tab grid lines
(\code{element_line}; inherits from \code{line}) \cr
panel.grid.major \tab major grid lines
(\code{element_line}; inherits from \code{panel.grid})
\cr panel.grid.minor \tab minor grid lines
(\code{element_line}; inherits from \code{panel.grid})
\cr panel.grid.major.x \tab vertical major grid lines
(\code{element_line}; inherits from
\code{panel.grid.major}) \cr panel.grid.major.y \tab
horizontal major grid lines (\code{element_line};
inherits from \code{panel.grid.major}) \cr
panel.grid.minor.x \tab vertical minor grid lines
(\code{element_line}; inherits from
\code{panel.grid.minor}) \cr panel.grid.minor.y \tab
horizontal minor grid lines (\code{element_line};
inherits from \code{panel.grid.minor}) \cr
plot.background \tab background of the entire plot
(\code{element_rect}; inherits from \code{rect}) \cr
plot.title \tab plot title (text appearance)
(\code{element_text}; inherits from \code{title}) \cr
plot.margin \tab margin around entire plot (\code{unit}
with the sizes of the top, right, bottom, and left
margins) \cr
strip.background \tab background of facet labels
(\code{element_rect}; inherits from \code{rect}) \cr
strip.text \tab facet labels (\code{element_text};
inherits from \code{text}) \cr strip.text.x \tab facet
labels along horizontal direction (\code{element_text};
inherits from \code{strip.text}) \cr strip.text.y \tab
facet labels along vertical direction
(\code{element_text}; inherits from \code{strip.text})
\cr }
}
\examples{
\donttest{
p <- qplot(mpg, wt, data = mtcars)
p
p + theme(panel.background = element_rect(colour = "pink"))
p + theme_bw()
# Scatter plot of gas mileage by vehicle weight
p <- ggplot(mtcars, aes(x = wt, y = mpg)) + geom_point()
# Calculate slope and intercept of line of best fit
coef(lm(mpg ~ wt, data = mtcars))
p + geom_abline(intercept = 37, slope = -5)
# Calculate correlation coefficient
with(mtcars, cor(wt, mpg, use = "everything", method = "pearson"))
#annotate the plot
p + geom_abline(intercept = 37, slope = -5) +
geom_text(data = data.frame(), aes(4.5, 30, label = "Pearson-R = -.87"))
# Change the axis labels
# Original plot
p
p + xlab("Vehicle Weight") + ylab("Miles per Gallon")
# Or
p + labs(x = "Vehicle Weight", y = "Miles per Gallon")
# Change title appearance
p <- p + labs(title = "Vehicle Weight-Gas Mileage Relationship")
# Set title to twice the base font size
p + theme(plot.title = element_text(size = rel(2)))
p + theme(plot.title = element_text(size = rel(2), colour = "blue"))
# Changing plot look with themes
DF <- data.frame(x = rnorm(400))
m <- ggplot(DF, aes(x = x)) + geom_histogram()
# Default is theme_grey()
m
# Compare with
m + theme_bw()
# Manipulate Axis Attributes
library(grid) # for unit
m + theme(axis.line = element_line(size = 3, colour = "red", linetype = "dotted"))
m + theme(axis.text = element_text(colour = "blue"))
m + theme(axis.text.y = element_blank())
m + theme(axis.ticks = element_line(size = 2))
m + theme(axis.title.y = element_text(size = rel(1.5), angle = 90))
m + theme(axis.title.x = element_blank())
m + theme(axis.ticks.length = unit(.85, "cm"))
# Legend Attributes
z <- ggplot(mtcars, aes(wt, mpg, colour = factor(cyl))) + geom_point()
z
z + theme(legend.position = "none")
z + theme(legend.position = "bottom")
# Or use relative coordinates between 0 and 1
z + theme(legend.position = c(.5, .5))
z + theme(legend.background = element_rect(colour = "black"))
# Legend margin controls extra space around outside of legend:
z + theme(legend.background = element_rect(), legend.margin = unit(1, "cm"))
z + theme(legend.background = element_rect(), legend.margin = unit(0, "cm"))
# Or to just the keys
z + theme(legend.key = element_rect(colour = "black"))
z + theme(legend.key = element_rect(fill = "yellow"))
z + theme(legend.key.size = unit(2.5, "cm"))
z + theme(legend.text = element_text(size = 20, colour = "red", angle = 45))
z + theme(legend.title = element_text(face = "italic"))
# To change the title of the legend use the name argument
# in one of the scale options
z + scale_colour_brewer(name = "My Legend")
z + scale_colour_grey(name = "Number of \\nCylinders")
# Panel and Plot Attributes
z + theme(panel.background = element_rect(fill = "black"))
z + theme(panel.border = element_rect(linetype = "dashed", colour = "black"))
z + theme(panel.grid.major = element_line(colour = "blue"))
z + theme(panel.grid.minor = element_line(colour = "red", linetype = "dotted"))
z + theme(panel.grid.major = element_line(size = 2))
z + theme(panel.grid.major.y = element_blank(), panel.grid.minor.y = element_blank())
z + theme(plot.background = element_rect())
z + theme(plot.background = element_rect(fill = "green"))
# Faceting Attributes
set.seed(4940)
dsmall <- diamonds[sample(nrow(diamonds), 1000), ]
k <- ggplot(dsmall, aes(carat, ..density..)) + geom_histogram(binwidth = 0.2) +
facet_grid(. ~ cut)
k + theme(strip.background = element_rect(colour = "purple", fill = "pink",
size = 3, linetype = "dashed"))
k + theme(strip.text.x = element_text(colour = "red", angle = 45, size = 10,
hjust = 0.5, vjust = 0.5))
k + theme(panel.margin = unit(5, "lines"))
k + theme(panel.margin = unit(0, "lines"))
# Modify a theme and save it
mytheme <- theme_grey() + theme(plot.title = element_text(colour = "red"))
p + mytheme
## Run this to generate a graph of the element inheritance tree
build_element_graph <- function(tree) {
require(igraph)
require(plyr)
inheritdf <- function(name, item) {
if (length(item$inherit) == 0)
data.frame()
else
data.frame(child = name, parent = item$inherit)
}
edges <- rbind.fill(mapply(inheritdf, names(tree), tree))
# Explicitly add vertices (since not all are in edge list)
vertices <- data.frame(name = names(tree))
graph.data.frame(edges, vertices = vertices)
}
g <- build_element_graph(ggplot2:::.element_tree)
V(g)$label <- V(g)$name
set.seed(324)
par(mar=c(0,0,0,0)) # Remove unnecessary margins
plot(g, layout=layout.fruchterman.reingold, vertex.size=4, vertex.label.dist=.25)
}
}
\seealso{
\code{\link{+.gg}}
\code{\link{\%+replace\%}}
\code{\link{rel}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ratmaze.df}
\alias{ratmaze.df}
\title{Times taken for a rat to navigate through a maze}
\format{A data.frame with 135 rows and 4 columns:
\describe{
\item{subject}{An ID for each rat}
\item{treatment}{The treatment administered to the subject: control/none, thiouracil, thyroxin.}
\item{test}{A maze number.}
\item{time}{time, in seconds taken for the rat to navigate the maze.}
}}
\usage{
ratmaze.df
}
\description{
Times taken for a rat to navigate through a maze
}
\keyword{datasets}
| /man/ratmaze.df.Rd | no_license | cran/jaggR | R | false | true | 592 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ratmaze.df}
\alias{ratmaze.df}
\title{Times taken for a rat to navigate through a maze}
\format{A data.frame with 135 rows and 4 columns:
\describe{
\item{subject}{An ID for each rat}
\item{treatment}{The treatment administered to the subject: control/none, thiouracil, thyroxin.}
\item{test}{A maze number.}
\item{time}{time, in seconds taken for the rat to navigate the maze.}
}}
\usage{
ratmaze.df
}
\description{
Times taken for a rat to navigate through a maze
}
\keyword{datasets}
|
library(tidyverse)
rnorm(100)
#### AR(1) ####
y.50 <- arima.sim(model = list(ar = c(1.8, -.81)), n = 500)
plot(y.50)
plot(y.50, type = "o")
# plot ACF - autocorrelation function
acf(y.50)
## lag = rows
## blue lines are 95% CI
## larger sample size = closer estimate of ACF
# store values instead of plot
temp <- acf(y.50, plot = FALSE)
temp$acf
#### PACF ####
acf(y.50, type = "partial")
# You can look at plot of Z_t versus Z_t-1 for the AR(1) series
plot(lag(y.50, 1), y.50,
xlab = expression(Z[t-1]),
ylab = expression(Z[t]))
# R source code for generating data from AR(1) processes.
#set the seed so we all have the same time series
set.seed(1)
#### AR Example 1 ####
# AR(1) series with phi = 0.1
y.10 <- arima.sim(model=list(ar=0.1), n=100)
# AR(1) series with phi= 0.5
y.50 <- arima.sim(model=list(ar=0.5), n=100)
# AR(1) series with phi= -0.5
y.m50 <- arima.sim(model=list(ar=-0.5), n=100)
# AR(1) series with phi= 0.9
y.90 <- arima.sim(model=list(ar=.9), n=100)
# set up for a 2x2 matrix of graphs
par(mfrow=c(2,2))
plot(y.10)
title("phi=0.1, Stationary AR(1) process")
abline(h=0)
plot(y.50)
title("phi = 0.5, Stationary AR(1) process")
abline(h=0)
plot(y.m50)
title("phi = -0.5, Stationary AR(1) process")
abline(h=0)
plot(y.90)
title("phi = 0.9, Stationary AR(1) process")
abline(h=0)
#### Example 2 ####
# Now plot the ACF and PACF of the time series.
# R source code for generating ACF and PACF from AR(1) processes.
# set up for a 4x2 matrix of graphs
par(mfrow=c(4,2))
acf(y.10)
acf(y.10, type="partial")
acf(y.50)
acf(y.50, type="partial")
acf(y.m50)
acf(y.m50, type="partial")
acf(y.90)
acf(y.90, type="partial")
#### Example 3 - The Random Walk ####
# generate the random walk
# generate the noise (innovations)
set.seed(6)
# standard normal parameters
mu=0.0
sd=1
#generate n, N(0,1) RVs
n=100
e=rnorm(n=n,mean=mu,sd=sd)
# make plot region 2 x 2
par(mfrow=c(2,2))
# time plot of the innovations
plot(e, type="l")
abline(h=0,col='blue')
plot(e,type='b')
abline(h=0,col='blue')
plot(e,type='o')
abline(h=0,col='blue')
# generate random walk: Y_t = Y_t-1 + e_t
y=cumsum(e)
plot(y,type='o', xlab="Time", ylab="Y_t")
abline(h=0,col='blue')
title("Random Walk")
set.seed(27)
# AR(2)
y <- arima.sim(model = list(ar = c(0.05, 0.23)), n = 100)
names(y)
#### MA(1) ####
## The model: Zt = at - θ1at-1 (Book)
## The model: Zt = at + b1at-1 (R)
## example 3 : Simulate and plot the ACF and PACF of the time series for θ1 = 0.5
set.seed(13)
# y <- arima.sim(model=list(ma = 0.5), n=100)
y <- arima.sim(model=list(ma = -0.5), n=100)
## The one with negative is right since R notation is + b1at-1 instead of - in the book
## check theoretical values to compare
ARMAacf(ma = c(-0.5), lag.max = 20, pacf = FALSE)
ARMAacf(ma = c(-0.5), lag.max = 20, pacf = TRUE)
## plot the ACF
par(mfrow=c(2,2))
acf(y)
acf(y, type="partial")
## let try 1000
y <- arima.sim(model=list(ma = -0.5), n=1000)
acf(y)
acf(y, type="partial")
#### MA(2) ####
y <- arima.sim(model=list(ma = c(-0.65, -.24)), n=1000)
ARMAacf(ma = c(-0.65, -.24), lag.max = 20, pacf = FALSE)
ARMAacf(ma = c(-0.65, -.24), lag.max = 20, pacf = TRUE)
acf(y, plot = F)
acf(y, type="partial", plot = FALSE)
| /R/Lab-AR(p)-and-MA(q).R | no_license | dinhkristine/time-series | R | false | false | 3,254 | r | library(tidyverse)
rnorm(100)
#### AR(1) ####
y.50 <- arima.sim(model = list(ar = c(1.8, -.81)), n = 500)
plot(y.50)
plot(y.50, type = "o")
# plot ACF - autocorrelation function
acf(y.50)
## lag = rows
## blue lines are 95% CI
## larger sample size = closer estimate of ACF
# store values instead of plot
temp <- acf(y.50, plot = FALSE)
temp$acf
#### PACF ####
acf(y.50, type = "partial")
# You can look at plot of Z_t versus Z_t-1 for the AR(1) series
plot(lag(y.50, 1), y.50,
xlab = expression(Z[t-1]),
ylab = expression(Z[t]))
# R source code for generating data from AR(1) processes.
#set the seed so we all have the same time series
set.seed(1)
#### AR Example 1 ####
# AR(1) series with phi = 0.1
y.10 <- arima.sim(model=list(ar=0.1), n=100)
# AR(1) series with phi= 0.5
y.50 <- arima.sim(model=list(ar=0.5), n=100)
# AR(1) series with phi= -0.5
y.m50 <- arima.sim(model=list(ar=-0.5), n=100)
# AR(1) series with phi= 0.9
y.90 <- arima.sim(model=list(ar=.9), n=100)
# set up for a 2x2 matrix of graphs
par(mfrow=c(2,2))
plot(y.10)
title("phi=0.1, Stationary AR(1) process")
abline(h=0)
plot(y.50)
title("phi = 0.5, Stationary AR(1) process")
abline(h=0)
plot(y.m50)
title("phi = -0.5, Stationary AR(1) process")
abline(h=0)
plot(y.90)
title("phi = 0.9, Stationary AR(1) process")
abline(h=0)
#### Example 2 ####
# Now plot the ACF and PACF of the time series.
# R source code for generating ACF and PACF from AR(1) processes.
# set up for a 4x2 matrix of graphs
par(mfrow=c(4,2))
acf(y.10)
acf(y.10, type="partial")
acf(y.50)
acf(y.50, type="partial")
acf(y.m50)
acf(y.m50, type="partial")
acf(y.90)
acf(y.90, type="partial")
#### Example 3 - The Random Walk ####
# generate the random walk
# generate the noise (innovations)
set.seed(6)
# standard normal parameters
mu=0.0
sd=1
#generate n, N(0,1) RVs
n=100
e=rnorm(n=n,mean=mu,sd=sd)
# make plot region 2 x 2
par(mfrow=c(2,2))
# time plot of the innovations
plot(e, type="l")
abline(h=0,col='blue')
plot(e,type='b')
abline(h=0,col='blue')
plot(e,type='o')
abline(h=0,col='blue')
# generate random walk: Y_t = Y_t-1 + e_t
y=cumsum(e)
plot(y,type='o', xlab="Time", ylab="Y_t")
abline(h=0,col='blue')
title("Random Walk")
set.seed(27)
# AR(2)
y <- arima.sim(model = list(ar = c(0.05, 0.23)), n = 100)
names(y)
#### MA(1) ####
## The model: Zt = at - θ1at-1 (Book)
## The model: Zt = at + b1at-1 (R)
## example 3 : Simulate and plot the ACF and PACF of the time series for θ1 = 0.5
set.seed(13)
# y <- arima.sim(model=list(ma = 0.5), n=100)
y <- arima.sim(model=list(ma = -0.5), n=100)
## The one with negative is right since R notation is + b1at-1 instead of - in the book
## check theoratical values to compare
ARMAacf(ma = c(-0.5), lag.max = 20, pacf = FALSE)
ARMAacf(ma = c(-0.5), lag.max = 20, pacf = TRUE)
## plot the ACF
par(mfrow=c(2,2))
acf(y)
acf(y, type="partial")
## let try 1000
y <- arima.sim(model=list(ma = -0.5), n=1000)
acf(y)
acf(y, type="partial")
#### MA(2) ####
y <- arima.sim(model=list(ma = c(-0.65, -.24)), n=1000)
ARMAacf(ma = c(-0.65, -.24), lag.max = 20, pacf = FALSE)
ARMAacf(ma = c(-0.65, -.24), lag.max = 20, pacf = TRUE)
acf(y, plot = F)
acf(y, type="partial", plot = FALSE)
|
# I confirm that the attached is my own work, except where clearly indicated
# in the text.
source('BCaHelperFunctions.r') # Use Len's code to help with BCa Bootstrap
library(plot3D) # Used by the sim.plot.3D function
library(reshape2) # Used by the sim.plot.3D function
library(rgl) # Used by the sim.plot.3D function
library(magrittr) # ceci n'est pas une pipe, hon hon hon
non.parametric.sample <- function(data, n, check.inputs = FALSE) {
  # purpose : draw n bootstrap resamples, each of size length(data), from the
  #           supplied data (sampling with replacement)
  #
  # inputs  : data         - numeric vector of univariate observations
  #           n            - positive integer number of resamples to draw
  #           check.inputs - logical; if TRUE, validate the arguments first
  #
  # output  : length(data) x n matrix; each column is one bootstrap resample
  if (check.inputs) {  # optional input checks
    # is.numeric() accepts both double and integer vectors, replacing the
    # fragile class(data) != 'numeric' & class(data) != 'integer' test
    if (!is.numeric(data)) stop('input data must be numeric')
    if (n %% 1 != 0 || n < 1) stop('n must be a positive integer')
  }
  # one big draw of length(data)*n values, reshaped so each column is a sample
  matrix(sample(data, length(data) * n, replace = TRUE), ncol = n)
}
get.bca.alphas <- function(data, est, boot.est, alpha, func) {
  # purpose : compute the two adjusted quantile levels (alpha1, alpha2) used
  #           to read a BCa confidence interval off the bootstrap distribution
  #
  # inputs  : data     - numeric vector of univariate observations
  #           est      - statistic of interest from the original sample
  #           boot.est - statistic of interest from each bootstrap sample
  #           alpha    - value in (0, 1); a (1 - alpha)*100 % CI is targeted
  #           func     - function such that func(data) produces est
  #
  # output  : named vector c(alpha1, alpha2) such that
  #           quantile(data, probs = .) yields the BCa interval
  #
  # notes   : private helper; inputs are assumed to have been validated by the
  #           caller.  Relies on get.zhat0() and get.ahat() from
  #           BCaHelperFunctions.r.
  half.alpha <- alpha / 2                # so we end up with a (1-2a)*100 % CI
  zhat0 <- get.zhat0(est, boot.est)      # bias-correction term
  ahat  <- get.ahat(data, est, func)     # acceleration term
  # BCa adjustment (assignment 3 outline): shift/rescale the standard-normal
  # quantiles by zhat0 and ahat, then map back through the normal CDF.
  z.lo <- zhat0 + qnorm(half.alpha)
  z.hi <- zhat0 + qnorm(1 - half.alpha)
  alphas <- pnorm(c(zhat0 + z.lo / (1 - ahat * z.lo),
                    zhat0 + z.hi / (1 - ahat * z.hi)))
  names(alphas) <- c('alpha1', 'alpha2')
  alphas
}
bootstrap.type.checks <- function(data, n, alpha, func, method,
                                  smooth.sd, dist.func = NULL) {
  # purpose : check that all the inputs of the bootstrap function are of the
  #           expected type and satisfy the conditions they impose on each
  #           other; stops with an informative message on the first failure
  #
  # inputs  : data      - vector of univariate observations
  #           n         - number of bootstrap resamples
  #           alpha     - target coverage of interval
  #           func      - function which calculates the statistic of interest
  #           method    - character name of the method to be used
  #           smooth.sd - sd multiplier for the noise used by smooth bootstraps
  #           dist.func - character name of the function producing our deviates
  #
  # output  : NULL (invisibly) if no checks fail; otherwise stops with the
  #           error message relevant to the first failed check
  if (!is.numeric(data)) stop('input data must be numeric')
  # parametric methods need the *name* of a sampling function that exists:
  if ((method == 'parametric' || method == 'par.fit') &&
      !(is.character(dist.func) &&
        is.function(try(match.fun(dist.func), silent = TRUE)))) {
    stop('when method is parametric or par.fit, dist.func must be provided')
  }
  # short-circuit || so a non-scalar smooth.sd never reaches the < comparison
  # (the original vectorised | made `if` fail with "condition has length > 1"
  # instead of producing the intended message)
  if (method == 'smooth' && (!is.numeric(smooth.sd) ||
                             length(smooth.sd) > 1 || smooth.sd < 0)) {
    stop('When method = \'smooth\', smooth.sd must be a positive scalar')
  }
  if (n %% 1 != 0 || n < 2) stop('n must be a positive integer greater than 1')
  if (alpha < 0 || alpha > 1) stop('alpha must be between 0 and 1')
  if (!is.function(func)) stop('invalid function supplied as func argument')
  if (!method %in% c('percentile', 'BCa', 'parametric', 'smooth', 'par.fit')) {
    stop('invalid method')
  }
  invisible(NULL)
}
bootstrap <- function(data, n=999, alpha = 0.05, func = mean,
                      method = 'percentile', smooth.sd = 0.2,
                      dist.func = NULL, check.inputs=T, ...){
  # purpose : produces a 1 - alpha % confidence interval for the 'func' of the
  #           data, using a bootstrap of n resamples
  #
  # inputs  : data         - numeric vector of observations from a univariate
  #                          distribution
  #           n            - the number of resamples to perform for the
  #                          bootstrap
  #           alpha        - a number between 0 and 1, such that a 1-alpha %
  #                          confidence interval will be produced
  #           func         - function used to calculate the statistic of
  #                          interest for each sample. The default is 'mean'.
  #           method       - one of 'percentile', 'BCa', 'parametric',
  #                          'smooth', 'par.fit'. When 'parametric' is chosen,
  #                          the percentile method is applied to samples drawn
  #                          from dist.func, which must be specified. The
  #                          remaining options are non-parametric. 'smooth'
  #                          adds normal noise centred at 0 whose sd is a
  #                          fraction of the sample sd (set via smooth.sd).
  #                          'par.fit' is a parametric percentile bootstrap
  #                          which estimates the distribution parameters from
  #                          the data instead of using known values; it only
  #                          works with dist.func = rnorm, rpois or rgamma.
  #           smooth.sd    - multiplier for the sample standard deviation used
  #                          by method = 'smooth' (noise sd is
  #                          smooth.sd * sd(data))
  #           dist.func    - character name of the sampling function for the
  #                          parametric methods. Its first argument is assumed
  #                          to be the number of deviates to produce, as with
  #                          rnorm, runif, rpois, rgamma, etc.
  #           check.inputs - logical, if TRUE all inputs are type checked
  #           ...          - extra optional parameters to be passed to
  #                          dist.func
  #
  # output  : named vector containing the lower and upper bounds of the
  #           interval
  # to allow the user to pass in the func or the func name:
  func <- try(match.fun(func), silent=T)
  #### Input checks:
  if (check.inputs){
    # if dist.func has not been set, call the checking function with NULL
    if (is.null(dist.func)) bootstrap.type.checks(data, n, alpha, func, method,
                                                  smooth.sd, NULL)
    # otherwise, pass in dist.func itself. If we tried to do this when
    # dist.func is.null(), we would get an error, hence this unsatisfactory
    # way of dealing with the problem:
    else {bootstrap.type.checks(data, n, alpha, func, method, smooth.sd,
                                dist.func)}
  }
  ### End of input-checks
  if (is.null(dist.func)==FALSE & (method=='parametric'| method=='par.fit')){
    dist.func.name <- dist.func       # for method=par.fit, we need to know
    dist.func <- match.fun(dist.func) # the name of dist.func
  }
  ldata <- length(data)
  # If the user has set smooth.sd to 0 and wants a smooth bootstrap, we obtain
  # the same result more efficiently by simply providing them with a percentile
  # bootstrap:
  if (smooth.sd==0 & method=='smooth') method <- 'percentile'
  if (method!='parametric' & method!='par.fit'){
    # generate the bootstrap resamples, one per matrix column:
    samples <- non.parametric.sample(data, n)
    if (method=='smooth'){ # add noise to the data for a smooth bootstrap
      samples <- samples + matrix(data=rnorm(ldata*n, sd=sd(data)*smooth.sd),
                                  nrow=ldata, ncol=n)
    }
  }
  else{ # parametric resamples
    if (method=='par.fit'){
      # get MLEs for the parameters of the distribution:
      dist.func.args <- switch(dist.func.name,
                               # if the data are normal, this is easy using
                               # sample statistics
                               rnorm=list(n=ldata,mean=mean(data),sd=sd(data)),
                               # same goes for poisson data
                               rpois=list(n=ldata, lambda=mean(data)),
                               # if the data are gamma, we need to call an MLE
                               # function to get the estimates, and extract
                               # initial guesses from the ones passed by the
                               # user using ... arguments.
                               rgamma=as.list(c(n=ldata, gammaMLE(
                                 log(c(list(...)$rate,list(...)$shape)),data))))
      # Note, we add in n=ldata to the list of estimated parameters so
      # that the use of do.call() becomes possible
    }
    # If method=='parametric' we simply use the true parameter values as the
    # ones we pass to dist.func:
    else {dist.func.args <- as.list(c(n=ldata,list(...)))}
    samples <- matrix(nrow=ldata,ncol=n)
    # the replicate function worked badly with functions like rpois and rt,
    # so a less efficient method has to be used to produce parametric samples:
    for (i in 1:n){samples[,i] <- do.call(dist.func,dist.func.args)}
  }
  stats <- rep(NA, n)               # some profiling revealed a for loop was
  for (i in 1:n){                   # in fact far faster than using the
    stats[i] <- func(samples[,i])   # apply function to calculate the statistics
  }
  stats <- c(stats, func(data)) # add the original-sample statistic as well
  lower <- alpha/2     # default quantile levels: the
  upper <- 1 - alpha/2 # plain percentile method
  if (method=='BCa'){
    # BCa replaces the plain quantile levels with bias/acceleration
    # corrected ones (see get.bca.alphas)
    alphas <- get.bca.alphas(data, func(data), stats, alpha, func)
    lower <- alphas[1]
    upper <- alphas[2]
  }
  CI <- quantile(stats, probs=c(lower, upper))
  names(CI) <- c('lower','upper')
  return(CI)
}
simulation <- function(dist.func, simulations, sample.n, boot.n, boot.method,
                       stat.func=mean, alpha=0.05, smooth.sd=0.2,...){
  # purpose : run a set of bootstrap simulations over a grid of settings
  #
  # inputs  : dist.func   - character name of the function used to generate
  #                         the random data at the start of each simulation
  #           simulations - how many bootstrap intervals to produce for each
  #                         combination of settings
  #           sample.n    - sample size(s) drawn via dist.func; may be a vector
  #           boot.n      - number(s) of bootstrap resamples; may be a vector
  #           boot.method - 'percentile', 'BCa', 'smooth' or 'parametric';
  #                         may be a vector
  #           stat.func   - function computing the statistic of interest
  #           alpha       - (1-alpha)*100 % confidence intervals are produced
  #           smooth.sd   - fraction of the sample sd used as the noise sd for
  #                         smooth bootstraps
  #           ...         - extra parameters to be passed to dist.func
  #
  # output  : a 4-d array of class 'simulation.output.object' holding all the
  #           simulated intervals; the 4th dimension stores them flattened as
  #           c(lower1, upper1, lower2, upper2, ...)
  # Build the array which will store every generated interval.  The 4th
  # dimension holds the 2*simulations interval bounds and is deliberately
  # unnamed: dimnames must have exactly one component per dimension, so a
  # NULL placeholder is required (the original length-3 list made array()
  # stop with a dimnames/dims length mismatch error).
  output <- array(data = NA,
                  dim = c(length(sample.n), length(boot.n),
                          length(boot.method), 2*simulations),
                  dimnames = list(paste('sample.n:', as.character(sample.n)),
                                  paste('boot.n:', as.character(boot.n)),
                                  paste('boot.method:', boot.method),
                                  NULL))
  for (sample.n.setting in sample.n){
    for (boot.n.setting in boot.n){
      for (boot.method.setting in boot.method){
        sample.n.index <- which(sample.n==sample.n.setting)  # extract indices
        boot.n.index <- which(boot.n==boot.n.setting)        # of settings
        boot.method.index <- which(boot.method==boot.method.setting)
        sims <- matrix(nrow=2, ncol=simulations)
        dist.function <- match.fun(dist.func)
        for (i in seq_len(simulations)){
          dataset <- dist.function(sample.n.setting, ...)  # the O.G. sample
          # get the bootstrap interval for that dataset:
          boot <- bootstrap(dataset, n=boot.n.setting, alpha=alpha,
                            func = stat.func,
                            method = boot.method.setting,
                            smooth.sd = smooth.sd,
                            dist.func = dist.func, ...)
          sims[,i] <- boot  # column i = c(lower, upper) of interval i
        }
        # flatten the matrix column-wise into the output array, giving the
        # c(lower1, upper1, lower2, ...) layout that get.coverage() expects:
        output[sample.n.index, boot.n.index, boot.method.index,] <- sims
      }
    }
  }
  class(output) <- 'simulation.output.object'
  return(output)
}
calculate.summaries <- function(simulation.output.object, true.value){
  # purpose : compute summary statistics (coverage, mean interval length and
  #           failure tendency) for every combination of simulation settings
  #           stored in a simulation.output.object
  #
  # inputs  : simulation.output.object - the result of calling simulation();
  #                                      a 4-d array containing all simulated
  #                                      bootstrap intervals per setting level
  #           true.value               - the true value of the statistic for
  #                                      the distribution used to produce the
  #                                      deviates
  #
  # output  : a 'simulation.summary.object': same first three dimensions as
  #           the input, with the 4th dimension holding the three summaries
  if (!inherits(simulation.output.object, 'simulation.output.object')){
    stop('input must be a valid simulation.output.object')
  }
  # is.numeric() also accepts integer input, which the original
  # class(true.value) != 'numeric' test wrongly rejected
  if (!is.numeric(true.value)) stop('true.value must be a real number')
  dims <- dim(simulation.output.object)
  output <- array(dim = c(dims[1], dims[2], dims[3], 3))
  dimnames(output) <- dimnames(simulation.output.object)
  for (i in seq_len(dims[1])){   # walk every combination of simulation
    for (j in seq_len(dims[2])){ # settings and calculate the summary
      for (k in seq_len(dims[3])){ # statistics of interest:
        boot.ints <- simulation.output.object[i, j, k, ]  # extract intervals
        coverage <- get.coverage(boot.ints, true.value)
        # renamed from 'length' so the local does not shadow base::length
        int.length <- get.length(boot.ints)
        failure.tend <- get.coverage(boot.ints, true.value, failure.t = TRUE)
        summaries <- c(coverage, int.length, failure.tend)
        names(summaries) <- c('coverage', 'length', 'failure tendency')
        output[i, j, k, ] <- summaries
      }
    }
  }
  class(output) <- 'simulation.summary.object'
  return(output)
}
get.coverage <- function(bootstrap.results, true.value, failure.t=FALSE){
  # purpose : returns the observed coverage (or failure tendency) of a
  #           sequence of confidence intervals
  #
  # inputs  : bootstrap.results - vector of bootstrap intervals in the format
  #                               c(lower1, upper1, lower2, upper2, ...)
  #           true.value        - true value of the statistic of interest
  #           failure.t         - if TRUE, return the failure tendency
  #                               instead: the proportion (0 to 1) of the
  #                               non-covering intervals for which the true
  #                               value fell to the LEFT of the interval
  #
  # output  : numeric scalar; the observed coverage (or failure tendency)
  if (!is.numeric(bootstrap.results) || !is.numeric(true.value)) {
    stop('invalid input')
  }
  n <- length(bootstrap.results)
  if (n %% 2 != 0) stop('input of odd length is not allowed')
  lowers <- bootstrap.results[seq(1, n, 2)] # split the flat vector into
  uppers <- bootstrap.results[seq(2, n, 2)] # lower and upper bounds
  # is the true value contained in each of the confidence intervals?
  in.interval <- (true.value >= lowers & true.value <= uppers)
  if (!failure.t) {
    return(sum(in.interval) / (n / 2))  # observed coverage
  }
  not.in <- !in.interval  # direct negation replaces as.logical(1 - x)
  failed.lowers <- lowers[not.in]
  # NOTE(review): when every interval covers true.value this is 0/0 = NaN;
  # callers should treat a NaN failure tendency as "no failures observed"
  sum(true.value < failed.lowers) / sum(not.in)
}
get.length <- function(bootstrap.results){
  # purpose : returns the observed average interval length for a sequence of
  #           confidence intervals
  #
  # inputs  : bootstrap.results - vector of bootstrap intervals in the format
  #                               c(lower1, upper1, lower2, upper2, ...)
  #
  # output  : numeric scalar; the mean absolute interval length
  if (!is.numeric(bootstrap.results)) stop('invalid input')
  n <- length(bootstrap.results)
  if (n %% 2 != 0) stop('input of odd length is not allowed')
  lowers <- bootstrap.results[seq(1, n, 2)] # split the flat vector into
  uppers <- bootstrap.results[seq(2, n, 2)] # lower and upper bounds
  mean(abs(uppers - lowers))  # estimated average interval length
}
plot.simulation.summary.object <- function(simulation.summary.object,
                                           statistic = 'coverage',
                                           fix.to.top = FALSE, ...) {
  # purpose : plot the chosen summary statistic against sample size and then
  #           against the number of bootstrap resamples, one line per method
  #
  # inputs  : simulation.summary.object - array of summary statistics for the
  #                                       simulated intervals
  #           statistic                 - 'coverage', 'length' or
  #                                       'failure tendency'
  #           fix.to.top                - if FALSE, average over the dimension
  #                                       not being plotted; if TRUE, fix it
  #                                       at its highest setting instead
  #           ...                       - extra parameters passed to matplot
  #
  # output  : none; produces two plots as a side effect
  if (!inherits(simulation.summary.object, 'simulation.summary.object')) {
    stop('invalid input type')
  }
  if (!(statistic %in% c('coverage', 'length', 'failure tendency'))) {
    stop('invalid choice of statistic')
  }
  # fetch summary statistic index:
  stat.ind <- switch(statistic, 'coverage' = 1, 'length' = 2,
                     'failure tendency' = 3)
  dims <- dim(simulation.summary.object)
  dims.not.stats <- dim(simulation.summary.object[,,,1])
  msg1 <- 'Can only plot summaries when sample.n, bootstrap.n and'
  msg2 <- 'method are all vectors'
  if (any(dims.not.stats < 2)) stop(paste(msg1, msg2))
  for (plot.num in c(1, 2)) {
    # sample-size plot first (plot.num == 1), then bootstrap-resamples plot;
    # x axis values come from the digits embedded in the dimnames:
    x <- as.numeric(gsub('[^0-9]', '',
                         dimnames(simulation.summary.object)[[plot.num]]))
    if (!fix.to.top) {
      # average the statistic over the dimension not being plotted
      y <- apply(simulation.summary.object[,,,stat.ind], c(plot.num, 3), mean)
    } else if (plot.num == 1) {
      # ...or fix the other dimension at its highest setting.  Plain if/else
      # replaces the original ifelse(cond, y <- a, y <- b): ifelse() is a
      # vectorised data function, not a scalar branch construct.
      y <- simulation.summary.object[, dims[2], , stat.ind]
    } else {
      y <- simulation.summary.object[dims[1], , , stat.ind]
    }
    xlab <- c('sample size', 'bootstrap resamples')[plot.num]
    # Draw the plot, one line and legend entry per bootstrap method:
    method.names <- gsub('boot.method: ', '',
                         dimnames(simulation.summary.object)[[3]])
    matplot(x, y, ylab = statistic, xlab = xlab, type = 'l',
            col = seq_len(dims[3]), ...)
    legend('topright', method.names, lty = 1, col = seq_len(dims[3]),
           bty = 'n', cex = .75)
  }
}
sim.plot.3D <- function(simulation.summary.object, statistic, method,
                        hist = FALSE, ...) {
  # purpose : produce two 3D plots (a scatter and a surface/histogram) of a
  #           summary statistic for the simulated bootstrap intervals
  #
  # inputs  : simulation.summary.object - array of summary statistics per
  #                                       level of simulation setting
  #           statistic                 - character name of the statistic to
  #                                       plot ('coverage', 'length' or
  #                                       'failure tendency')
  #           method                    - integer index of the bootstrap
  #                                       method to plot (3rd array dimension)
  #           hist                      - if TRUE, draw a 3D histogram instead
  #                                       of a 3D perspective surface
  #           ...                       - extra parameters for scatter3D
  #
  # output  : invisibly, list(x, y, z) used for the persp3D plot; primarily
  #           useful for debugging or re-plotting with other 3D functions
  ### input checks:
  if (!inherits(simulation.summary.object, 'simulation.summary.object')) {
    stop('invalid input type')
  }
  if (!(statistic %in% c('coverage', 'length', 'failure tendency'))) {
    stop('invalid choice of statistic')
  }
  if (method < 1 || method %% 1 != 0 ||
      method > dim(simulation.summary.object)[3]) {
    stop('invalid choice of method')
  }
  if (any(dim(simulation.summary.object[,,,1]) < 2)) {
    stop('all simulation settings must be vectors to plot in 3D')
  }
  ### end of input checks ###
  # fetch summary statistic index:
  stat.ind <- switch(statistic, 'coverage' = 1, 'length' = 2,
                     'failure tendency' = 3)
  Dnames <- dimnames(simulation.summary.object)              # extract method
  method.name <- gsub('boot.method: ', '', Dnames[[3]][method]) # name
  ### Format the data for the call to scatter3D and produce a 3D scatter:
  M <- melt(simulation.summary.object[, , method, stat.ind])
  x <- as.numeric(gsub('[^0-9]', '', M$Var1)) # extract x, y and z coordinate
  y <- as.numeric(gsub('[^0-9]', '', M$Var2)) # values from the melted object
  z <- M$value
  scatter3D(x, y, z, main = method.name, xlab = 'sample size',
            ylab = 'bootstrap resamples', zlab = statistic, ...)
  ### Format the data for the call to persp3D/hist3D and draw the surface:
  x <- as.numeric(gsub('sample.n: ', '', Dnames[[1]]))  # numeric axis values
  y <- as.numeric(gsub('boot.n: ', '', Dnames[[2]]))    # plus the matching
  z <- simulation.summary.object[, , method, stat.ind]  # z matrix
  # plain if/else replaces ifelse(hist, func3D <- ..., ...): ifelse() is a
  # vectorised data function, not a construct for side-effecting assignment
  func3D <- if (hist) hist3D else persp3D
  func3D(x, y, z, xlab = 'sample size', ylab = 'bootstrap resamples',
         zlab = statistic, main = method.name)
  invisible(list(x, y, z))  # avoid printing a potentially large matrix
}
gamma.neg.log.lik <- function(par, x) {
  # purpose : evaluate the negative log likelihood of a gamma distribution
  #           given parameter guesses on the real line and data x
  #
  # inputs  : par - length-2 vector on the real line; exp(par) gives the
  #                 (rate, shape) parameters (the log link keeps them
  #                 positive during optimisation)
  #           x   - numeric vector of observations
  #
  # output  : numeric scalar, the negative log likelihood at exp(par) given x
  par <- exp(par)  # log link to keep both parameters positive
  alpha <- par[1]; beta <- par[2]
  # dgamma(..., log = TRUE) is numerically safer than log(dgamma(...)):
  # it avoids density underflow to 0 (and hence -Inf logs) for extreme
  # observations.  This also drops the needless magrittr pipe.
  -sum(dgamma(x, rate = alpha, shape = beta, log = TRUE))
}
gammaMLE <- function(par, x, ...) {
  # purpose : maximum likelihood estimation of the parameters of a gamma
  #           distribution, given observations and initial guesses on the
  #           real line
  #
  # inputs  : par - values such that exp(par) gives the initial estimates of
  #                 the rate and shape parameters, respectively
  #           x   - vector of observations from the gamma process in question
  #           ... - extra optional parameters to be passed to optim
  #
  # output  : the estimated parameters as a list with elements rate and shape
  fit <- optim(par, gamma.neg.log.lik, x = x, ...)
  estimates <- exp(fit$par)  # undo the log link applied during optimisation
  list(rate = estimates[1], shape = estimates[2])
}
} | /Hand in/simulation.r | no_license | penguin-coding/asmt3 | R | false | false | 28,186 | r | # I confirm that the attached is my own work, except where clearly indicated
# in the text.
source('BCaHelperFunctions.r') # Use Len's code to help with BCa Bootstrap
library(plot3D) # Used by the sim.plot.3D function
library(reshape2) # Used by the sim.plot.3D function
library(rgl) # Used by the sim.plot.3D function
library(magrittr) # ceci n'est pas une pipe, hon hon hon
non.parametric.sample <- function(data, n, check.inputs=F){
  # purpose : draw n bootstrap resamples of size length(data) from data,
  #           sampling with replacement; one resample per matrix column
  #
  # inputs  : data         - numeric vector of univariate observations
  #           n            - positive integer number of samples to be drawn
  #           check.inputs - logical; validate the arguments when TRUE
  #
  # output  : length(data) x n matrix; each column is a generated sample
  if (check.inputs){ # optional input checks
    if (class(data)!='numeric' & class(data)!='integer'){
      stop('input data must be numeric')
    }
    if (n%%1!=0 | n<1) stop('n must be a positive integer')
  }
  # single draw of all length(data)*n values, then reshape column-wise
  draws <- sample(data, length(data) * n, replace = TRUE)
  return(matrix(draws, ncol = n))
}
get.bca.alphas <- function(data, est, boot.est, alpha, func){
  # purpose : calculates values alpha1 and alpha2, to be taken as the quantiles
  #           used to compute a BCa confidence interval
  #
  # inputs  : data     - numeric vector of univariate observations
  #           est      - estimate of statistic of interest from original sample
  #           boot.est - estimate of statistic of interest from each of the
  #                      bootstrap samples
  #           alpha    - a value between 0 and 1 such that a (1-a)*100 %
  #                      confidence interval will be produced
  #           func     - a function such that func(data) produces est; it
  #                      provides the estimate of the statistic of interest
  #                      for a given input dataset
  #
  # output  : a named vector of alphas such that quantile(data, probs=alphas)
  #           returns the BCa confidence interval for the statistic of interest
  #
  # notes   : - this method is treated as private, and so does not type check
  #             its inputs.  It should only be called by functions which have
  #             already checked their inputs.
  #           - get.zhat0() (bias correction) and get.ahat() (acceleration)
  #             come from the sourced BCaHelperFunctions.r file.
  alpha <- alpha/2 # transform alpha s.t. we get a (1-2a)*100 % CI
  zhat0 <- get.zhat0(est, boot.est)
  ahat <- get.ahat(data, est, func)
  # Calculate the values of alpha1 and alpha2 according to the formula specified
  # in the assignment 3 outline pdf, then map them back through the normal CDF:
  alpha1 <- zhat0 + ( (zhat0 + qnorm(alpha)) / (1-ahat*(zhat0 + qnorm(alpha))) )
  alpha2 <- zhat0 + ((zhat0 + qnorm(1-alpha))/(1-ahat*(zhat0 + qnorm(1-alpha))))
  alpha1 <- pnorm(alpha1) ; alpha2 <- pnorm(alpha2)
  alphas <- c(alpha1,alpha2) ; names(alphas) <- c('alpha1','alpha2')
  return(alphas)
}
bootstrap.type.checks <- function(data, n, alpha, func, method,
                                  smooth.sd, dist.func=NULL){
  # purpose : checks all the inputs of the bootstrap function are of the
  #           expected type and satisfy all required conditions they impose on
  #           each other
  #
  # inputs  : data      - vector of univariate observations
  #           n         - number of bootstrap resamples
  #           alpha     - target coverage of interval
  #           func      - function which calculates statistic of interest
  #           method    - character name of method to be used
  #           smooth.sd - measure of the sd of noise used in smooth bootstraps
  #           dist.func - character name of the function producing our deviates
  #
  # output  : NULL if no checks fail, otherwise, the error message relevant to
  #           the first check which failed
  if (class(data)!='numeric' & class(data)!='integer'){
    stop('input data must be numeric')}
  if ((method=='parametric'| method=='par.fit') & # if parametric: need a dist
      !(is.character(dist.func) &                 # func name which is a func
        is.function(try(match.fun(dist.func),silent=T))) ){
    stop('when method is parametric or par.fit, dist.func must be provided')
  }
  # NOTE(review): this condition uses the vectorised |, so a smooth.sd of
  # length > 1 makes the whole expression a vector and `if` errors with
  # "condition has length > 1" rather than showing the intended message;
  # short-circuit || with the length test first would fix it
  if (method=='smooth' & (class(smooth.sd)!='numeric' | smooth.sd<0 |
                          length(smooth.sd)>1 )){
    stop('When method = \'smooth\', smooth.sd must be a positive scalar')
  }
  if (n%%1!=0 | n<2) stop('n must be a positive integer greater than 1')
  if (alpha<0 | alpha>1) stop('alpha must be between 0 and 1')
  if (!is.function(func)) stop('invalid function supplied as func argument')
  if (!method %in% c('percentile','BCa','parametric','smooth','par.fit')){
    stop('invalid method')}
}
bootstrap <- function(data, n=999, alpha = 0.05, func = mean,
method = 'percentile', smooth.sd = 0.2,
dist.func = NULL, check.inputs=T, ...){
# purpose : produces a 1 - alpha % confidence interval for the 'func' of the
# data, using a non-parametric bootstrap of n samples of size 'size'
#
# inputs : data - numeric vector of observations from a univariate
# distribution
# n - the number of resamples to perform for the bootstrap
# alpha - a number between 0 and 1, such that a 1-alpha %
# confidence interval will be produced
# func - function to be used to calculate the statistic of
# interest for each sample. The default is 'mean'.
# method - 'percentile', 'BCa', 'parametric', 'smooth','par.fit'
# Specifies the bootstrap method to be used. When
# 'parametric' is chosen, the percentile method is used
# to calculate the confidence interval using the
# bootstrap samples, and a function from which to
# sample the data must be specified. All remaining
# options produce non-parametric bootstraps. Option
# 'smooth' adds a normal noise centred at 0. The
# chosen standard deviation is a fraction of the sample
# sd. This is set using the parameter 'smooth.sd'.
# 'par.fit' is a parametric percentile bootstrap, but
# it estimates the true parameters of the distribution
# from the data, rather than using the known values.
# Only works with dist.func = rnorm, rpois or rgamma.
# smooth.sd - Multiplier for the sample standard deviation. When
# method = 'smooth', a normal noise id added to
# each bootstrap resample. It has mean 0 and standard
# deviation smooth.sd * sd(data).
# dist.func - function to sample the data from when parametric is
# set to TRUE. It is assumed that the first argument
# in any call to dist.func is the number of random
# deviates to be produced, as is the convention with
# rnorm, runif, rpois, rgamma, etc. must be the
# character name of the function.
# check.inputs - Logical, if TRUE, all inputs are type checked
# ... - extra optional parameters to be passed to dist.func
#
# output : named vector containing the lower and upper bounds of the interval
# to allow the user to pass in the func or func name:
func <- try(match.fun(func), silent=T)
#### Input checks:
if (check.inputs){
# if dist.func has not been set, call the checking function with NULL
if (is.null(dist.func)) bootstrap.type.checks(data, n, alpha, func, method,
smooth.sd, NULL)
# otherwise, pass in dist.func itself. If we tried to do this when
# dist.func is.null(), we would get an error, hence this unsatisfactory
# way of dealing with the problem:
else {bootstrap.type.checks(data, n, alpha, func, method, smooth.sd,
dist.func)}
}
### End of input-checks
if (is.null(dist.func)==FALSE & (method=='parametric'| method=='par.fit')){
dist.func.name <- dist.func # for method=par.fit, we need to know
dist.func <- match.fun(dist.func) # the name of dist.func
}
ldata <- length(data)
# If the user has set smooth.sd to 0 and wants a smooth bootstrap, we obtain
# the same result more efficiently by simply providing them with a percentile
# bootstrap:
if (smooth.sd==0 & method=='smooth') method <- 'percentile'
if (method!='parametric' & method!='par.fit'){
# generate the bootstrap resamples:
samples <- non.parametric.sample(data, n)
if (method=='smooth'){ # add noise to the data for a smooth bootstrap
samples <- samples + matrix(data=rnorm(ldata*n, sd=sd(data)*smooth.sd),
nrow=ldata, ncol=n)
}
}
else{ # parametric resamples
if (method=='par.fit'){
# get MLEs for the parameters of the distribution:
dist.func.args <- switch(dist.func.name,
# if the data are normal, this is easy using
# sample statistics
rnorm=list(n=ldata,mean=mean(data),sd=sd(data)),
# same goes for poisson data
rpois=list(n=ldata, lambda=mean(data)),
# if the data are gamma, we need to call an MLE
# function to get the estimates, and extract
# initial guesses from the ones passed by the
# user using ... arguments.
rgamma=as.list(c(n=ldata, gammaMLE(
log(c(list(...)$rate,list(...)$shape)),data))))
# Note, we add in n=ldata to the list of estimated parameters so
# that the use of do.call() becomes possible
}
# If method=='parametric' we simply use the true parameter values as the
# ones we pass to dist.func:
else {dist.func.args <- as.list(c(n=ldata,list(...)))}
samples <- matrix(nrow=ldata,ncol=n)
# the replicate function worked badly with functions like rpois and rt,
# so a less efficient method has to be used to produce parametric samples:
for (i in 1:n){samples[,i] <- do.call(dist.func,dist.func.args)}
}
stats <- rep(NA, n) # some profiling revealed a for loop was
for (i in 1:n){ # in fact far faster than using the
stats[i] <- func(samples[,i]) # apply function to calculate the statistics
}
stats <- c(stats, func(data)) # add in O.G data statistic
lower <- alpha/2 # percentile method
upper <- 1 - alpha/2 # intervals
if (method=='BCa'){
alphas <- get.bca.alphas(data, func(data), stats, alpha, func)
lower <- alphas[1]
upper <- alphas[2]
}
CI <- quantile(stats, probs=c(lower, upper))
names(CI) <- c('lower','upper')
return(CI)
}
simulation <- function(dist.func, simulations, sample.n, boot.n, boot.method,
                       stat.func=mean, alpha=0.05, smooth.sd=0.2,...){
  # purpose : run a set of bootstrap simulations over a grid of settings.
  #
  # inputs  : dist.func   - function (or its character name) used to generate
  #                         the random data at the start of each simulation
  #           simulations - number of simulated intervals per setting
  #           sample.n    - sample size(s) drawn with dist.func; may be a vector
  #           boot.n      - number of bootstrap resamples; may be a vector
  #           boot.method - 'percentile', 'BCa', 'smooth' or 'parametric', as
  #                         character input; may be a vector
  #           stat.func   - function computing the statistic of interest
  #           alpha       - (1-alpha)*100 % confidence intervals are produced
  #           smooth.sd   - fraction of the sample sd used as the sd of the
  #                         noise added by a smooth bootstrap
  #           ...         - extra parameters passed on to dist.func
  #
  # output  : a 4-dimensional array of class 'simulation.output.object' with
  #           dimensions [sample.n, boot.n, boot.method, 2*simulations]; the
  #           4th dimension stores intervals as (lower1, upper1, lower2, ...)
  #
  # Preallocate the output array.  Note: 'dimnames' must contain one entry per
  # dimension (NULL for the unnamed 4th dimension) -- array() errors with
  # "length of 'dimnames' must match that of 'dims'" otherwise.
  output <- array(data = NA,
                  dim = c(length(sample.n), length(boot.n),
                          length(boot.method), 2*simulations),
                  dimnames = list(paste('sample.n:', as.character(sample.n)),
                                  paste('boot.n:', as.character(boot.n)),
                                  paste('boot.method:', boot.method),
                                  NULL))
  # match.fun() is loop-invariant, so resolve the sampler once up front:
  dist.function <- match.fun(dist.func)
  for (sample.n.setting in sample.n){
    for (boot.n.setting in boot.n){
      for (boot.method.setting in boot.method){
        # indices of the current settings in the output array:
        sample.n.index <- which(sample.n==sample.n.setting)
        boot.n.index <- which(boot.n==boot.n.setting)
        boot.method.index <- which(boot.method==boot.method.setting)
        sims <- matrix(nrow=2, ncol=simulations)
        for (i in seq_len(simulations)){
          dataset <- dist.function(sample.n.setting, ...) # the original sample
          # get the bootstrap interval for that dataset:
          boot <- bootstrap(dataset, n=boot.n.setting, alpha=alpha,
                            func = stat.func,
                            method = boot.method.setting,
                            smooth.sd = smooth.sd,
                            dist.func = dist.func, ...)
          # column-wise fill, so the flattened matrix reads as
          # (lower1, upper1, lower2, upper2, ...):
          sims[,i] <- boot
        }
        # add the set of simulated bootstrap intervals to the output array:
        output[sample.n.index, boot.n.index, boot.method.index,] <- sims
      }
    }
  }
  class(output) <- 'simulation.output.object'
  return(output)
}
calculate.summaries <- function(simulation.output.object, true.value){
  # purpose : calculate summary statistics (coverage, mean interval length
  #           and failure tendency) for every level of the simulation
  #           settings stored in a simulation.output.object.
  #
  # inputs  : simulation.output.object - the result of calling simulation();
  #               a multi-dimensional array containing all the simulated
  #               bootstrap intervals at each level of the settings
  #           true.value - the true value of the statistic for the
  #               distribution used to produce the deviates
  #
  # output  : a simulation.summary.object: an array whose 4th dimension holds
  #           the three summaries (coverage, length, failure tendency) for
  #           each combination of simulation settings
  # inherits() is the robust way to test for an S3 class:
  if (!inherits(simulation.output.object, 'simulation.output.object')){
    stop('input must be a valid simulation.output.object')
  }
  # is.numeric() also accepts integer input, unlike class(x) == 'numeric':
  if (!is.numeric(true.value)) stop('true.value must be a real number')
  dims <- dim(simulation.output.object)
  output <- array(dim = c(dims[1], dims[2], dims[3], 3))
  # carry over the setting labels and name the summary slices explicitly:
  in.names <- dimnames(simulation.output.object)
  if (!is.null(in.names)){
    dimnames(output) <- c(in.names[1:3],
                          list(c('coverage','length','failure tendency')))
  }
  for (i in seq_len(dims[1])){   # loop over every combination of settings
    for (j in seq_len(dims[2])){ # and summarise the simulated intervals:
      for (k in seq_len(dims[3])){
        boot.ints <- simulation.output.object[i,j,k,] # extract intervals
        coverage <- get.coverage(boot.ints, true.value)
        int.length <- get.length(boot.ints) # named to avoid masking length()
        failure.tend <- get.coverage(boot.ints, true.value, failure.t=TRUE)
        # order matches the dimnames: coverage, length, failure tendency
        output[i,j,k,] <- c(coverage, int.length, failure.tend)
      }
    }
  }
  class(output) <- 'simulation.summary.object'
  return(output)
}
get.coverage <- function(bootstrap.results, true.value, failure.t=FALSE){
  # purpose : returns the observed coverage (or failure tendency) of a
  #           sequence of confidence intervals.
  #
  # input   : bootstrap.results - numeric vector of intervals in the format
  #                               c(lower1, upper1, lower2, upper2, ...)
  #           true.value        - the true value of the statistic of interest
  #           failure.t         - if TRUE, return the failure tendency: the
  #                               proportion (0 to 1) of the *failed*
  #                               intervals that lie entirely above
  #                               true.value (i.e. the truth fell to their
  #                               left)
  #
  # output  : numeric scalar; the observed coverage, or the failure tendency
  #           when failure.t = TRUE (NaN when no interval failed, since the
  #           ratio is then 0/0 -- unchanged from the original behaviour)
  # is.numeric() accepts integer as well as double input, unlike a strict
  # class() comparison:
  if (!is.numeric(bootstrap.results) || !is.numeric(true.value)){
    stop('invalid input')
  }
  n <- length(bootstrap.results)
  if (n %% 2 != 0) stop('input of odd length is not allowed')
  lowers <- bootstrap.results[seq(1, n, 2)] # split the intervals into vectors
  uppers <- bootstrap.results[seq(2, n, 2)] # of lower and upper bounds
  # is the true.value contained in each of the confidence intervals?
  in.interval <- (true.value >= lowers & true.value <= uppers)
  if (!failure.t){
    return(sum(in.interval)/(n/2)) # the observed coverage
  }
  not.in <- !in.interval           # logical negation is clearer than the
  failed.lowers <- lowers[not.in]  # original as.logical(1 - in.interval)
  sum(true.value < failed.lowers)/sum(not.in) # the observed failure tendency
}
get.length <- function(bootstrap.results){
  # purpose : returns the observed average interval length, given a vector
  #           which contains a sequence of confidence intervals
  #
  # input   : bootstrap.results - numeric vector of intervals in the format
  #                               c(lower1, upper1, lower2, upper2, ...)
  #
  # output  : numeric scalar; the observed average interval length
  # is.numeric() accepts integer as well as double input, unlike a strict
  # class() comparison:
  if (!is.numeric(bootstrap.results)) stop('invalid input')
  n <- length(bootstrap.results)
  if (n %% 2 != 0) stop('input of odd length is not allowed')
  lowers <- bootstrap.results[seq(1, n, 2)] # split the intervals into vectors
  uppers <- bootstrap.results[seq(2, n, 2)] # of lower and upper bounds
  mean(abs(uppers - lowers)) # estimated average interval length
}
plot.simulation.summary.object <- function(simulation.summary.object,
                                           statistic='coverage',
                                           fix.to.top=FALSE, ...){
  # purpose : plots the statistic of interest for a set of simulated
  #           bootstrap confidence intervals: one panel against sample size
  #           and one against the number of bootstrap resamples.
  #
  # inputs  : simulation.summary.object - array of summary statistics for
  #               the simulated intervals
  #           statistic  - summary statistic to plot: 'coverage', 'length'
  #                        or 'failure tendency'
  #           fix.to.top - if FALSE, average over the dimension not being
  #                        plotted; if TRUE, fix that dimension at its
  #                        highest value instead
  #           ...        - extra optional parameters passed to matplot
  #
  # output  : None, produces two plots.
  # inherits() is the robust way to test for an S3 class:
  if (!inherits(simulation.summary.object, 'simulation.summary.object')){
    stop('invalid input type')
  }
  if (!(statistic %in% c('coverage','length','failure tendency'))){
    stop('invalid choice of statistic')
  }
  # fetch summary statistic index:
  stat.ind <- switch(statistic, 'coverage'=1, 'length'=2, 'failure tendency'=3)
  # get dimensions of the summary object:
  dims <- dim(simulation.summary.object)
  dims.not.stats <- dim(simulation.summary.object[,,,1])
  msg1 <- 'Can only plot summaries when sample.n, bootstrap.n and'
  msg2 <- 'method are all vectors'
  if (any(dims.not.stats < 2)) stop(paste(msg1, msg2))
  for (plot.num in c(1, 2)){
    # plot.num == 1 -> sample-size panel; plot.num == 2 -> resamples panel.
    # Extract x axis values from the dimnames (digits only):
    x <- as.numeric(gsub('[^0-9]', '',
                         dimnames(simulation.summary.object)[[plot.num]]))
    if (!fix.to.top){
      # average over the dimension not being plotted:
      y <- apply(simulation.summary.object[,,,stat.ind], c(plot.num, 3), mean)
    } else if (plot.num == 1){
      # ... or fix the other dimension at its highest value.  A plain
      # if/else replaces the original ifelse() with assignment side effects:
      y <- simulation.summary.object[, dims[2], , stat.ind]
    } else {
      y <- simulation.summary.object[dims[1], , , stat.ind]
    }
    xlab <- c('sample size', 'bootstrap resamples')[plot.num]
    # Draw the plot, one line per bootstrap method:
    method.names <- gsub('boot.method: ', '',
                         dimnames(simulation.summary.object)[[3]])
    matplot(x, y, ylab=statistic, xlab=xlab, type='l',
            col=seq_len(dims[3]), ...)
    legend('topright', method.names, lty=1, col=seq_len(dims[3]),
           bty='n', cex=.75)
  }
  invisible(NULL)
}
sim.plot.3D <- function(simulation.summary.object, statistic, method,
                        hist=FALSE, ...){
  # purpose : produces two 3D plots of a summary statistic for simulated
  #           bootstrap intervals: a 3D scatter and either a perspective
  #           surface or a 3D histogram.
  #
  # inputs  : simulation.summary.object - the multi-dimensional array of
  #               summary statistics for each level of simulation setting
  #           statistic - character name of the statistic to be plotted:
  #                       'coverage', 'length' or 'failure tendency'
  #           method    - integer index of the bootstrap method to plot;
  #                       valid values depend on the 3rd dimension of the
  #                       object passed to calculate.summaries
  #           hist      - logical; if TRUE, produces a 3D histogram instead
  #                       of a 3D perspective plot
  #           ...       - extra optional parameters passed to scatter3D
  #
  # output  : (invisibly) a list with components x (sample sizes), y
  #           (bootstrap resamples) and z (matrix of statistic values) as
  #           used for the persp3D/hist3D plot.  Primarily useful for
  #           debugging or for re-plotting with a different 3D function.
  ### input checks:
  # inherits() is the robust way to test for an S3 class:
  if (!inherits(simulation.summary.object, 'simulation.summary.object')){
    stop('invalid input type')
  }
  if (!(statistic %in% c('coverage','length','failure tendency'))){
    stop('invalid choice of statistic')
  }
  # method must be a positive integer within the 3rd dimension:
  if (method < 1 || method %% 1 != 0 ||
      method > dim(simulation.summary.object)[3]){
    stop('invalid choice of method')
  }
  if (any(dim(simulation.summary.object[,,,1]) < 2)){
    stop('all simulation settings must be vectors to plot in 3D')
  }
  ### end of input checks ###
  # fetch summary statistic index:
  stat.ind <- switch(statistic, 'coverage'=1, 'length'=2, 'failure tendency'=3)
  Dnames <- dimnames(simulation.summary.object)
  method.name <- gsub('boot.method: ', '', Dnames[[3]][method])
  ### Format the data for the call to scatter3D and produce a 3D scatter:
  M <- melt(simulation.summary.object[,,method,stat.ind])
  x <- as.numeric(gsub('[^0-9]','',M$Var1)) # strip the dimname labels down
  y <- as.numeric(gsub('[^0-9]','',M$Var2)) # to their numeric values
  z <- M$value
  scatter3D(x, y, z, main=method.name, xlab='sample size',
            ylab='bootstrap resamples', zlab=statistic, ...)
  ### Format the data for the call to persp3D/hist3D and draw the surface:
  x <- as.numeric(gsub('sample.n: ','',Dnames[[1]]))
  y <- as.numeric(gsub('boot.n: ','',Dnames[[2]]))
  z <- simulation.summary.object[,,method,stat.ind]
  # plain if/else instead of ifelse() with assignment side effects:
  if (hist){
    func3D <- hist3D
  } else {
    func3D <- persp3D
  }
  func3D(x, y, z, xlab='sample size', ylab='bootstrap resamples',
         zlab=statistic, main=method.name)
  # invisible so a potentially large matrix is not auto-printed; elements
  # are named to match the documented output:
  invisible(list(x=x, y=y, z=z))
}
gamma.neg.log.lik <- function(par, x){
  # purpose : evaluates the negative log likelihood of a gamma distribution
  #           given parameter guesses on the real line, and data x
  #
  # inputs  : par - c(log(rate), log(shape)); a log link transforms these
  #                 unconstrained values onto the positive parameter scale
  #           x   - vector of observations
  #
  # output  : numeric scalar, the negative log likelihood evaluated at x
  #           and exp(par)
  par <- exp(par) # log links keep rate and shape positive
  rate <- par[1]  # named after the dgamma arguments they feed, rather than
  shape <- par[2] # the ambiguous alpha/beta of the original
  # dgamma(log = TRUE) computes the log density directly, which is more
  # numerically stable than log(dgamma(...)) for extreme observations:
  -sum(dgamma(x, rate = rate, shape = shape, log = TRUE))
}
gammaMLE <- function(par,x,...){
  # purpose : maximum likelihood estimation of the parameters of a gamma
  #           distribution, given observations and initial guesses on the
  #           real line.
  #
  # inputs  : par - values such that exp(par) gives the initial estimates of
  #                 the rate and shape parameters, respectively
  #           x   - vector of observations from the gamma process in question
  #           ... - extra optional parameters passed on to optim
  #
  # output  : list with components 'rate' and 'shape' holding the estimates
  fit <- optim(par, gamma.neg.log.lik, x = x, ...)
  estimates <- exp(fit$par) # back-transform from the log (real-line) scale
  list(rate = estimates[1], shape = estimates[2])
}
#' annotatr: Annotation of Genomic Regions to Functional Annotations
#'
#' Given a set of genomic sites/regions (e.g. ChIP-seq peaks, CpGs, differentially methylated CpGs or regions, SNPs, etc.) it is often of interest to investigate the intersecting functional annotations. Such annotations include those relating to gene models (promoters, 5'UTRs, exons, introns, and 3'UTRs), CpGs (CpG islands, CpG shores, CpG shelves), the non-coding genome, and enhancers. The annotatr package provides an easy way to summarize and visualize the intersection of genomic sites/regions with the above functional annotations.
#'
#' @docType package
#' @name annotatr
#'
#' @import AnnotationDbi
#' @import AnnotationHub
#' @import dplyr
#' @import ggplot2
#' @import GenomicFeatures
#' @import GenomicRanges
#' @importClassesFrom GenomeInfoDb Seqinfo
#' @importFrom GenomeInfoDb seqnames seqlengths
#' @importFrom IRanges IRanges
#' @importFrom S4Vectors endoapply
#' @import methods
#' @importFrom readr read_tsv
#' @importFrom reshape2 melt
#' @importFrom regioneR randomizeRegions
#' @import rtracklayer
#' @importClassesFrom S4Vectors Hits Rle
#' @importFrom stats as.formula
#' @importFrom utils combn data
NULL
| /R/annotatr_package_doc.R | no_license | insilicolife/annotatr | R | false | false | 1,203 | r | #' annotatr: Annotation of Genomic Regions to Functional Annotations
#'
#' Given a set of genomic sites/regions (e.g. ChIP-seq peaks, CpGs, differentially methylated CpGs or regions, SNPs, etc.) it is often of interest to investigate the intersecting functional annotations. Such annotations include those relating to gene models (promoters, 5'UTRs, exons, introns, and 3'UTRs), CpGs (CpG islands, CpG shores, CpG shelves), the non-coding genome, and enhancers. The annotatr package provides an easy way to summarize and visualize the intersection of genomic sites/regions with the above functional annotations.
#'
#' @docType package
#' @name annotatr
#'
#' @import AnnotationDbi
#' @import AnnotationHub
#' @import dplyr
#' @import ggplot2
#' @import GenomicFeatures
#' @import GenomicRanges
#' @importClassesFrom GenomeInfoDb Seqinfo
#' @importFrom GenomeInfoDb seqnames seqlengths
#' @importFrom IRanges IRanges
#' @importFrom S4Vectors endoapply
#' @import methods
#' @importFrom readr read_tsv
#' @importFrom reshape2 melt
#' @importFrom regioneR randomizeRegions
#' @import rtracklayer
#' @importClassesFrom S4Vectors Hits Rle
#' @importFrom stats as.formula
#' @importFrom utils combn data
NULL
|
# library() errors immediately if dplyr is missing, unlike require(),
# which only warns and lets the script fail later:
library(dplyr)
# read the raw Telco customer churn data
data <- read.csv("TelcoCustomerChurn.csv")
# Recode the categorical columns as numeric codes in a single mutate()
# (each expression only references its own column, so one call is
# equivalent to the original chain of twelve).  Unless noted otherwise,
# "Yes" maps to 1 and any other level maps to 0.
data <- data %>%
  mutate(gender = ifelse(gender == "Female", 0, 1),
         Partner = ifelse(Partner == "No", 0, 1),
         Dependents = ifelse(Dependents == "No", 0, 1),
         PhoneService = ifelse(PhoneService == "No", 0, 1),
         # "No phone service" is coded 0, the same as "No":
         MultipleLines = ifelse(MultipleLines == "Yes", 1, 0),
         # no internet -> 0, DSL -> 1, any other level -> 2:
         InternetService = ifelse(InternetService == "No", 0,
                                  ifelse(InternetService == "DSL", 1, 2)),
         OnlineSecurity = ifelse(OnlineSecurity == "Yes", 1, 0),
         OnlineBackup = ifelse(OnlineBackup == "Yes", 1, 0),
         DeviceProtection = ifelse(DeviceProtection == "Yes", 1, 0),
         TechSupport = ifelse(TechSupport == "Yes", 1, 0),
         StreamingTV = ifelse(StreamingTV == "Yes", 1, 0),
         StreamingMovies = ifelse(StreamingMovies == "Yes", 1, 0),
         # NOTE(review): the standard Telco dataset spells this level
         # "Month-to-month"; if the casing here does not match the data,
         # month-to-month contracts fall through to code 3 -- confirm
         # against the input file.
         Contract = ifelse(Contract == "Month-to-Month", 1,
                           ifelse(Contract == "One year", 2, 3)),
         PaperlessBilling = ifelse(PaperlessBilling == "Yes", 1, 0),
         # replace missing TotalCharges values with 0:
         TotalCharges = ifelse(is.na(TotalCharges), 0, TotalCharges),
         PaymentMethod = ifelse(PaymentMethod == "Electronic check", 1,
                         ifelse(PaymentMethod == "Mailed check", 2,
                         ifelse(PaymentMethod == "Bank transfer (automatic)",
                                3, 4))))
# data <- data %>%
# mutate(Churn = ifelse(Churn == "Yes",1,0))
# drop the customer identifier column, which carries no model information
cols.dont.want <- "customerID"
data <- data[, !names(data) %in% cols.dont.want, drop = FALSE]
# write to csv file
write.csv(data,"preprocessed.csv",row.names = FALSE) | /LLM/R/preprocessing.R | no_license | sharanyakamath/Customer-Churn-Prediction | R | false | false | 1,859 | r | require(dplyr)
# read the raw Telco customer churn data
data <- read.csv("TelcoCustomerChurn.csv")
# recode the categorical columns as numeric codes; unless noted otherwise,
# "Yes" maps to 1 and any other level maps to 0
data <- data %>%
mutate(gender = ifelse(gender == "Female",0,1))
data <- data %>%
mutate(Partner = ifelse(Partner == "No",0,1))
data <- data %>%
mutate(Dependents = ifelse(Dependents == "No",0,1))
data <- data %>%
mutate(PhoneService = ifelse(PhoneService == "No",0,1))
# "No phone service" is coded 0, the same as "No":
data <- data %>%
mutate(MultipleLines = ifelse(MultipleLines == "Yes",1,0))
# no internet -> 0, DSL -> 1, any other level -> 2:
data <- data %>%
mutate(InternetService = ifelse(InternetService == "No",0,ifelse(InternetService == "DSL",1,2)))
data <- data %>%
mutate(OnlineSecurity = ifelse(OnlineSecurity == "Yes",1,0))
data <- data %>%
mutate(OnlineBackup = ifelse(OnlineBackup == "Yes",1,0))
data <- data %>%
mutate(DeviceProtection = ifelse(DeviceProtection == "Yes",1,0))
data <- data %>%
mutate(TechSupport = ifelse(TechSupport == "Yes",1,0))
data <- data %>%
mutate(StreamingTV = ifelse(StreamingTV == "Yes",1,0))
data <- data %>%
mutate(StreamingMovies = ifelse(StreamingMovies == "Yes",1,0))
# NOTE(review): the standard Telco dataset spells this level
# "Month-to-month"; if the casing here does not match the data,
# month-to-month contracts fall through to code 3 -- confirm.
data <- data %>%
mutate(Contract = ifelse(Contract == "Month-to-Month",1,ifelse(Contract == "One year",2,3)))
data <- data %>%
mutate(PaperlessBilling = ifelse(PaperlessBilling == "Yes",1,0))
# replace missing TotalCharges values with 0:
data <- data %>%
mutate(TotalCharges = ifelse(is.na(TotalCharges),0,TotalCharges))
data <- data %>%
mutate(PaymentMethod = ifelse(PaymentMethod == "Electronic check",1,
ifelse(PaymentMethod == "Mailed check",2,
ifelse(PaymentMethod == "Bank transfer (automatic)",3,4))))
# data <- data %>%
# mutate(Churn = ifelse(Churn == "Yes",1,0))
# drop the customer identifier column
# (style note: `drop = F` below would be safer spelled `drop = FALSE`)
cols.dont.want <- "customerID"
data <- data[, ! names(data) %in% cols.dont.want, drop = F]
write.csv(data,"preprocessed.csv",row.names = FALSE) |
####################################################
#
# Selecting trades for each question set for active and non-active periods
#
####################################################
# NOTE(review): this script assumes the parallel trade vectors tat, tit, pit,
# qit, nvt, ovt, ast, apot, cit, rst, mdt, asqt and asot (one entry per
# trade, all the same length) already exist in the workspace -- confirm that
# they are loaded upstream before sourcing this file.
start <- Sys.time()
print("Trade Selection started")
### dates on which the question sets were exchanged:
expStart <- as.POSIXct("2014-11-07")
expChange1 <- as.POSIXct("2014-12-07")
expChange2 <- as.POSIXct("2015-01-07")
expChange3 <- as.POSIXct("2015-02-07")
expStop <- as.POSIXct("2015-03-07")
### Generating sets of trades for each condition of Set A questions by removing non-applicable trades
### Create a working copy of the trade timestamps for the experiment window:
tatExp <- tat
## Removing trades before and after the experiment, by blanking their
## timestamps and then dropping the incomplete cases:
tatExp[tatExp<expStart] <- NA # NA in place of traded_at for trades before experiment start
tatExp[tatExp>=expStop] <- NA # NA in place of traded_at for trades after experiment end
good <- complete.cases(tatExp) # keep only trades made during the experiment
sum(!good) # number of trades dropped (visible when the script is echoed)
tatExp<-tatExp[good]; titExp<-tit[good]; pitExp<-pit[good]; qitExp<-qit[good]; nvtExp<-nvt[good]; ovtExp<-ovt[good]; astExp<-ast[good]; apotExp<-apot[good]
citExp<-cit[good]; rstExp<-rst[good]; mdtExp<-mdt[good]; asqtExp<-asqt[good]; asotExp<-asot[good]
#######################
## Creating dummy variables for the active and control conditions for set A
tatAAct <- tatExp
tatACon <- tatExp
print(c(length(tatAAct),length(tatACon)))
## Active condition: remove trades from months when Set A was NOT active
tatAAct[tatAAct>=expChange1 &tatAAct<expChange2] <- NA # removing DEC trades
tatAAct[tatAAct>=expChange3] <- NA # removing FEB trades
goodAct <- complete.cases(tatAAct) # trades made while set A was active
sum(!goodAct) # number of trades excluded from the active condition
tatAAct<-tatExp[goodAct]; titAAct<-titExp[goodAct]; pitAAct<-pitExp[goodAct]; qitAAct<-qitExp[goodAct]; nvtAAct<-nvtExp[goodAct]; ovtAAct<-ovtExp[goodAct]; astAAct<-astExp[goodAct]
apotAAct<-apotExp[goodAct]; citAAct<-citExp[goodAct]; rstAAct<-rstExp[goodAct]; mdtAAct<-mdtExp[goodAct]; asqtAAct<-asqtExp[goodAct]; asotAAct<-asotExp[goodAct]
print(length(tatAAct))
## Control condition: remove trades from months when Set A WAS active
tatACon[tatACon<expChange1] <-NA # removing Nov trades
tatACon[tatACon>=expChange2 &tatACon<expChange3] <-NA # removing Jan trades
goodCon <- complete.cases(tatACon) # trades made while set A was non-active
sum(!goodCon) # number of trades excluded from the control condition
tatACon<-tatExp[goodCon]; titACon<-titExp[goodCon]; pitACon<-pitExp[goodCon]; qitACon<-qitExp[goodCon]; nvtACon<-nvtExp[goodCon]; ovtACon<-ovtExp[goodCon]; astACon<-astExp[goodCon]
apotACon<-apotExp[goodCon]; citACon<-citExp[goodCon]; rstACon<-rstExp[goodCon]; mdtACon<-mdtExp[goodCon]; asqtACon<-asqtExp[goodCon]; asotACon<-asotExp[goodCon]
print(length(tatACon))
duration <- as.double(difftime(Sys.time(),start,units="sec")) # elapsed run time in seconds
print ("Trade Selection Complete")
print(duration) | /Incentive Overall Trade Selection B wo first Q 150212.R | no_license | Andy-Powell/SciCast_Andy | R | false | false | 3,007 | r | ####################################################
#
# Selecting trades for each question set for active and non-active periods
#
####################################################
# NOTE(review): this script assumes the parallel trade vectors tat, tit, pit,
# qit, nvt, ovt, ast, apot, cit, rst, mdt, asqt and asot (one entry per
# trade, all the same length) already exist in the workspace -- confirm that
# they are loaded upstream before sourcing this file.
start <- Sys.time()
print("Trade Selection started")
### dates on which the question sets were exchanged:
expStart <- as.POSIXct("2014-11-07")
expChange1 <- as.POSIXct("2014-12-07")
expChange2 <- as.POSIXct("2015-01-07")
expChange3 <- as.POSIXct("2015-02-07")
expStop <- as.POSIXct("2015-03-07")
### Generating sets of trades for each condition of Set A questions by removing non-applicable trades
### Create a working copy of the trade timestamps for the experiment window:
tatExp <- tat
## Removing trades before and after the experiment, by blanking their
## timestamps and then dropping the incomplete cases:
tatExp[tatExp<expStart] <- NA # NA in place of traded_at for trades before experiment start
tatExp[tatExp>=expStop] <- NA # NA in place of traded_at for trades after experiment end
good <- complete.cases(tatExp) # keep only trades made during the experiment
sum(!good) # number of trades dropped (visible when the script is echoed)
tatExp<-tatExp[good]; titExp<-tit[good]; pitExp<-pit[good]; qitExp<-qit[good]; nvtExp<-nvt[good]; ovtExp<-ovt[good]; astExp<-ast[good]; apotExp<-apot[good]
citExp<-cit[good]; rstExp<-rst[good]; mdtExp<-mdt[good]; asqtExp<-asqt[good]; asotExp<-asot[good]
#######################
## Creating dummy variables for the active and control conditions for set A
tatAAct <- tatExp
tatACon <- tatExp
print(c(length(tatAAct),length(tatACon)))
## Active condition: remove trades from months when Set A was NOT active
tatAAct[tatAAct>=expChange1 &tatAAct<expChange2] <- NA # removing DEC trades
tatAAct[tatAAct>=expChange3] <- NA # removing FEB trades
goodAct <- complete.cases(tatAAct) # trades made while set A was active
sum(!goodAct) # number of trades excluded from the active condition
tatAAct<-tatExp[goodAct]; titAAct<-titExp[goodAct]; pitAAct<-pitExp[goodAct]; qitAAct<-qitExp[goodAct]; nvtAAct<-nvtExp[goodAct]; ovtAAct<-ovtExp[goodAct]; astAAct<-astExp[goodAct]
apotAAct<-apotExp[goodAct]; citAAct<-citExp[goodAct]; rstAAct<-rstExp[goodAct]; mdtAAct<-mdtExp[goodAct]; asqtAAct<-asqtExp[goodAct]; asotAAct<-asotExp[goodAct]
print(length(tatAAct))
## Control condition: remove trades from months when Set A WAS active
tatACon[tatACon<expChange1] <-NA # removing Nov trades
tatACon[tatACon>=expChange2 &tatACon<expChange3] <-NA # removing Jan trades
goodCon <- complete.cases(tatACon) # trades made while set A was non-active
sum(!goodCon) # number of trades excluded from the control condition
tatACon<-tatExp[goodCon]; titACon<-titExp[goodCon]; pitACon<-pitExp[goodCon]; qitACon<-qitExp[goodCon]; nvtACon<-nvtExp[goodCon]; ovtACon<-ovtExp[goodCon]; astACon<-astExp[goodCon]
apotACon<-apotExp[goodCon]; citACon<-citExp[goodCon]; rstACon<-rstExp[goodCon]; mdtACon<-mdtExp[goodCon]; asqtACon<-asqtExp[goodCon]; asotACon<-asotExp[goodCon]
print(length(tatACon))
duration <- as.double(difftime(Sys.time(),start,units="sec")) # elapsed run time in seconds
print ("Trade Selection Complete")
print(duration) |
# Build the line-plot data: classify each response pair, smooth the response
# counts within subject/version, keep only different-object responses with
# matching error magnitude (dropping repetitions), then summarise
# log-probabilities across subjects.
data.lineplot <- data.counts %>%
filter(version %in% version.include) %>%
# classify each horizontal/vertical response pair:
mutate(sameErr = abs(resp.h.pos) == abs(resp.v.pos),
erMag = abs(resp.h.pos),
sameObj= resp.h.pos == resp.v.pos,
pos = resp.h.pos,
partMatch = case_when(
resp.h.hv == 1 & resp.v.hv == 2 ~ 'correct',
resp.h.hv == 2 & resp.v.hv == 1 ~ 'swapped',
resp.h.hv == resp.v.hv ~ 'repeated',
TRUE ~ 'error')) %>%
# add-one (Laplace) smoothing of the n counts within subject/version:
group_by(experiment, subjectID, version) %>%
mutate(p.smooth = (n+1)/sum(n+1),
log.p = log(p.smooth)) %>%
filter(!sameObj,
sameErr,
partMatch != 'repeated') %>%
ungroup() %>%
# mean and standard error of log-probability across subjects:
group_by(experiment, version, partMatch, erMag) %>%
summarise(n.ss=n(),
mean.log.p = mean(log.p),
sd.log.p = sd(log.p),
sem.log.p = sd.log.p/sqrt(n.ss)) %>%
mutate(description = (factor(version.code[as.character(version)],
levels = version.order)))
# Plot mean log-probability (+/- 2 SEM) against error magnitude, one line
# per match type, faceted by experiment x version description:
data.lineplot %>%
ggplot(aes(x = erMag,
y = mean.log.p,
ymin = mean.log.p-2*sem.log.p,
ymax=mean.log.p+2*sem.log.p,
color=partMatch))+
facet_grid(experiment ~ description)+
# geom_label(data=version.summary %>% filter(version %in% version.include),
# aes(x=0,y=-4, label=paste0('n=',subjects),
# ymin=NULL,ymax=NULL,color=NULL),
# size=3)+
geom_pointrange(size=0.3)+
geom_line() +
scale_color_manual(values=c('correct'='blue',
'repeated'='gray',
'swapped'='red',
'error' = 'orange'))+
scale_x_continuous(breaks=c(1,2), limits = c(0.5,2.5))+
theme_minimal()+
theme(strip.text = element_text(face='bold'),
legend.position = 'right',
panel.grid.minor = element_blank())+
labs(title = 'Probability of reported colors',
subtitle = 'normalized after removing repetitions; showing only same source object',
y = 'mean log(prob) +/- 2 sem',
x = 'source object position (0=target)')
| /analysis/lineplots.diff-obj.R | no_license | vullab/ColorBinding | R | false | false | 2,125 | r |
# Build the line-plot data: classify each response pair, smooth the response
# counts within subject/version, keep only different-object responses with
# matching error magnitude (dropping repetitions), then summarise
# log-probabilities across subjects.
data.lineplot <- data.counts %>%
filter(version %in% version.include) %>%
# classify each horizontal/vertical response pair:
mutate(sameErr = abs(resp.h.pos) == abs(resp.v.pos),
erMag = abs(resp.h.pos),
sameObj= resp.h.pos == resp.v.pos,
pos = resp.h.pos,
partMatch = case_when(
resp.h.hv == 1 & resp.v.hv == 2 ~ 'correct',
resp.h.hv == 2 & resp.v.hv == 1 ~ 'swapped',
resp.h.hv == resp.v.hv ~ 'repeated',
TRUE ~ 'error')) %>%
# add-one (Laplace) smoothing of the n counts within subject/version:
group_by(experiment, subjectID, version) %>%
mutate(p.smooth = (n+1)/sum(n+1),
log.p = log(p.smooth)) %>%
filter(!sameObj,
sameErr,
partMatch != 'repeated') %>%
ungroup() %>%
# mean and standard error of log-probability across subjects:
group_by(experiment, version, partMatch, erMag) %>%
summarise(n.ss=n(),
mean.log.p = mean(log.p),
sd.log.p = sd(log.p),
sem.log.p = sd.log.p/sqrt(n.ss)) %>%
mutate(description = (factor(version.code[as.character(version)],
levels = version.order)))
# Plot mean log-probability (+/- 2 SEM) against error magnitude, one line
# per match type, faceted by experiment x version description:
data.lineplot %>%
ggplot(aes(x = erMag,
y = mean.log.p,
ymin = mean.log.p-2*sem.log.p,
ymax=mean.log.p+2*sem.log.p,
color=partMatch))+
facet_grid(experiment ~ description)+
# geom_label(data=version.summary %>% filter(version %in% version.include),
# aes(x=0,y=-4, label=paste0('n=',subjects),
# ymin=NULL,ymax=NULL,color=NULL),
# size=3)+
geom_pointrange(size=0.3)+
geom_line() +
scale_color_manual(values=c('correct'='blue',
'repeated'='gray',
'swapped'='red',
'error' = 'orange'))+
scale_x_continuous(breaks=c(1,2), limits = c(0.5,2.5))+
theme_minimal()+
theme(strip.text = element_text(face='bold'),
legend.position = 'right',
panel.grid.minor = element_blank())+
labs(title = 'Probability of reported colors',
subtitle = 'normalized after removing repetitions; showing only same source object',
y = 'mean log(prob) +/- 2 sem',
x = 'source object position (0=target)')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TimeStratPetersenDiagError_fit.R
\name{TimeStratPetersenDiagError_fit}
\alias{TimeStratPetersenDiagError_fit}
\title{Wrapper (*_fit) to call the Time Stratified Petersen Estimator
with Diagonal Entries function.}
\usage{
TimeStratPetersenDiagError_fit(title = "TSDPE", prefix = "TSPDE-",
time, n1, m2, u2, sampfrac = rep(1, length(u2)), jump.after = NULL,
bad.n1 = c(), bad.m2 = c(), bad.u2 = c(), logitP.cov = rep(1,
length(n1)), logitP.fixed = NULL, logitP.fixed.values = NULL,
n.chains = 3, n.iter = 2e+05, n.burnin = 1e+05, n.sims = 2000,
tauU.alpha = 1, tauU.beta = 0.05, taueU.alpha = 1,
taueU.beta = 0.05, mu_xiP = logit(sum(m2, na.rm = TRUE)/sum(n1, na.rm
= TRUE)), tau_xiP = 1/var(logit((m2 + 0.5)/(n1 + 1)), na.rm = TRUE),
tauP.alpha = 0.001, tauP.beta = 0.001, run.prob = seq(0, 1, 0.1),
debug = FALSE, debug2 = FALSE, InitialSeed = ceiling(runif(1, min =
0, max = 1e+06)), save.output.to.files = TRUE)
}
\arguments{
\item{title}{A character string used for a title on reports and graphs}
\item{prefix}{A character string used as the prefix for created files. All
created graph files are of the form prefix-xxxxx.pdf.}
\item{time}{A numeric vector of time used to label the strata. For example,
this could be julian week for data stratified at a weekly level.}
\item{n1}{A numeric vector of the number of marked fish released in each
time stratum.}
\item{m2}{A numeric vector of the number of marked fish from n1 that are
recaptured in each time stratum. All recaptures take place within the
stratum of release.}
\item{u2}{A numeric vector of the number of unmarked fish captured in each
stratum. These will be expanded by the capture efficiency to estimate the
population size in each stratum.}
\item{sampfrac}{\strong{Deprecated} because it really doesn't work as intended.
A numeric vector with entries between 0 and 1 indicating
what fraction of the stratum was sampled. For example, if strata are
calendar weeks, and sampling occurred only on 3 of the 7 days, then the
value of \code{sampfrac} for that stratum would be 3/7.}
\item{jump.after}{A numeric vector with elements belonging to \code{time}.
In some cases, the spline fitting the population numbers should be allowed
to jump. For example, the population size could take a jump when hatchery
released fish suddenly arrive at the trap. The jumps occur AFTER the strata
listed in this argument.}
\item{bad.n1}{A numeric vector with elements belonging to \code{time}. In
some cases, something goes wrong in the stratum, and the number of marked
fish released should be ignored. The values of \code{m2} for this stratum
will also be set to NA for these strata.}
\item{bad.m2}{A numeric vector with elements belonging to \code{time}. In
some cases, something goes wrong in the stratum, and the number of recovered
fish should be ignored. For example, poor handling is suspected to induce
handling induced mortality in the marked fish and so only very few are
recovered. The values of \code{m2} will be set to NA for these strata.}
\item{bad.u2}{A numeric vector with elements belonging to \code{time}. In
some cases, something goes wrong in the stratum, and the number of unmarked
fish should be ignored. For example, the trap didn't work properly in this
stratum. The values of \code{u2} will be set to NA for these strata.}
\item{logitP.cov}{A numeric matrix for covariates to fit the
logit(catchability). Default is a single intercept, i.e. all strata have the
same mean logit(catchability).}
\item{logitP.fixed}{A numeric vector (could be null) of the time strata
where the logit(P) should be fixed. Typically, this is used when the capture
rates for some strata are 0 and logit(P) is set to -10 for these strata. The
fixed values are given in \code{logitP.fixed.values}}
\item{logitP.fixed.values}{A numerical vector (could be null) of the fixed
values for logit(P) at strata given by logitP.fixed. Typically this is used
when certain strata have a 0 capture rate and the fixed value is set to -10
which on the logit scale gives p[i] essentially 0. Don't specify values such
as -50 because numerical problems could occur in WinBugs/OpenBugs.}
\item{n.chains}{Number of parallel MCMC chains to fit.}
\item{n.iter}{Total number of MCMC iterations in each chain.}
\item{n.burnin}{Number of burn-in iterations.}
\item{n.sims}{Number of simulated values to keep for posterior
distribution.}
\item{tauU.alpha}{One of the parameters along with \code{tauU.beta} for the
prior for the variance of the random noise for the smoothing spline.}
\item{tauU.beta}{One of the parameters along with \code{tauU.alpha} for the
prior for the variance of the random noise for the smoothing spline.}
\item{taueU.alpha}{One of the parameters along with \code{taueU.beta} for
the prior for the variance of noise around the spline.}
\item{taueU.beta}{One of the parameters along with \code{taueU.alpha} for
the prior for the variance of noise around the spline.}
\item{mu_xiP}{One of the parameters for the prior for the mean of the
logit(catchability) across strata}
\item{tau_xiP}{One of the parameters for the prior for the mean of the
logit(catchability) across strata}
\item{tauP.alpha}{One of the parameters for the prior for the variance in
logit(catchability) among strata}
\item{tauP.beta}{One of the parameters for the prior for the variance in
logit(catchability) among strata}
\item{run.prob}{Numeric vector indicating percentiles of run timing should
be computed.}
\item{debug}{Logical flag indicating if a debugging run should be made. In
the debugging run, the number of samples in the posterior is reduced
considerably for a quick turn around.}
\item{debug2}{Logical flag indicating if additional debugging information is
produced. Normally the functions will halt at \code{browser()} calls to
allow the user to peek into the internal variables. Not useful except to
package developers.}
\item{InitialSeed}{Numeric value used to initialize the random numbers used
in the MCMC iterations.}
\item{save.output.to.files}{Should the plots and text output be save to the files
in addition to being stored in the MCMC object?}
}
\value{
An MCMC object with samples from the posterior distribution. A
series of graphs and text file are also created in the working directory.
}
\description{
Takes the number of marked fish released, the number of recaptures, and the
number of unmarked fish and uses Bayesian methods to fit a spline
through the population numbers and a hierarchical model for the trap
efficiencies over time. The output is written to files and an MCMC object
is also created with samples from the posterior.
}
\details{
Normally, the wrapper (*_fit) function is called which then calls the
fitting routine.
Use the \code{\link{TimeStratPetersenNonDiagError_fit}} function for cases
where recaptures take place outside the stratum of release.
}
\references{
Bonner, S. J., & Schwarz, C. J. (2011).
Smoothing population size estimates for Time-Stratified Mark-Recapture experiments Using Bayesian P-Splines.
Biometrics, 67, 1498-1507.
\url{https://doi.org/10.1111/j.1541-0420.2011.01599.x}
Schwarz, C. J., & Dempson, J. B. (1994).
Mark-recapture estimation of a salmon smolt population.
Biometrics, 50, 98-108.
Schwarz, C.J. et al. (2009) Trinity River Restoration Project Report available at
\url{http://www.stat.sfu.ca/~cschwarz/Consulting/Trinity/Phase2}
}
\author{
Bonner, S.J. \email{sbonner6@uwo.ca} and
Schwarz, C. J. \email{cschwarz.stat.sfu.ca@gmail.com}.
}
\keyword{~models}
\keyword{~smooth}
| /man/TimeStratPetersenDiagError_fit.Rd | no_license | SOLV-Code/BTSPAS | R | false | true | 7,618 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TimeStratPetersenDiagError_fit.R
\name{TimeStratPetersenDiagError_fit}
\alias{TimeStratPetersenDiagError_fit}
\title{Wrapper (*_fit) to call the Time Stratified Petersen Estimator
with Diagonal Entries function.}
\usage{
TimeStratPetersenDiagError_fit(title = "TSDPE", prefix = "TSPDE-",
time, n1, m2, u2, sampfrac = rep(1, length(u2)), jump.after = NULL,
bad.n1 = c(), bad.m2 = c(), bad.u2 = c(), logitP.cov = rep(1,
length(n1)), logitP.fixed = NULL, logitP.fixed.values = NULL,
n.chains = 3, n.iter = 2e+05, n.burnin = 1e+05, n.sims = 2000,
tauU.alpha = 1, tauU.beta = 0.05, taueU.alpha = 1,
taueU.beta = 0.05, mu_xiP = logit(sum(m2, na.rm = TRUE)/sum(n1, na.rm
= TRUE)), tau_xiP = 1/var(logit((m2 + 0.5)/(n1 + 1)), na.rm = TRUE),
tauP.alpha = 0.001, tauP.beta = 0.001, run.prob = seq(0, 1, 0.1),
debug = FALSE, debug2 = FALSE, InitialSeed = ceiling(runif(1, min =
0, max = 1e+06)), save.output.to.files = TRUE)
}
\arguments{
\item{title}{A character string used for a title on reports and graphs}
\item{prefix}{A character string used as the prefix for created files. All
created graph files are of the form prefix-xxxxx.pdf.}
\item{time}{A numeric vector of time used to label the strata. For example,
this could be julian week for data stratified at a weekly level.}
\item{n1}{A numeric vector of the number of marked fish released in each
time stratum.}
\item{m2}{A numeric vector of the number of marked fish from n1 that are
recaptured in each time stratum. All recaptures take place within the
stratum of release.}
\item{u2}{A numeric vector of the number of unmarked fish captured in each
stratum. These will be expanded by the capture efficiency to estimate the
population size in each stratum.}
\item{sampfrac}{\strong{Deprecated} because it really doesn't work as intended.
A numeric vector with entries between 0 and 1 indicating
what fraction of the stratum was sampled. For example, if strata are
calendar weeks, and sampling occurred only on 3 of the 7 days, then the
value of \code{sampfrac} for that stratum would be 3/7.}
\item{jump.after}{A numeric vector with elements belonging to \code{time}.
In some cases, the spline fitting the population numbers should be allowed
to jump. For example, the population size could take a jump when hatchery
released fish suddenly arrive at the trap. The jumps occur AFTER the strata
listed in this argument.}
\item{bad.n1}{A numeric vector with elements belonging to \code{time}. In
some cases, something goes wrong in the stratum, and the number of marked
fish released should be ignored. The values of \code{m2} for this stratum
will also be set to NA for these strata.}
\item{bad.m2}{A numeric vector with elements belonging to \code{time}. In
some cases, something goes wrong in the stratum, and the number of recovered
fish should be ignored. For example, poor handling is suspected to induce
handling induced mortality in the marked fish and so only very few are
recovered. The values of \code{m2} will be set to NA for these strata.}
\item{bad.u2}{A numeric vector with elements belonging to \code{time}. In
some cases, something goes wrong in the stratum, and the number of unmarked
fish should be ignored. For example, the trap didn't work properly in this
stratum. The values of \code{u2} will be set to NA for these strata.}
\item{logitP.cov}{A numeric matrix for covariates to fit the
logit(catchability). Default is a single intercept, i.e. all strata have the
same mean logit(catchability).}
\item{logitP.fixed}{A numeric vector (could be null) of the time strata
where the logit(P) should be fixed. Typically, this is used when the capture
rates for some strata are 0 and logit(P) is set to -10 for these strata. The
fixed values are given in \code{logitP.fixed.values}}
\item{logitP.fixed.values}{A numerical vector (could be null) of the fixed
values for logit(P) at strata given by logitP.fixed. Typically this is used
when certain strata have a 0 capture rate and the fixed value is set to -10
which on the logit scale gives p[i] essentially 0. Don't specify values such
as -50 because numerical problems could occur in WinBugs/OpenBugs.}
\item{n.chains}{Number of parallel MCMC chains to fit.}
\item{n.iter}{Total number of MCMC iterations in each chain.}
\item{n.burnin}{Number of burn-in iterations.}
\item{n.sims}{Number of simulated values to keep for posterior
distribution.}
\item{tauU.alpha}{One of the parameters along with \code{tauU.beta} for the
prior for the variance of the random noise for the smoothing spline.}
\item{tauU.beta}{One of the parameters along with \code{tauU.alpha} for the
prior for the variance of the random noise for the smoothing spline.}
\item{taueU.alpha}{One of the parameters along with \code{taueU.beta} for
the prior for the variance of noise around the spline.}
\item{taueU.beta}{One of the parameters along with \code{taueU.alpha} for
the prior for the variance of noise around the spline.}
\item{mu_xiP}{One of the parameters for the prior for the mean of the
logit(catchability) across strata}
\item{tau_xiP}{One of the parameters for the prior for the mean of the
logit(catchability) across strata}
\item{tauP.alpha}{One of the parameters for the prior for the variance in
logit(catchability) among strata}
\item{tauP.beta}{One of the parameters for the prior for the variance in
logit(catchability) among strata}
\item{run.prob}{Numeric vector indicating percentiles of run timing should
be computed.}
\item{debug}{Logical flag indicating if a debugging run should be made. In
the debugging run, the number of samples in the posterior is reduced
considerably for a quick turn around.}
\item{debug2}{Logical flag indicating if additional debugging information is
produced. Normally the functions will halt at \code{browser()} calls to
allow the user to peek into the internal variables. Not useful except to
package developers.}
\item{InitialSeed}{Numeric value used to initialize the random numbers used
in the MCMC iterations.}
\item{save.output.to.files}{Should the plots and text output be save to the files
in addition to being stored in the MCMC object?}
}
\value{
An MCMC object with samples from the posterior distribution. A
series of graphs and text file are also created in the working directory.
}
\description{
Takes the number of marked fish released, the number of recaptures, and the
number of unmarked fish and uses Bayesian methods to fit a spline
through the population numbers and a hierarchical model for the trap
efficiencies over time. The output is written to files and an MCMC object
is also created with samples from the posterior.
}
\details{
Normally, the wrapper (*_fit) function is called which then calls the
fitting routine.
Use the \code{\link{TimeStratPetersenNonDiagError_fit}} function for cases
where recaptures take place outside the stratum of release.
}
\references{
Bonner, S. J., & Schwarz, C. J. (2011).
Smoothing population size estimates for Time-Stratified Mark-Recapture experiments Using Bayesian P-Splines.
Biometrics, 67, 1498-1507.
\url{https://doi.org/10.1111/j.1541-0420.2011.01599.x}
Schwarz, C. J., & Dempson, J. B. (1994).
Mark-recapture estimation of a salmon smolt population.
Biometrics, 50, 98-108.
Schwarz, C.J. et al. (2009) Trinity River Restoration Project Report available at
\url{http://www.stat.sfu.ca/~cschwarz/Consulting/Trinity/Phase2}
}
\author{
Bonner, S.J. \email{sbonner6@uwo.ca} and
Schwarz, C. J. \email{cschwarz.stat.sfu.ca@gmail.com}.
}
\keyword{~models}
\keyword{~smooth}
|
# Attach the tidyverse collection of packages.
library(tidyverse)
# Trivial arithmetic expression; its value (5) is printed at top level.
2 + 3
| /code.R | no_license | jpmoroge/homework-0 | R | false | false | 27 | r | library(tidyverse)
2 + 3
|
#######################################################
## Figure 3: Estimated abundance by species and year ##
#######################################################
#Read in data: the .Rdata file provides 'abundance.out' (fitted model output)
load('output/output_hee_abundance_Nmix.Rdata')
#Grab output from JAGS: posterior draws of yearly abundance per treatment
control = abundance.out$sims.list$yearly.control
shelter = abundance.out$sims.list$yearly.shelter
patch04 = abundance.out$sims.list$yearly.patch04
patch2 = abundance.out$sims.list$yearly.patch2
clear = abundance.out$sims.list$yearly.clear
#Bundle into data array with dimensions
#(MCMC sample, species, year, treatment)
data = array(data=NA,dim=c(abundance.out$mcmc.info$n.samples,4,5,5))
data[,,,1] = control
data[,,,2] = shelter
data[,,,3] = patch04
data[,,,4] = patch2
#2 ha cut (treatment 4) has no data for year 4 (2010) -- blank it out
data[,,4,4] = NA
data[,,,5] = clear
#Graph function
#Graph function: draw one panel of posterior mean relative abundance with
#15.87-84.13% quantile error bars for a single species, by year (x axis)
#and harvest treatment (point shape/shade), using the global 'data' array.
#Arguments:
#  species - index (1-4) into the 2nd dimension of the global 'data' array
#  ymax    - upper limit of the y axis
#  title   - panel title
#  ylabel  - logical; draw the "Relative abundance" y-axis label?
#  xlabel  - logical; draw the year labels on the x axis?
#  leg     - logical; draw the treatment legend?
abun.graph = function(species,ymax,title,ylabel,xlabel,leg){
  #Open an empty plot; include the y-axis label only when requested
  if (ylabel) {
    plot(1, type='n', xaxt="n", xlab="", ylab="Relative abundance", main=title,
         xlim=c(0.5,5.5), ylim=c(0,ymax))
  } else {
    plot(1, type='n', xaxt="n", xlab="", ylab="", main=title,
         xlim=c(0.5,5.5), ylim=c(0,ymax))
  }
  #Year labels on the x axis only when requested
  if (xlabel) {
    axis(side=1, at=c(1,2,3,4,5), labels=c('2007', '2008', '2009','2010','2011'),
         tck=0)
  }
  #Grayscale palette and point shapes, one entry per treatment
  colors <- gray(seq(0.4,0.1,length=5))
  s <- species
  #Posterior means and x positions (years jittered by treatment offset)
  xcoord <- matrix(nrow=5,ncol=5)
  ycoord <- matrix(nrow=5,ncol=5)
  offset <- c(-0.2,-0.1,0,0.1,0.2)
  pt.shapes <- c(17,18,21,22,24)
  for (i in 1:5){
    for (j in 1:5){
      ycoord[i,j] <- mean(data[,s,i,j])
      xcoord[i,j] <- i + offset[j]
      lims <- quantile(data[,s,i,j], c(0.1587,0.8413), na.rm=TRUE)
      points(xcoord[i,j], ycoord[i,j], pch=pt.shapes[j], col=colors[j], cex=1.5)
      #Bug fix: use the treatment's own color (was 'col=colors', which
      #recycled to colors[1] and drew every error bar in the darkest gray)
      segments(x0=xcoord[i,j], y0=lims[1], x1=xcoord[i,j], y1=lims[2],
               col=colors[j])
    }
  }
  #Draw pre/post harvest separation line
  abline(v=2.5)
  #Connect years within each treatment; the 2 ha cut (k=4) is missing 2010
  #data, so only the first three years are joined for it
  for (k in 1:5){
    yrs <- if (k == 4) 1:3 else 1:5
    lines(x=xcoord[yrs,k], y=ycoord[yrs,k], type="l", col=colors[k])
  }
  #Plot legend
  if (leg) {
    legend('topleft',rev(c("Control","Shelter","4 ha cut","2 ha cut", "0.4 ha cut")),cex=1,
           pch=c(21,22,24,18,17), lwd=1, col=rev(colors), bg="white", bty="n")
  }
}
#Run abundance graph function for each species, arranged in a 2x2 grid of
#overlapping figure regions via par(fig=...); only the first panel draws
#the legend, and only the bottom row draws year labels.
par(fig=c(0,0.53,0.43,1),new=FALSE)
abun.graph(1,14,"Eastern chipmunk",ylabel=TRUE,xlabel=FALSE,leg=TRUE)
par(fig=c(0.47,1,0.43,1),new=TRUE)
abun.graph(2,20,"White-footed mouse",ylabel=FALSE,xlabel=FALSE,leg=FALSE)
par(fig=c(0,0.53,0,0.57),new=TRUE)
abun.graph(3,5,"Short-tailed shrew",ylabel=TRUE,xlabel=TRUE,leg=FALSE)
par(fig=c(0.47,1,0,0.57),new=TRUE)
abun.graph(4,2.5,"Pine vole",ylabel=FALSE,xlabel=TRUE,leg=FALSE)
| /figures/fig3_abundance_time.R | permissive | pablovaldes/mammal-abundance | R | false | false | 2,875 | r | #######################################################
## Figure 3: Estimated abundance by species and year ##
#######################################################
#Read in data
load('output/output_hee_abundance_Nmix.Rdata')
#Grab output from JAGS
control = abundance.out$sims.list$yearly.control
shelter = abundance.out$sims.list$yearly.shelter
patch04 = abundance.out$sims.list$yearly.patch04
patch2 = abundance.out$sims.list$yearly.patch2
clear = abundance.out$sims.list$yearly.clear
#Bundle into data array
data = array(data=NA,dim=c(abundance.out$mcmc.info$n.samples,4,5,5))
data[,,,1] = control
data[,,,2] = shelter
data[,,,3] = patch04
data[,,,4] = patch2
data[,,4,4] = NA
data[,,,5] = clear
#Graph function
#Graph function: draw one panel of posterior mean relative abundance with
#15.87-84.13% quantile error bars for a single species, by year (x axis)
#and harvest treatment (point shape/shade), using the global 'data' array.
#Arguments:
#  species - index (1-4) into the 2nd dimension of the global 'data' array
#  ymax    - upper limit of the y axis
#  title   - panel title
#  ylabel  - logical; draw the "Relative abundance" y-axis label?
#  xlabel  - logical; draw the year labels on the x axis?
#  leg     - logical; draw the treatment legend?
abun.graph = function(species,ymax,title,ylabel,xlabel,leg){
  #Open an empty plot; include the y-axis label only when requested
  if (ylabel) {
    plot(1, type='n', xaxt="n", xlab="", ylab="Relative abundance", main=title,
         xlim=c(0.5,5.5), ylim=c(0,ymax))
  } else {
    plot(1, type='n', xaxt="n", xlab="", ylab="", main=title,
         xlim=c(0.5,5.5), ylim=c(0,ymax))
  }
  #Year labels on the x axis only when requested
  if (xlabel) {
    axis(side=1, at=c(1,2,3,4,5), labels=c('2007', '2008', '2009','2010','2011'),
         tck=0)
  }
  #Grayscale palette and point shapes, one entry per treatment
  colors <- gray(seq(0.4,0.1,length=5))
  s <- species
  #Posterior means and x positions (years jittered by treatment offset)
  xcoord <- matrix(nrow=5,ncol=5)
  ycoord <- matrix(nrow=5,ncol=5)
  offset <- c(-0.2,-0.1,0,0.1,0.2)
  pt.shapes <- c(17,18,21,22,24)
  for (i in 1:5){
    for (j in 1:5){
      ycoord[i,j] <- mean(data[,s,i,j])
      xcoord[i,j] <- i + offset[j]
      lims <- quantile(data[,s,i,j], c(0.1587,0.8413), na.rm=TRUE)
      points(xcoord[i,j], ycoord[i,j], pch=pt.shapes[j], col=colors[j], cex=1.5)
      #Bug fix: use the treatment's own color (was 'col=colors', which
      #recycled to colors[1] and drew every error bar in the darkest gray)
      segments(x0=xcoord[i,j], y0=lims[1], x1=xcoord[i,j], y1=lims[2],
               col=colors[j])
    }
  }
  #Draw pre/post harvest separation line
  abline(v=2.5)
  #Connect years within each treatment; the 2 ha cut (k=4) is missing 2010
  #data, so only the first three years are joined for it
  for (k in 1:5){
    yrs <- if (k == 4) 1:3 else 1:5
    lines(x=xcoord[yrs,k], y=ycoord[yrs,k], type="l", col=colors[k])
  }
  #Plot legend
  if (leg) {
    legend('topleft',rev(c("Control","Shelter","4 ha cut","2 ha cut", "0.4 ha cut")),cex=1,
           pch=c(21,22,24,18,17), lwd=1, col=rev(colors), bg="white", bty="n")
  }
}
#Run abundance graph function for each species
par(fig=c(0,0.53,0.43,1),new=FALSE)
abun.graph(1,14,"Eastern chipmunk",ylabel=TRUE,xlabel=FALSE,leg=TRUE)
par(fig=c(0.47,1,0.43,1),new=TRUE)
abun.graph(2,20,"White-footed mouse",ylabel=FALSE,xlabel=FALSE,leg=FALSE)
par(fig=c(0,0.53,0,0.57),new=TRUE)
abun.graph(3,5,"Short-tailed shrew",ylabel=TRUE,xlabel=TRUE,leg=FALSE)
par(fig=c(0.47,1,0,0.57),new=TRUE)
abun.graph(4,2.5,"Pine vole",ylabel=FALSE,xlabel=TRUE,leg=FALSE)
|
# GGPlot package
# install.packages("ggplot2")
# Open the package help index for ggplot2.
help(package = "ggplot2")
library("ggplot2")
# Example data: weight, price, and type for five treasure chests.
weights <- c(300, 200, 100, 250, 150)
prices <- c(9000, 5000, 12000, 7500, 18000)
chests <- c('gold', 'silver', 'gems', 'gold', 'gems')
# Convert the chest labels to a factor so they can be mapped to colors.
types <- factor(chests)
# Scatter plot of price vs. weight, colored by chest type.
# NOTE(review): qplot() is deprecated in recent ggplot2 -- consider ggplot().
qplot(weights, prices, color = types)
# Other Topics yet left :
# Factors
# Functions
# Class
#
| /Tuts6.R | permissive | MrL1605/RTuts | R | false | false | 358 | r |
# GGPlot package
# install.packages("ggplot2")
help(package = "ggplot2")
library("ggplot2")
weights <- c(300, 200, 100, 250, 150)
prices <- c(9000, 5000, 12000, 7500, 18000)
chests <- c('gold', 'silver', 'gems', 'gold', 'gems')
types <- factor(chests)
qplot(weights, prices, color = types)
# Other Topics yet left :
# Factors
# Functions
# Class
#
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38585602708738e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615836374-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38585602708738e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PilotDataClass.R
\name{pilotData}
\alias{pilotData}
\title{User friendly interface to class "PilotData"}
\usage{
pilotData(
statistics = NULL,
samplesize = NULL,
distribution = c("norm", "t", "f", "chisq"),
...
)
}
\arguments{
\item{statistics}{vector of test statistics}
\item{samplesize}{total sample size of the pilot-data or effective sample size in two-group case (see Details for more information).}
\item{distribution}{type of the null/alternative distribution, one of 'norm', 't', 'f' or 'chisq'}
\item{...}{additional arguments for the distribution like degrees of freedom}
}
\value{
object of class "PilotData"
}
\description{
User friendly interface to class "PilotData"
}
\details{
details follow
In the two-group case the effective sample size is defined as the square-root of the inverse of 1/n1 + 1/n2.
}
\examples{
pd <- pilotData(statistics=rnorm(100), samplesize=10, distribution="norm")
pd
plot(pd)
}
\author{
Maarten van Iterson
}
| /man/pilotData.Rd | no_license | Rommelio-coli/SSPA | R | false | true | 1,040 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PilotDataClass.R
\name{pilotData}
\alias{pilotData}
\title{User friendly interface to class "PilotData"}
\usage{
pilotData(
statistics = NULL,
samplesize = NULL,
distribution = c("norm", "t", "f", "chisq"),
...
)
}
\arguments{
\item{statistics}{vector of test statistics}
\item{samplesize}{total sample size of the pilot-data or effective sample size in two-group case (see Details for more information).}
\item{distribution}{type of the null/alternative distribution, one of 'norm', 't', 'f' or 'chisq'}
\item{...}{additional arguments for the distribution like degrees of freedom}
}
\value{
object of class "PilotData"
}
\description{
User friendly interface to class "PilotData"
}
\details{
details follow
In the two-group case the effective sample size is defined as the square-root of the inverse of 1/n1 + 1/n2.
}
\examples{
pd <- pilotData(statistics=rnorm(100), samplesize=10, distribution="norm")
pd
plot(pd)
}
\author{
Maarten van Iterson
}
|
# Clifford Attractors
# xn+1 = sin(a yn) + c cos(a xn)
# yn+1 = sin(b xn) + d cos(b yn)
# http://paulbourke.net/fractals/clifford/
# https://github.com/petewerner/misc/blob/master/attractor.R
# https://es.wikipedia.org/wiki/Atractor
library(compiler)
library(viridis)
mapxy <- function(x, y, xmin, xmax, ymin=xmin, ymax=xmax) {
sx <- (width - 1) / (xmax - xmin)
sy <- (height - 1) / (ymax - ymin)
row0 <- round( sx * (x - xmin) )
col0 <- round( sy * (y - ymin) )
col0 * height + row0 + 1
}
clifford <- function(x, y) {
ac <- abs(c)+1
ad <- abs(d)+1
nidxs <- length(mat)
counts <- integer(length=nidxs)
for (i in 1:npoints) {
xt <- sin(a * y) + c * cos(a * x)
y <- sin(b * x) + d * cos(b * y)
x <- xt
idxs <- mapxy(x, y, -ac, ac, -ad, ad)
counts <- counts + tabulate(idxs, nbins=nidxs)
}
mat <<- mat + counts
}
npoints <- 10e3
n <- 100000
width <- 600
height <- 600
#make some random points
rsamp <- matrix(runif(n * 2, min=-2, max=2), nr=n)
setCompilerOptions(suppressAll=TRUE)
mapxy <- cmpfun(mapxy)
clifford <- cmpfun(clifford)
# Colors
cvec <- viridis(100)
# Initial set up (a, b, c, d)
set.seed(2)
initialvalues <- data.frame(replicate(4,round(runif(10, min=-2, max=2), 2)))
for (i in 1:nrow(initialvalues)) {
a <- initialvalues[i,1]
b <- initialvalues[i,2]
c <- initialvalues[i,3]
d <- initialvalues[i,4]
cat("Iteration: ", i)
cat("a: ", a, "b: ", b, "c: ", c, "d: ", d)
mat <- matrix(0, nr=height, nc=width)
clifford(rsamp[,1], rsamp[,2])
dens <- log(mat + 1)/log(max(mat))
par(mar=c(0, 0, 0, 0))
png(filename = paste0("images/NewNewNewNewClifford", i, ".png"), width = 1200,
height = 1200, pointsize = 0.1)
image(t(dens), col=cvec, useRaster=T, xaxt='n', yaxt='n')
dev.off()
cat('Generated image ', i)
} | /attractors/clifford.R | no_license | chucheria/CCR | R | false | false | 1,813 | r | # Clifford Attractors
# xn+1 = sin(a yn) + c cos(a xn)
# yn+1 = sin(b xn) + d cos(b yn)
# http://paulbourke.net/fractals/clifford/
# https://github.com/petewerner/misc/blob/master/attractor.R
# https://es.wikipedia.org/wiki/Atractor
library(compiler)
library(viridis)
mapxy <- function(x, y, xmin, xmax, ymin=xmin, ymax=xmax) {
sx <- (width - 1) / (xmax - xmin)
sy <- (height - 1) / (ymax - ymin)
row0 <- round( sx * (x - xmin) )
col0 <- round( sy * (y - ymin) )
col0 * height + row0 + 1
}
clifford <- function(x, y) {
ac <- abs(c)+1
ad <- abs(d)+1
nidxs <- length(mat)
counts <- integer(length=nidxs)
for (i in 1:npoints) {
xt <- sin(a * y) + c * cos(a * x)
y <- sin(b * x) + d * cos(b * y)
x <- xt
idxs <- mapxy(x, y, -ac, ac, -ad, ad)
counts <- counts + tabulate(idxs, nbins=nidxs)
}
mat <<- mat + counts
}
npoints <- 10e3
n <- 100000
width <- 600
height <- 600
#make some random points
rsamp <- matrix(runif(n * 2, min=-2, max=2), nr=n)
setCompilerOptions(suppressAll=TRUE)
mapxy <- cmpfun(mapxy)
clifford <- cmpfun(clifford)
# Colors
cvec <- viridis(100)
# Initial set up (a, b, c, d)
set.seed(2)
initialvalues <- data.frame(replicate(4,round(runif(10, min=-2, max=2), 2)))
for (i in 1:nrow(initialvalues)) {
a <- initialvalues[i,1]
b <- initialvalues[i,2]
c <- initialvalues[i,3]
d <- initialvalues[i,4]
cat("Iteration: ", i)
cat("a: ", a, "b: ", b, "c: ", c, "d: ", d)
mat <- matrix(0, nr=height, nc=width)
clifford(rsamp[,1], rsamp[,2])
dens <- log(mat + 1)/log(max(mat))
par(mar=c(0, 0, 0, 0))
png(filename = paste0("images/NewNewNewNewClifford", i, ".png"), width = 1200,
height = 1200, pointsize = 0.1)
image(t(dens), col=cvec, useRaster=T, xaxt='n', yaxt='n')
dev.off()
cat('Generated image ', i)
} |
#---- Data Frame tutorial ----
# A data frame is built by combining equal-length vectors (one per column).
# Create a, b, c, d variables (note: the variable `c` shadows the name of
# base::c, but calls like c(...) still resolve to the base function)
a <- c(10,20,30,40)
b <- c('book', 'pen', 'textbook', 'pencil_case')
c <- c(TRUE,FALSE,TRUE,FALSE)
d <- c(2.5, 8, 10, 70)
# Join the variables to create a data frame
df <- data.frame(a,b,c,d)
df
# The column headers default to the variable names.
# We can change the column names with the function names()
names(df) <- c('ID', 'items', 'store', 'price')
df
# Print the structure
str(df)
# Note: before R 4.0, data.frame() converted character columns to factors by
# default (stringsAsFactors = TRUE); since R 4.0 they stay character.
str(df) # structure of the data frame
head(df) # first 6 rows
head(df,n=3) # first 3 rows
tail(df) # last 6 rows
View(df) # opens the spreadsheet-style viewer (RStudio / GUI sessions only)
class(df) # "data.frame"
summary(df) # per-column summary statistics
# Slice the data frame:
# select rows and columns with [rows, columns] after the data frame's name
## Select row 1 in column 2
df[1,2]
## Select rows 1 to 2 (all columns)
df[1:2,]
## Select column 1 (returned as a vector, not a data frame)
df[,1]
## Select rows 1 to 3 and columns 3 to 4
df[1:3, 3:4]
# It is also possible to select the columns by their names.
## Slice with column names
df[, c('ID', 'store')]
# Append a column to the data frame (use the symbol $ to add a new variable)
# Create a new vector
quantity <- c(10, 35, 40, 5)
# Add `quantity` to the `df` data frame
df$quantity <- quantity
df
# Note: the vector must have as many elements as the data frame has rows.
# Hence, executing the statement below raises an error (intentional demo:
# "replacement has 3 rows, data has 4").
quantity <- c(10, 35, 40)
# Add the too-short `quantity` to the `df` data frame -- this errors
df$quantity <- quantity
# Selecting a single column of a data frame with $
# Select the column ID
df$ID
# Subsetting a data frame on a condition (we use the subset() function)
# Select rows with price above 5
subset(df, subset = price > 5)
| /dataframe.R | no_license | Avantika1798/readme | R | false | false | 1,814 | r | #----Data Frame----
# A data frame is built by combining equal-length vectors (one per column).
# Create a, b, c, d variables (note: the variable `c` shadows the name of
# base::c, but calls like c(...) still resolve to the base function)
a <- c(10,20,30,40)
b <- c('book', 'pen', 'textbook', 'pencil_case')
c <- c(TRUE,FALSE,TRUE,FALSE)
d <- c(2.5, 8, 10, 70)
# Join the variables to create a data frame
df <- data.frame(a,b,c,d)
df
# The column headers default to the variable names.
# We can change the column names with the function names()
names(df) <- c('ID', 'items', 'store', 'price')
df
# Print the structure
str(df)
# Note: before R 4.0, data.frame() converted character columns to factors by
# default (stringsAsFactors = TRUE); since R 4.0 they stay character.
str(df) # structure of the data frame
head(df) # first 6 rows
head(df,n=3) # first 3 rows
tail(df) # last 6 rows
View(df) # opens the spreadsheet-style viewer (RStudio / GUI sessions only)
class(df) # "data.frame"
summary(df) # per-column summary statistics
# Slice the data frame:
# select rows and columns with [rows, columns] after the data frame's name
## Select row 1 in column 2
df[1,2]
## Select rows 1 to 2 (all columns)
df[1:2,]
## Select column 1 (returned as a vector, not a data frame)
df[,1]
## Select rows 1 to 3 and columns 3 to 4
df[1:3, 3:4]
# It is also possible to select the columns by their names.
## Slice with column names
df[, c('ID', 'store')]
# Append a column to the data frame (use the symbol $ to add a new variable)
# Create a new vector
quantity <- c(10, 35, 40, 5)
# Add `quantity` to the `df` data frame
df$quantity <- quantity
df
# Note: the vector must have as many elements as the data frame has rows.
# Hence, executing the statement below raises an error (intentional demo:
# "replacement has 3 rows, data has 4").
quantity <- c(10, 35, 40)
# Add the too-short `quantity` to the `df` data frame -- this errors
df$quantity <- quantity
# Selecting a single column of a data frame with $
# Select the column ID
df$ID
# Subsetting a data frame on a condition (we use the subset() function)
# Select rows with price above 5
subset(df, subset = price > 5)
|
cutAverageResHclust <- function(resultMaster) {
  # Average hclust quality scores over 10 repeated runs.
  #
  # For each problem size i in 6:15 and each distance metric (euclidean,
  # maximum, manhattan), resultMaster[[i]]$hclust$<metric> holds a list of
  # runs; each run is a list of 50 score records with numeric fields
  # dunn, sil, gind and k.
  #
  # @param resultMaster Nested result list as described above.
  # @return List indexed 6:15; each element is
  #   list(euclidean = ..., maximum = ..., manhattan = ...), where each
  #   metric entry is a list of 50 records whose fields are the means over
  #   runs 1..10 (same shape as the originals).

  # Helper: mean of the j-th score record over the first n_runs runs.
  average_metric <- function(runs, n_runs = 10, n_records = 50) {
    lapply(seq_len(n_records), function(j) {
      acc <- list(dunn = 0, sil = 0, gind = 0, k = 0)
      for (r in seq_len(n_runs)) {
        rec <- runs[[r]][[j]]
        acc$dunn <- acc$dunn + rec$dunn
        acc$sil  <- acc$sil  + rec$sil
        acc$gind <- acc$gind + rec$gind
        acc$k    <- acc$k    + rec$k
      }
      lapply(acc, function(total) total / n_runs)  # divide each field
    })
  }

  hscores <- list()
  for (i in 6:15) {
    metrics <- resultMaster[[i]]$hclust
    hscores[[i]] <- list(
      euclidean = average_metric(metrics$euclidean),
      maximum   = average_metric(metrics$maximum),
      manhattan = average_metric(metrics$manhattan)
    )
  }
  hscores
}
cutAverageResPam <- function(resultMaster) {
  # Average PAM quality scores over 10 repeated runs (euclidean + manhattan).
  #
  # NOTE(review): despite the name, this reads resultMaster[[i]]$hclust, not
  # a $pam slot -- likely a copy-paste from cutAverageResHclust. Preserved
  # as-is to keep behavior; confirm against the producer of resultMaster.
  #
  # @param resultMaster Nested result list: resultMaster[[i]]$hclust$<metric>
  #   is a list of runs, each a list of 50 records with numeric fields
  #   dunn, sil, gind, k.
  # @return List indexed 6:15; each element is
  #   list(euclidean = ..., manhattan = ...), each metric entry being 50
  #   records averaged over runs 1..10.

  # Helper: mean of the j-th score record over the first n_runs runs.
  average_metric <- function(runs, n_runs = 10, n_records = 50) {
    lapply(seq_len(n_records), function(j) {
      acc <- list(dunn = 0, sil = 0, gind = 0, k = 0)
      for (r in seq_len(n_runs)) {
        rec <- runs[[r]][[j]]
        acc$dunn <- acc$dunn + rec$dunn
        acc$sil  <- acc$sil  + rec$sil
        acc$gind <- acc$gind + rec$gind
        acc$k    <- acc$k    + rec$k
      }
      lapply(acc, function(total) total / n_runs)  # divide each field
    })
  }

  hscores <- list()
  for (i in 6:15) {
    metrics <- resultMaster[[i]]$hclust
    hscores[[i]] <- list(
      euclidean = average_metric(metrics$euclidean),
      manhattan = average_metric(metrics$manhattan)
    )
  }
  hscores
}
cutAverageResKmeans <- function(resultMaster) {
  # Average k-means quality scores over 10 repeated runs (euclidean only).
  #
  # NOTE(review): despite the name, this reads resultMaster[[i]]$hclust, not
  # a $kmeans slot -- likely a copy-paste from cutAverageResHclust. Preserved
  # as-is to keep behavior; confirm against the producer of resultMaster.
  # (Also removed: an unused `singlescoreMan` accumulator the original
  # declared but never filled or returned.)
  #
  # @param resultMaster Nested result list: resultMaster[[i]]$hclust$euclidean
  #   is a list of runs, each a list of 50 records with numeric fields
  #   dunn, sil, gind, k.
  # @return List indexed 6:15; each element is list(euclidean = <50 records
  #   averaged over runs 1..10>).

  # Helper: mean of the j-th score record over the first n_runs runs.
  average_metric <- function(runs, n_runs = 10, n_records = 50) {
    lapply(seq_len(n_records), function(j) {
      acc <- list(dunn = 0, sil = 0, gind = 0, k = 0)
      for (r in seq_len(n_runs)) {
        rec <- runs[[r]][[j]]
        acc$dunn <- acc$dunn + rec$dunn
        acc$sil  <- acc$sil  + rec$sil
        acc$gind <- acc$gind + rec$gind
        acc$k    <- acc$k    + rec$k
      }
      lapply(acc, function(total) total / n_runs)  # divide each field
    })
  }

  hscores <- list()
  for (i in 6:15) {
    hscores[[i]] <- list(
      euclidean = average_metric(resultMaster[[i]]$hclust$euclidean)
    )
  }
  hscores
}
| /cutAveraged.R | no_license | mon3/EA_Grouping | R | false | false | 5,938 | r | cutAverageResHclust <- function(resultMaster){
# NOTE(review): this is the body of cutAverageResHclust(resultMaster); its
# `cutAverageResHclust <- function(resultMaster){` header line is fused into
# the dataset-metadata row immediately above this chunk.
# It averages hclust quality scores (dunn, sil, gind, k) over 10 repeated
# runs for problem sizes i in 6:15 and distance metrics euclidean / maximum /
# manhattan, returning the same nested structure with per-field means.
hscores = list()
for(i in 6:15){
# One accumulator per distance metric; each will hold 50 score records.
singlescore = list()
singlescoreMax = list()
singlescoreMan = list()
for(j in seq(1,50)){
# Zero-initialise the j-th record (fields dunn, sil, gind, k) per metric.
singlescore[[j]] <- list()
singlescore[[j]]$dunn <- 0
singlescore[[j]]$sil <- 0
singlescore[[j]]$gind <- 0
singlescore[[j]]$k <- 0
singlescoreMax[[j]] <- list()
singlescoreMax[[j]]$dunn <- 0
singlescoreMax[[j]]$sil <- 0
singlescoreMax[[j]]$gind <- 0
singlescoreMax[[j]]$k <- 0
singlescoreMan[[j]] <- list()
singlescoreMan[[j]]$dunn <- 0
singlescoreMan[[j]]$sil <- 0
singlescoreMan[[j]]$gind <- 0
singlescoreMan[[j]]$k <- 0
# Sum the j-th record over runs 1..10 for each metric.
for(k in 1:10){
singlescore[[j]]$dunn <- singlescore[[j]]$dunn + resultMaster[[i]]$hclust$euclidean[[k]][[j]]$dunn
singlescore[[j]]$sil <- singlescore[[j]]$sil + resultMaster[[i]]$hclust$euclidean[[k]][[j]]$sil
singlescore[[j]]$gind <- singlescore[[j]]$gind+ resultMaster[[i]]$hclust$euclidean[[k]][[j]]$gind
singlescore[[j]]$k <- singlescore[[j]]$k + resultMaster[[i]]$hclust$euclidean[[k]][[j]]$k
singlescoreMax[[j]]$dunn <- singlescoreMax[[j]]$dunn + resultMaster[[i]]$hclust$maximum[[k]][[j]]$dunn
singlescoreMax[[j]]$sil <- singlescoreMax[[j]]$sil + resultMaster[[i]]$hclust$maximum[[k]][[j]]$sil
singlescoreMax[[j]]$gind <- singlescoreMax[[j]]$gind+ resultMaster[[i]]$hclust$maximum[[k]][[j]]$gind
singlescoreMax[[j]]$k <- singlescoreMax[[j]]$k + resultMaster[[i]]$hclust$maximum[[k]][[j]]$k
singlescoreMan[[j]]$dunn <- singlescoreMan[[j]]$dunn + resultMaster[[i]]$hclust$manhattan[[k]][[j]]$dunn
singlescoreMan[[j]]$sil <- singlescoreMan[[j]]$sil + resultMaster[[i]]$hclust$manhattan[[k]][[j]]$sil
singlescoreMan[[j]]$gind <- singlescoreMan[[j]]$gind+ resultMaster[[i]]$hclust$manhattan[[k]][[j]]$gind
singlescoreMan[[j]]$k <- singlescoreMan[[j]]$k + resultMaster[[i]]$hclust$manhattan[[k]][[j]]$k
}
# Divide the sums by the number of runs (10) to get per-record means.
singlescore[[j]]$dunn <- singlescore[[j]]$dunn/10
singlescore[[j]]$sil <- singlescore[[j]]$sil/10
singlescore[[j]]$gind <- singlescore[[j]]$gind/10
singlescore[[j]]$k <- singlescore[[j]]$k/10
singlescoreMax[[j]]$dunn <- singlescoreMax[[j]]$dunn/10
singlescoreMax[[j]]$sil <- singlescoreMax[[j]]$sil/10
singlescoreMax[[j]]$gind <- singlescoreMax[[j]]$gind/10
singlescoreMax[[j]]$k <- singlescoreMax[[j]]$k/10
singlescoreMan[[j]]$dunn <- singlescoreMan[[j]]$dunn/10
singlescoreMan[[j]]$sil <- singlescoreMan[[j]]$sil/10
singlescoreMan[[j]]$gind <- singlescoreMan[[j]]$gind/10
singlescoreMan[[j]]$k <- singlescoreMan[[j]]$k/10
}
# Store the three averaged metric tables for problem size i.
hscores[[i]] <- list(euclidean=singlescore, maximum=singlescoreMax, manhattan=singlescoreMan)
}
return(hscores)
}
cutAverageResPam <- function(resultMaster) {
  # Average PAM quality scores over 10 repeated runs (euclidean + manhattan).
  #
  # NOTE(review): despite the name, this reads resultMaster[[i]]$hclust, not
  # a $pam slot -- likely a copy-paste from cutAverageResHclust. Preserved
  # as-is to keep behavior; confirm against the producer of resultMaster.
  #
  # @param resultMaster Nested result list: resultMaster[[i]]$hclust$<metric>
  #   is a list of runs, each a list of 50 records with numeric fields
  #   dunn, sil, gind, k.
  # @return List indexed 6:15; each element is
  #   list(euclidean = ..., manhattan = ...), each metric entry being 50
  #   records averaged over runs 1..10.

  # Helper: mean of the j-th score record over the first n_runs runs.
  average_metric <- function(runs, n_runs = 10, n_records = 50) {
    lapply(seq_len(n_records), function(j) {
      acc <- list(dunn = 0, sil = 0, gind = 0, k = 0)
      for (r in seq_len(n_runs)) {
        rec <- runs[[r]][[j]]
        acc$dunn <- acc$dunn + rec$dunn
        acc$sil  <- acc$sil  + rec$sil
        acc$gind <- acc$gind + rec$gind
        acc$k    <- acc$k    + rec$k
      }
      lapply(acc, function(total) total / n_runs)  # divide each field
    })
  }

  hscores <- list()
  for (i in 6:15) {
    metrics <- resultMaster[[i]]$hclust
    hscores[[i]] <- list(
      euclidean = average_metric(metrics$euclidean),
      manhattan = average_metric(metrics$manhattan)
    )
  }
  hscores
}
cutAverageResKmeans <- function(resultMaster) {
  # Average k-means quality scores over 10 repeated runs (euclidean only).
  #
  # NOTE(review): despite the name, this reads resultMaster[[i]]$hclust, not
  # a $kmeans slot -- likely a copy-paste from cutAverageResHclust. Preserved
  # as-is to keep behavior; confirm against the producer of resultMaster.
  # (Also removed: an unused `singlescoreMan` accumulator the original
  # declared but never filled or returned.)
  #
  # @param resultMaster Nested result list: resultMaster[[i]]$hclust$euclidean
  #   is a list of runs, each a list of 50 records with numeric fields
  #   dunn, sil, gind, k.
  # @return List indexed 6:15; each element is list(euclidean = <50 records
  #   averaged over runs 1..10>).

  # Helper: mean of the j-th score record over the first n_runs runs.
  average_metric <- function(runs, n_runs = 10, n_records = 50) {
    lapply(seq_len(n_records), function(j) {
      acc <- list(dunn = 0, sil = 0, gind = 0, k = 0)
      for (r in seq_len(n_runs)) {
        rec <- runs[[r]][[j]]
        acc$dunn <- acc$dunn + rec$dunn
        acc$sil  <- acc$sil  + rec$sil
        acc$gind <- acc$gind + rec$gind
        acc$k    <- acc$k    + rec$k
      }
      lapply(acc, function(total) total / n_runs)  # divide each field
    })
  }

  hscores <- list()
  for (i in 6:15) {
    hscores[[i]] <- list(
      euclidean = average_metric(resultMaster[[i]]$hclust$euclidean)
    )
  }
  hscores
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.