content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kcal.R
\name{calculateKcalPerModel}
\alias{calculateKcalPerModel}
\title{Calculate kcal per model}
\usage{
calculateKcalPerModel(model.res, kcal.params = c(carbs = 4, prot = 4,
satu = 9, mono = 9, poly = 9))
}
\arguments{
\item{model.res}{model results i.e. \code{calculateParams()} function output}
\item{kcal.params}{vector with kcal values for carbs (kcal for carbohydrates),
prot (kcal for protein), satu (kcal for saturated fat),
mono (kcal for monounsaturated fat), poly (kcal for polyunsaturated fat)
(default: \code{c(carbs = 4, prot = 4, satu = 9, mono = 9, poly = 9)})}
}
\value{
matrix with model results and additional column kcal
}
\description{
Calculate kcal per model
}
\author{
Konrad J. Debski
}
| /packages/fsProjAlzOptimalDiet/man/calculateKcalPerModel.Rd | no_license | seventm/fsproj_alzheimeroptimaldiet | R | false | true | 798 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kcal.R
\name{calculateKcalPerModel}
\alias{calculateKcalPerModel}
\title{Calculate kcal per model}
\usage{
calculateKcalPerModel(model.res, kcal.params = c(carbs = 4, prot = 4,
satu = 9, mono = 9, poly = 9))
}
\arguments{
\item{model.res}{model results i.e. \code{calculateParams()} function output}
\item{kcal.params}{vector with kcal values for carbs (kcal for carbohydrates),
prot (kcal for protein), satu (kcal for saturated fat),
mono (kcal for monounsaturated fat), poly (kcal for polyunsaturated fat)
(default: \code{c(carbs = 4, prot = 4, satu = 9, mono = 9, poly = 9)})}
}
\value{
matrix with model results and additional column kcal
}
\description{
Calculate kcal per model
}
\author{
Konrad J. Debski
}
|
#' plot.phenologymap plots a likelihood map with Delta and Phi varying.
#' @title Plot a likelihood map with Delta and Phi varying.
#' @author Marc Girondot
#' @return Return None
#' @param x A map generated with map_phenology.
#' @param ... not used
#' @param col Colors could be heat.colors(128) or rainbow(64) or col=gray(c(seq(0, 1, length.out=128)))
#' @param xlab Label for x axis
#' @param ylab Label for y axis
#' @description This function plots a likelihood map obtained after map_phenology.
#' @family Phenology model
#' @examples
#' \dontrun{
#' library("phenology")
#' # Read a file with data
#' data(Gratiot)
#' # Generate a formatted list named data_Gratiot
#' data_Gratiot<-add_phenology(Gratiot, name="Complete",
#' reference=as.Date("2001-01-01"), format="%d/%m/%Y")
#' # Generate initial points for the optimisation
#' parg<-par_init(data_Gratiot, fixed.parameters=NULL)
#' # Run the optimisation
#' result_Gratiot<-fit_phenology(data=data_Gratiot,
#' fitted.parameters=parg, fixed.parameters=NULL)
#' data(result_Gratiot)
#' # Extract the fitted parameters
#' parg1<-extract_result(result_Gratiot)
#' # Add constant Alpha and Tau values
#' # [day d amplitude=(Alpha+Nd*Beta)^Tau with Nd being the number of counts for day d]
#' pfixed<-c(parg1, Alpha=0, Tau=1)
#' pfixed<-pfixed[-which(names(pfixed)=="Theta")]
#' # The only fitted parameter will be Beta
#' parg2<-c(Beta=0.5, parg1["Theta"])
#' # Generate a likelihood map
#' # [default Phi=seq(from=0.1, to=20, length.out=100) but it is very long]
#' # Take care, it takes 20 hours ! The data map_Gratiot has the result
#' map_Gratiot<-map_phenology(data=data_Gratiot,
#' Phi=seq(from=0.1, to=20, length.out=100),
#' fitted.parameters=parg2, fixed.parameters=pfixed)
#' data(map_Gratiot)
#' # Plot the map
#' plot(map_Gratiot, col=heat.colors(128))
#' }
#' @method plot phenologymap
#' @export
plot.phenologymap <-
function(x, ..., col=heat.colors(128), xlab="Phi", ylab="Delta") {
  # 'fields' is only a run-time dependency; check for it at call time so the
  # package loads even when 'fields' is absent.
  if (!requireNamespace("fields", quietly = TRUE)) {
    stop("fields package is required for this function; Please install it first")
  }
  # 'x' is the phenologymap object; re-use the name for the Phi axis values.
  map <- x
  x <- map$Phi
  y <- map$Delta
  input <- map$input
  # Bug fix: the user-supplied xlab/ylab arguments were previously ignored --
  # the labels were hard-coded to "Phi" and "Delta" in the image.plot call.
  getFromNamespace("image.plot", ns="fields")(x, y, input,
      zlim=c(min(input, na.rm=TRUE), max(input, na.rm=TRUE)),
      col=col, axes=TRUE, xlab=xlab, ylab=ylab, nlevel = length(col))
}
| /R/plot.phenologymap.R | no_license | cran/phenology | R | false | false | 2,541 | r | #' plot.phenologymap plots a likelihood map with Delta and Phi varying.
#' @title Plot a likelihood map with Delta and Phi varying.
#' @author Marc Girondot
#' @return Return None
#' @param x A map generated with map_phenology.
#' @param ... not used
#' @param col Colors could be heat.colors(128) or rainbow(64) or col=gray(c(seq(0, 1, length.out=128)))
#' @param xlab Label for x axis
#' @param ylab Label for y axis
#' @description This function plots a likelihood map obtained after map_phenology.
#' @family Phenology model
#' @examples
#' \dontrun{
#' library("phenology")
#' # Read a file with data
#' data(Gratiot)
#' # Generate a formatted list named data_Gratiot
#' data_Gratiot<-add_phenology(Gratiot, name="Complete",
#' reference=as.Date("2001-01-01"), format="%d/%m/%Y")
#' # Generate initial points for the optimisation
#' parg<-par_init(data_Gratiot, fixed.parameters=NULL)
#' # Run the optimisation
#' result_Gratiot<-fit_phenology(data=data_Gratiot,
#' fitted.parameters=parg, fixed.parameters=NULL)
#' data(result_Gratiot)
#' # Extract the fitted parameters
#' parg1<-extract_result(result_Gratiot)
#' # Add constant Alpha and Tau values
#' # [day d amplitude=(Alpha+Nd*Beta)^Tau with Nd being the number of counts for day d]
#' pfixed<-c(parg1, Alpha=0, Tau=1)
#' pfixed<-pfixed[-which(names(pfixed)=="Theta")]
#' # The only fitted parameter will be Beta
#' parg2<-c(Beta=0.5, parg1["Theta"])
#' # Generate a likelihood map
#' # [default Phi=seq(from=0.1, to=20, length.out=100) but it is very long]
#' # Take care, it takes 20 hours ! The data map_Gratiot has the result
#' map_Gratiot<-map_phenology(data=data_Gratiot,
#' Phi=seq(from=0.1, to=20, length.out=100),
#' fitted.parameters=parg2, fixed.parameters=pfixed)
#' data(map_Gratiot)
#' # Plot the map
#' plot(map_Gratiot, col=heat.colors(128))
#' }
#' @method plot phenologymap
#' @export
plot.phenologymap <-
function(x, ..., col=heat.colors(128), xlab="Phi", ylab="Delta") {
  # 'fields' is only a run-time dependency; check for it at call time so the
  # package loads even when 'fields' is absent.
  if (!requireNamespace("fields", quietly = TRUE)) {
    stop("fields package is required for this function; Please install it first")
  }
  # 'x' is the phenologymap object; re-use the name for the Phi axis values.
  map <- x
  x <- map$Phi
  y <- map$Delta
  input <- map$input
  # Bug fix: the user-supplied xlab/ylab arguments were previously ignored --
  # the labels were hard-coded to "Phi" and "Delta" in the image.plot call.
  getFromNamespace("image.plot", ns="fields")(x, y, input,
      zlim=c(min(input, na.rm=TRUE), max(input, na.rm=TRUE)),
      col=col, axes=TRUE, xlab=xlab, ylab=ylab, nlevel = length(col))
}
|
#' h2o4gpu in R
#'
#' @docType package
#' @name h2o4gpu
#'
#' @examples
#' \dontrun{
#'
#' library(h2o4gpu)
#'
#' # Setup dataset
#' x <- iris[1:4]
#' y <- as.integer(iris$Species) - 1
#'
#' # Initialize and train the classifier
#' model <- h2o4gpu.random_forest_classifier() %>% fit(x, y)
#'
#' # Make predictions
#' predictions <- model %>% predict(x)
#'
#' }
NULL
h2o4gpu <- NULL
np <- NULL
.onLoad <- function(libname, pkgname) {
  # delay load handler
  # The Python modules are imported lazily (delay_load) so that attaching the
  # R package does not require a working Python installation; reticulate
  # resolves the import on first use.
  # NOTE(review): 'displayed_warning' is captured by the on_load closure but is
  # always FALSE here -- presumably check_compatibility() is meant to track
  # whether a warning was already shown. Confirm once it is implemented.
  displayed_warning <- FALSE
  delay_load <- list(
    priority = 5,
    environment = "r-h2o4gpu",
    on_load = function() {
      check_compatibility(displayed_warning)
    },
    on_error = function(e) {
      # Surface the underlying Python import error without the R call context.
      stop(e$error_message, call. = FALSE)
    }
  )
  # Bind the module proxies into the package namespace (declared above as NULL).
  h2o4gpu <<- reticulate::import("h2o4gpu", delay_load = delay_load)
  np <<- reticulate::import("numpy", convert = FALSE, delay_load = TRUE)
}
# Placeholder for now
# Intended to verify that the installed Python 'h2o4gpu' module is compatible
# with this R wrapper; currently a no-op and 'displayed_warning' is unused.
check_compatibility <- function(displayed_warning) {
}
| /src/interface_r/R/package.R | permissive | pnijhara/h2o4gpu | R | false | false | 980 | r | #' h2o4gpu in R
#'
#' @docType package
#' @name h2o4gpu
#'
#' @examples
#' \dontrun{
#'
#' library(h2o4gpu)
#'
#' # Setup dataset
#' x <- iris[1:4]
#' y <- as.integer(iris$Species) - 1
#'
#' # Initialize and train the classifier
#' model <- h2o4gpu.random_forest_classifier() %>% fit(x, y)
#'
#' # Make predictions
#' predictions <- model %>% predict(x)
#'
#' }
NULL
h2o4gpu <- NULL
np <- NULL
.onLoad <- function(libname, pkgname) {
# delay load handler
displayed_warning <- FALSE
delay_load <- list(
priority = 5,
environment = "r-h2o4gpu",
on_load = function() {
check_compatibility(displayed_warning)
},
on_error = function(e) {
stop(e$error_message, call. = FALSE)
}
)
h2o4gpu <<- reticulate::import("h2o4gpu", delay_load = delay_load)
np <<- reticulate::import("numpy", convert = FALSE, delay_load = TRUE)
}
# Placeholder for now
check_compatibility <- function(displayed_warning) {
}
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ProjectName: AZ MAX Price
# Purpose: AZ Price
# programmer: Zhe Liu
# Date: 2020-10-14
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# JVM heap for rJava-backed packages (openxlsx/xlsx) and legacy
# stringsAsFactors behaviour for R < 4.0 compatibility.
options(java.parameters = "-Xmx2048m",
        stringsAsFactors = FALSE)
##---- loading the required packages ----
# library() (rather than require()) fails loudly when a package is missing,
# which is what a batch script wants; the duplicated 'feather' entry was
# removed.
suppressPackageStartupMessages({
  library(openxlsx)
  library(readxl)
  library(feather)
  library(plyr)
  library(stringi)
  library(RODBC)
  library(MASS)
  library(car)
  library(data.table)
  library(plotly)
  library(tidyverse)
  library(lubridate)
  library(forecast)
  library(kknn)
})
##---- setup the directories ----
# dir.create() is portable; system("mkdir ...") relies on a Unix-style shell
# and fails silently on Windows cmd. showWarnings = FALSE makes re-runs quiet
# when the directories already exist.
for (d in c("01_Background", "02_Inputs", "03_Outputs", "04_Codes",
            "05_Internal_Review", "06_Deliveries")) {
  dir.create(d, showWarnings = FALSE)
}
| /00_Preparing_Working.R | no_license | Zaphiroth/AZ_MAX_2017_2020 | R | false | false | 858 | r | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ProjectName: AZ MAX Price
# Purpose: AZ Price
# programmer: Zhe Liu
# Date: 2020-10-14
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
options(java.parameters = "-Xmx2048m",
stringsAsFactors = FALSE)
##---- loading the required packages ----
suppressPackageStartupMessages({
require(openxlsx)
require(readxl)
require(feather)
require(plyr)
require(stringi)
require(feather)
require(RODBC)
require(MASS)
require(car)
require(data.table)
require(plotly)
require(tidyverse)
require(lubridate)
require(forecast)
require(kknn)
})
##---- setup the directories ----
# dir.create() is portable; system("mkdir ...") relies on a Unix-style shell
# and fails silently on Windows cmd. showWarnings = FALSE makes re-runs quiet
# when the directories already exist.
for (d in c("01_Background", "02_Inputs", "03_Outputs", "04_Codes",
            "05_Internal_Review", "06_Deliveries")) {
  dir.create(d, showWarnings = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparsenet_enrichment_functions.R
\name{countIntType_batch}
\alias{countIntType_batch}
\title{Counts number of (+,+) and (+,-) interactions in a set of networks}
\usage{
countIntType_batch(
inFiles,
plusID,
minusID,
tmpDir = tempdir(),
enrType = "binary",
numCores = 1L
)
}
\arguments{
\item{inFiles}{(char) path to interaction networks to process}
\item{plusID}{(char) IDs of + nodes}
\item{minusID}{(char) IDs of - nodes}
\item{tmpDir}{(char) path to dir where temporary files can be stored}
\item{enrType}{(char) see getEnr.R}
\item{numCores}{(integer) number of cores for parallel processing}
}
\value{
(matrix) two columns, one row per network
If \code{enrType="binary"}, number of (+,+) and other interactions
Otherwise if \code{enrType="corr"} mean edge weight of (+,+) edges and
of other edges
}
\description{
Counts number of (+,+) and (+,-) interactions in a set of networks
}
\examples{
d <- tempdir()
# write PSN
m1 <- matrix(c("P1","P1","P2","P2","P3","P4",1,1,1),byrow=FALSE,ncol=3)
write.table(m1,file=paste(d,"net1.txt",sep=getFileSep()),sep="\t",
col.names=FALSE,row.names=FALSE,quote=FALSE)
m2 <- matrix(c("P3","P4",1),nrow=1)
write.table(m2,file=paste(d,"net2.txt",sep=getFileSep()),sep="\t",
col.names=FALSE,row.names=FALSE,quote=FALSE)
countIntType_batch(paste(d,c("net1.txt","net2.txt"),sep=getFileSep()),
c("P1","P2","P3"),c("P4","P5"))
}
| /man/countIntType_batch.Rd | permissive | BaderLab/netDx | R | false | true | 1,459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparsenet_enrichment_functions.R
\name{countIntType_batch}
\alias{countIntType_batch}
\title{Counts number of (+,+) and (+,-) interactions in a set of networks}
\usage{
countIntType_batch(
inFiles,
plusID,
minusID,
tmpDir = tempdir(),
enrType = "binary",
numCores = 1L
)
}
\arguments{
\item{inFiles}{(char) path to interaction networks to process}
\item{plusID}{(char) IDs of + nodes}
\item{minusID}{(char) IDs of - nodes}
\item{tmpDir}{(char) path to dir where temporary files can be stored}
\item{enrType}{(char) see getEnr.R}
\item{numCores}{(integer) number of cores for parallel processing}
}
\value{
(matrix) two columns, one row per network
If \code{enrType="binary"}, number of (+,+) and other interactions
Otherwise if \code{enrType="corr"} mean edge weight of (+,+) edges and
of other edges
}
\description{
Counts number of (+,+) and (+,-) interactions in a set of networks
}
\examples{
d <- tempdir()
# write PSN
m1 <- matrix(c("P1","P1","P2","P2","P3","P4",1,1,1),byrow=FALSE,ncol=3)
write.table(m1,file=paste(d,"net1.txt",sep=getFileSep()),sep="\t",
col.names=FALSE,row.names=FALSE,quote=FALSE)
m2 <- matrix(c("P3","P4",1),nrow=1)
write.table(m2,file=paste(d,"net2.txt",sep=getFileSep()),sep="\t",
col.names=FALSE,row.names=FALSE,quote=FALSE)
countIntType_batch(paste(d,c("net1.txt","net2.txt"),sep=getFileSep()),
c("P1","P2","P3"),c("P4","P5"))
}
|
## These functions are used to make a special "matrix" that can cache its inverse
## to save computing time and power when it is needed again.
## This function explicitly creates a special "matrix" that has functions to set
## and get both values and inverses
## Construct a cache-aware matrix wrapper. The returned list exposes set/get
## for the matrix itself and setinverse/getinverse for the memoised inverse;
## storing a new matrix via set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix => stale inverse
    },
    get = function() {
      x
    },
    setinverse = function(inverse) {
      cached_inverse <<- inverse
    },
    getinverse = function() {
      cached_inverse
    }
  )
}
## This function checks to see whether the inverse is already known and the matrix
## has not changed. If so, it returns the stored inverse. Otherwise it returns a
## new computed value.
## Return the inverse of the special "matrix" 'x' built by makeCacheMatrix().
## The inverse is computed with solve() only on the first call; subsequent
## calls reuse the cached value (announced via message()).
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get())
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## to save computing time and power when it is needed again.
## This function explicitly creates a special "matrix" that has functions to set
## and get both values and inverses
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function checks to see whether the inverse is already known and the matrix
## has not changed. If so, it returns the stored inverse. Otherwise it returns a
## new computed value.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data)
x$setinverse(i)
i
} |
# Standardize the bathrooms column to a z-score.
s=sd(airbnb_normal$bathrooms)
x=mean(airbnb_normal$bathrooms)
# NOTE(review): the standardized *bathrooms* values are written into the
# 'beds' column -- this looks like a copy/paste slip; confirm whether
# airbnb_normal$bathrooms was intended on the left-hand side.
airbnb_normal$beds=(airbnb_normal$bathrooms-x)/s
# k-means with k = 10 on feature columns 14:31; nstart = 25 restarts guards
# against poor local optima, and the seed makes the run reproducible.
set.seed(3000)
k=10
km.out=kmeans(airbnb_normal[14:31],k,nstart=25)
km.clusters=km.out$cluster
# Per-cluster mean price and bedroom count, computed on the unscaled data.
tapply(airbnb$price,km.clusters,mean)
tapply(airbnb$bedrooms,km.clusters,mean)
# Inspect the cluster membership of all two-bedroom listings.
rn=which(airbnb$bedrooms == 2)
airbnb[rn,]
test=km.clusters[rn]
table(test)
x=mean(airbnb_normal$bathrooms)
airbnb_normal$beds=(airbnb_normal$bathrooms-x)/s
set.seed(3000)
k=10
km.out=kmeans(airbnb_normal[14:31],k,nstart=25)
km.clusters=km.out$cluster
tapply(airbnb$price,km.clusters,mean)
tapply(airbnb$bedrooms,km.clusters,mean)
rn=which(airbnb$bedrooms == 2)
airbnb[rn,]
test=km.clusters[rn]
table(test) |
# Fit a temperature-dependent Monod model of soil respiration:
#   Rsoil_hat = x[1] * exp(x[3] / (R * Temp)) * DOC * Biomass / (x[2] + DOC)
# where x[1] is a pre-exponential rate constant, x[2] a half-saturation
# constant for DOC, and x[3] an activation-energy-like term (J/mol; 8.314 is
# the gas constant) -- presumably; confirm against the accompanying paper.
# 'data' must contain columns Biomass, DOC, Temperature and Rsoil.
# Returns c(R2, AIC, ll) for the best-fit parameters.
monod_Vmax2<-function(data){
  #creating function for parameters estimation
  estim<-function(fr){
    #defining the objective function
    # est() returns -2 * log-likelihood for parameter vector x, assuming
    # Gaussian residuals. It is minimised below by modMCMC and DEoptim.
    # NOTE(review): the log term uses 2*mean(obs)*sd(obs)^2 where the standard
    # Gaussian log-likelihood uses 2*pi*sd(obs)^2 -- confirm this is intended.
    est<-function(x){
      Cmic<-as.numeric(fr[, "Biomass"])
      Ctot<-as.numeric(fr[, "DOC"])
      Temp<-as.numeric(fr[, "Temperature"])
      yhat<-x[1]*exp(x[3]/8.314/Temp)*Ctot*Cmic/(x[2]+Ctot)
      obs<-as.numeric(fr[, "Rsoil"])
      # SSres<-sum(((obs-yhat)^2), na.rm = T)
      # SStot<-sum(((obs-mean(obs, na.rm=T))^2), na.rm = T)
      # Rsq<-1-(SSres/SStot)
      ll<--length(obs)*log(2*mean(obs, na.rm=T)*sd(obs, na.rm=T)^2)/2-sum((obs-yhat)^2, na.rm=T)/2/sd(obs, na.rm=T)^2
      # AIC<-2*length(x)-2*ll
      return(-2*ll)
    }
    #defining the goodness of fit function
    # goodness() recomputes the model at x and reports R2, AIC and the
    # log-likelihood (same formula as est(), so the two must stay in sync).
    goodness<-function(x){
      Cmic<-as.numeric(fr[, "Biomass"])
      Ctot<-as.numeric(fr[, "DOC"])
      Temp<-as.numeric(fr[, "Temperature"])
      yhat<-x[1]*exp(x[3]/8.314/Temp)*Ctot*Cmic/(x[2]+Ctot)
      obs<-as.numeric(fr[, "Rsoil"])
      SSres<-sum(((obs-yhat)^2), na.rm = T)
      SStot<-sum(((obs-mean(obs, na.rm=T))^2), na.rm = T)
      Rsq<-1-(SSres/SStot)
      ll<--length(obs)*log(2*mean(obs, na.rm=T)*sd(obs, na.rm=T)^2)/2-sum((obs-yhat)^2, na.rm=T)/2/sd(obs, na.rm=T)^2
      AIC<-2*length(x)-2*ll
      return(c(R2=Rsq,AIC=AIC, ll=ll))
    }
    #approximate parameter estimation is done by MCMC method
    # modMCMC (FME package) explores the parameter space from a rough start;
    # its min/max visited values then bound the global search below.
    par_mcmc<-modMCMC(f=est, p=c(1, 30, -60000),
                      lower=c(1e-30, 3e-6, -300000),
                      upper=c(1e30, 30000, 30000), niter=50000)
    #lower and upper limits for parameters are extracted
    pl<-summary(par_mcmc)["min",]
    pu<-summary(par_mcmc)["max",]
    #these limits are used to find global optimum by DEoptim
    # Differential evolution refines the fit inside the MCMC-derived bounds.
    opt_par<-DEoptim(fn=est, lower=pl, upper=pu,
                     control = c(itermax = 10000, steptol = 50, reltol = 1e-8,
                                 trace=FALSE, strategy=3, NP=500))
    #goodness of fit
    fit<-goodness(opt_par$optim$bestmem)
    #approximate parameter estimation is done by MCMC method
    # par_prof<-modMCMC(f=est, p=opt_par$optim$bestmem,
    #                   lower=pl,
    #                   upper=pu, niter=50000)
    #
    # #best parameters
    # p<-opt_par$optim$bestmem
    # names(p)<-c("ks", "Eas")
    #
    # #sd of parameters
    # p.sd<-summary(par_prof)[2,]
    #return list with opt_par and par_prof
    # estim_out<-list()
    # estim_out$pars<-p
    # estim_out$pars.sd<-p.sd
    # estim_out$fit<-fit
    # Only the goodness-of-fit vector is returned; the commented code above
    # shows a richer return value that was disabled at some point.
    return(fit)
  }
  #do the calculation
  res<-estim(fr=data)
  # ks<-as.numeric(res$pars[1])
  # Eas<-as.numeric(res$pars[2])
  # ks.sd<-as.numeric(res$pars.sd[1])
  # Eas.sd<-as.numeric(res$pars.sd[2])
  # R2s<-as.numeric(res[1])
  # AICs<-as.numeric(res[2])
  # lls<-as.numeric(res[3])
  #
  return(res)
}
| /Global_Eqs/monod_Vmax2.R | no_license | petacapek/Rsoil_Tsensitivity | R | false | false | 2,796 | r | monod_Vmax2<-function(data){
#creating function for parameters estimation
estim<-function(fr){
#defining the objective function
est<-function(x){
Cmic<-as.numeric(fr[, "Biomass"])
Ctot<-as.numeric(fr[, "DOC"])
Temp<-as.numeric(fr[, "Temperature"])
yhat<-x[1]*exp(x[3]/8.314/Temp)*Ctot*Cmic/(x[2]+Ctot)
obs<-as.numeric(fr[, "Rsoil"])
# SSres<-sum(((obs-yhat)^2), na.rm = T)
# SStot<-sum(((obs-mean(obs, na.rm=T))^2), na.rm = T)
# Rsq<-1-(SSres/SStot)
ll<--length(obs)*log(2*mean(obs, na.rm=T)*sd(obs, na.rm=T)^2)/2-sum((obs-yhat)^2, na.rm=T)/2/sd(obs, na.rm=T)^2
# AIC<-2*length(x)-2*ll
return(-2*ll)
}
#defining the goodness of fit function
goodness<-function(x){
Cmic<-as.numeric(fr[, "Biomass"])
Ctot<-as.numeric(fr[, "DOC"])
Temp<-as.numeric(fr[, "Temperature"])
yhat<-x[1]*exp(x[3]/8.314/Temp)*Ctot*Cmic/(x[2]+Ctot)
obs<-as.numeric(fr[, "Rsoil"])
SSres<-sum(((obs-yhat)^2), na.rm = T)
SStot<-sum(((obs-mean(obs, na.rm=T))^2), na.rm = T)
Rsq<-1-(SSres/SStot)
ll<--length(obs)*log(2*mean(obs, na.rm=T)*sd(obs, na.rm=T)^2)/2-sum((obs-yhat)^2, na.rm=T)/2/sd(obs, na.rm=T)^2
AIC<-2*length(x)-2*ll
return(c(R2=Rsq,AIC=AIC, ll=ll))
}
#approximate parameter estimation is done by MCMC method
par_mcmc<-modMCMC(f=est, p=c(1, 30, -60000),
lower=c(1e-30, 3e-6, -300000),
upper=c(1e30, 30000, 30000), niter=50000)
#lower and upper limits for parameters are extracted
pl<-summary(par_mcmc)["min",]
pu<-summary(par_mcmc)["max",]
#these limits are used to find global optimum by DEoptim
opt_par<-DEoptim(fn=est, lower=pl, upper=pu,
control = c(itermax = 10000, steptol = 50, reltol = 1e-8,
trace=FALSE, strategy=3, NP=500))
#goodness of fit
fit<-goodness(opt_par$optim$bestmem)
#approximate parameter estimation is done by MCMC method
# par_prof<-modMCMC(f=est, p=opt_par$optim$bestmem,
# lower=pl,
# upper=pu, niter=50000)
#
# #best parameters
# p<-opt_par$optim$bestmem
# names(p)<-c("ks", "Eas")
#
# #sd of parameters
# p.sd<-summary(par_prof)[2,]
#return list with opt_par and par_prof
# estim_out<-list()
# estim_out$pars<-p
# estim_out$pars.sd<-p.sd
# estim_out$fit<-fit
return(fit)
}
#do the calculation
res<-estim(fr=data)
# ks<-as.numeric(res$pars[1])
# Eas<-as.numeric(res$pars[2])
# ks.sd<-as.numeric(res$pars.sd[1])
# Eas.sd<-as.numeric(res$pars.sd[2])
# R2s<-as.numeric(res[1])
# AICs<-as.numeric(res[2])
# lls<-as.numeric(res[3])
#
return(res)
}
|
library(RLLVMCompile)
# f allocates a length-n numeric vector, sets its first element to 1, and
# returns the vector.
f = function(n) { x = numeric(n) ; x[1] = 1; x}
# Compile f to native code via LLVM: the compiled routine returns a REALSXP
# (numeric vector) and takes a 32-bit integer argument.
fc = compileFunction(f, REALSXPType, Int32Type)
# ??? Does this work showModule(fc)
# Do we need to protect the new vector?
# Should we have a GEP?
# Invoke the compiled routine with n = 10; expected result: c(1, 0, ..., 0).
.llvm(fc, 10)
| /tests/numericCall.R | no_license | duncantl/RLLVMCompile | R | false | false | 238 | r | library(RLLVMCompile)
f = function(n) { x = numeric(n) ; x[1] = 1; x}
fc = compileFunction(f, REALSXPType, Int32Type)
# ??? Does this work showModule(fc)
# Do we need to protect the new vector?
# Should we have a GEP?
.llvm(fc, 10)
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Wrap a matrix in a list of accessor closures that can cache its inverse.
## set()/get() manage the matrix itself; setInverse()/getInverse() manage the
## memoised inverse. Replacing the matrix clears the cache.
makeCacheMatrix <- function( m = matrix() ) {
  inv_cache <- NULL
  list(
    set = function( matrix ) {
      m <<- matrix
      inv_cache <<- NULL  # the old inverse no longer applies
    },
    get = function() m,
    setInverse = function(inverse) inv_cache <<- inverse,
    getInverse = function() inv_cache
  )
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
## Compute the inverse of the special "matrix" produced by makeCacheMatrix().
## If the inverse has already been calculated (and the matrix has not
## changed), retrieve it from the cache instead of recomputing.
cacheSolve <- function(z, ...) {
  ## Return the cached inverse immediately if it is already set
  m <- z$getInverse()
  if( !is.null(m) ) {
    message("getting cache data from CPU")
    return(m)
  }
  ## Fetch the underlying matrix and invert it.
  ## Bug fix: the previous code computed solve(data) %*% data, which is the
  ## identity matrix, not the inverse of 'data'.
  data <- z$get()
  m <- solve(data)
  ## Store the inverse in the cache, then return it
  z$setInverse(m)
  m
}
## Write a short comment describing this function
## WARNING (review): this second definition of cacheSolve() overwrites the
## working implementation above when the file is sourced top to bottom; as
## written it always returns NULL. It looks like leftover assignment template
## code -- confirm and remove.
cacheSolve <- function(z, ...) {
  ## Return a matrix that is the inverse of 'x'
}
| /cachematrix.R | no_license | ashwin2792/Programming_Assignment_2 | R | false | false | 1,606 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function( m = matrix() ) {
## Initializing the inverse property for the matrix
i <- NULL
## setting/initializing the matrix
set <- function( matrix ) {
m <<- matrix
i <<- NULL
}
##getting the matrix
get <- function() {
## Returning the value of matrix
m
}
##calculating the inverse of the matrix
setInverse <- function(inverse) {
i <<- inverse
}
## Retreiving inverse of the matrix
getInverse <- function() {
i
}
## Returning a list of the methods
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
## Compute the inverse of the special "matrix" produced by makeCacheMatrix().
## If the inverse has already been calculated (and the matrix has not
## changed), retrieve it from the cache instead of recomputing.
cacheSolve <- function(z, ...) {
  ## Return the cached inverse immediately if it is already set
  m <- z$getInverse()
  if( !is.null(m) ) {
    message("getting cache data from CPU")
    return(m)
  }
  ## Fetch the underlying matrix and invert it.
  ## Bug fix: the previous code computed solve(data) %*% data, which is the
  ## identity matrix, not the inverse of 'data'.
  data <- z$get()
  m <- solve(data)
  ## Store the inverse in the cache, then return it
  z$setInverse(m)
  m
}
## Write a short comment describing this function
cacheSolve <- function(z, ...) {
## Return a matrix that is the inverse of 'x'
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.infos.rivers.R
\name{read.infos.rivers}
\alias{read.infos.rivers}
\title{Read csv file with information on rivers and return it in a nested list}
\usage{
read.infos.rivers(filename, grids)
}
\arguments{
\item{filename}{path and name of the csv file with river information}
\item{grids}{nested list with grid definitions: grids$fine and grids$coarse are grid definitions of the fine and coarse, respectively BSH-HBM grids}
}
\value{
nested list with grid information it contains:
}
\description{
Columns in the csv file: id, name, lat, lon, grid, ycell, xcell, filename
}
\details{
Elements of the output list: filename, lon, lat, xcell, ycell, grid, riverid, rivername
}
\examples{
# get grid info:
grid_info <- get.infos.grids.hbm.basic()
# set filename
file <- 'files/river_list.dat'
# get river infos
riverInfos <- read.infos.rivers(file, grid_info)
# (you will get some warnings here)
# Elements of the output list:
#   filename, lon, lat, xcell, ycell, grid [fine, coarse], riverid, rivername
}
\seealso{
get.infos.grids.hbm.basic
}
\author{
Daniel Neumann, daniel.neumann@io-warnemuende.de
}
| /man/read.infos.rivers.Rd | permissive | neumannd/riverdata | R | false | true | 1,210 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.infos.rivers.R
\name{read.infos.rivers}
\alias{read.infos.rivers}
\title{Read csv file with information on rivers and return it in a nested list}
\usage{
read.infos.rivers(filename, grids)
}
\arguments{
\item{filename}{path and name of the csv file with river information}
\item{grids}{nested list with grid definitions: grids$fine and grids$coarse are grid definitions of the fine and coarse, respectively BSH-HBM grids}
}
\value{
nested list with grid information it contains:
}
\description{
Columns in the csv file: id, name, lat, lon, grid, ycell, xcell, filename
}
\details{
Elements of the output list: filename, lon, lat, xcell, ycell, grid, riverid, rivername
}
\examples{
# get grid info:
grid_info <- get.infos.grids.hbm.basic()
# set filename
file <- 'files/river_list.dat'
# get river infos
riverInfos <- read.infos.rivers(file, grid_info)
# (you will get some warnings here)
# Elements of the output list:
#   filename, lon, lat, xcell, ycell, grid [fine, coarse], riverid, rivername
}
\seealso{
get.infos.grids.hbm.basic
}
\author{
Daniel Neumann, daniel.neumann@io-warnemuende.de
}
|
# ---- Load raw Qualtrics export ----
# NOTE(review): an absolute, user-specific working directory makes this script
# non-portable; consider relative paths or the 'here' package.
setwd("C:/Users/Ludwig/OneDrive - London School of Economics/LSE/Dissertation/Data Analysis/Raw Data Qualtrics")
# Bug fix: read_delim() comes from the readr package, which was never attached
# anywhere in this script; attach it so the script runs in a fresh session.
library(readr)
AgencyEN <- read_delim("Agency ALL ENGISH.CSV",";", escape_double = FALSE, trim_ws = TRUE)
#[1] "StartDate" "EndDate" "Status"
#[4] "IPAddress" "Progress" "Duration (in seconds)"
#[7] "Finished" "RecordedDate" "ResponseId"
#[10] "RecipientLastName" "RecipientFirstName" "RecipientEmail"
#[13] "ExternalReference" "LocationLatitude" "LocationLongitude"
#[16] "DistributionChannel" "UserLanguage" "Q25"
#[19] "Q37_1" "Q18" "Q34_1"
#[22] "D2" "Q35_1" "Q32"
#[25] "Q1" "Q2" "Q6"
#[28] "Q33" "Q26" "Q29"
#[31] "Q19" "Q15_1" "Q21"
#[34] "Q3" "Q10" "Q12"
#[37] "SSID" "PID" "SMS"
#[40] "DAY" "SIG" "RDate"
#[43] "TIME" "RT" "TimeZone"
#[46] "addcode" "RID"
names(AgencyEN)[48] <- "MindRaw"
names(AgencyEN)[49] <- "MindCode"
names(AgencyEN)[50] <- "MindValence"
names(AgencyEN)[51] <- "Congruence"
names(AgencyEN)[52] <- "ActionRaw"
names(AgencyEN)[53] <- "ActionCode"
# Throw out variables
AgencyEN <- AgencyEN[ -c(1:2), -c(2:17, 20, 22, 35, 37, 39, 42, 45:47) ]
# Remove empty rows
library(dplyr)
library(janitor)
AgencyEN <- AgencyEN %>% remove_empty("rows")
Participants <- read_delim("a--Participants.csv", ";", escape_double = FALSE, trim_ws = TRUE)
AgencyEN <- AgencyEN[which(as.numeric(AgencyEN$PID) %in% Participants$PID),]
### VARIABLE SETUP
colnames(AgencyEN)[colnames(AgencyEN)=="Q25"] <- "MoodValence"
AgencyEN$MoodValence <- as.numeric(AgencyEN$MoodValence)
AgencyEN$MoodValence[which(AgencyEN$MoodValence==1 | AgencyEN$MoodValence==2)] <- 1
AgencyEN$MoodValence[which(AgencyEN$MoodValence==3 | AgencyEN$MoodValence==4)] <- 2
AgencyEN$MoodValence[which(AgencyEN$MoodValence==5)] <- 3
AgencyEN$MoodValence[which(AgencyEN$MoodValence==6 | AgencyEN$MoodValence==7)] <- 4
AgencyEN$MoodValence[which(AgencyEN$MoodValence==8 | AgencyEN$MoodValence==9)] <- 5
colnames(AgencyEN)[colnames(AgencyEN)=="Q37_1"] <- "Arousal"
unique(AgencyEN$Arousal)
AgencyEN$Arousal <- as.numeric(AgencyEN$Arousal)
colnames(AgencyEN)[colnames(AgencyEN)=="Q34_1"] <- "ClosenessMindTracking"
unique(AgencyEN$ClosenessMindTracking)
colnames(AgencyEN)[colnames(AgencyEN)=="Q35_1"] <- "ClosenessActionTracking"
unique(AgencyEN$ClosenessActionTracking)
colnames(AgencyEN)[colnames(AgencyEN)=="Q32"] <- "Tracking"
AgencyEN$Tracking <- factor(AgencyEN$Tracking, levels = c("1", "2"), labels=c("Yes","No"))
unique(AgencyEN$Tracking)
# Turning characters to numeric
cols.num <- c(3:5, 7:12)
AgencyEN[cols.num] <- sapply(AgencyEN[cols.num],as.numeric)
colnames(AgencyEN)[colnames(AgencyEN)=="Q3"] <- "Location"
colnames(AgencyEN)[colnames(AgencyEN)=="Q12"] <- "Sociability"
AgencyEN$Location <- factor(AgencyEN$Location,
levels = c("1", "2", "3","4","5","6"),
labels=c("home","work/school","outdoor-public","indoor-public","public-transport","walking"))
AgencyEN$Sociability <- factor(AgencyEN$Sociability,
levels = c("1", "4","5","6","7","8","9","10","11","12"),
labels=c("alone","not together","stranger","colleague","friend","family member","2+ strangers","2+ colleagues","2+ friends","2+ family members"))
# Agency Questions
AgencyEN$Q1 <- 8-AgencyEN$Q1
AgencyEN$Q2 <- 8-AgencyEN$Q2
colnames(AgencyEN)[colnames(AgencyEN)=="Q6"] <- "Q3"
colnames(AgencyEN)[colnames(AgencyEN)=="Q33"] <- "Q4"
AgencyEN$Q4 <- AgencyEN$Q4-4
colnames(AgencyEN)[colnames(AgencyEN)=="Q26"] <- "Others"
AgencyEN$Others <- factor(AgencyEN$Others, levels = c("1", "2"), labels=c("Yes","No"))
colnames(AgencyEN)[colnames(AgencyEN)=="Q29"] <- "Q5"
AgencyEN$Q5 <- AgencyEN$Q5-3
colnames(AgencyEN)[colnames(AgencyEN)=="Q19"] <- "Learnings"
colnames(AgencyEN)[colnames(AgencyEN)=="Q15_1"] <- "TimeLag"
colnames(AgencyEN)[colnames(AgencyEN)=="Q21"] <- "Goal"
AgencyEN$Goal <- factor(AgencyEN$Goal, levels = c("1", "2", "3", "4"), labels=c("Indifferent","Overperform","OnTrack", "Behind"))
# Turning characters to numeric
# NOTE(review): positional column indices are fragile after the many renames
# above, and as.numeric() on a factor column silently yields the underlying
# integer codes -- verify these indices only address character columns.
cols.num <- c(2:5, 7:10, 12, 14, 18:20, 24:26, 28 )
AgencyEN[cols.num] <- sapply(AgencyEN[cols.num],as.numeric)
# Source-language marker: every row in this file is the English survey (2).
AgencyEN$Source <- 2
AgencyEN$Source <- factor(AgencyEN$Source, levels = c("1", "2"), labels=c("German","English"))
# Tests
sapply(AgencyEN, class)
# Persist the cleaned dataset for the downstream analysis scripts.
save(AgencyEN, file="AgencyEN.Rda")
| /Sense of Agency via ESM and multilevel modelling/1 - AgencyEN cleanup.R | no_license | kusterlu/publish | R | false | false | 4,955 | r | setwd("C:/Users/Ludwig/OneDrive - London School of Economics/LSE/Dissertation/Data Analysis/Raw Data Qualtrics")
# Load the raw Qualtrics export of the English-language ESM survey.
# NOTE(review): the file name "Agency ALL ENGISH.CSV" contains a typo
# ("ENGISH") but must match the file on disk, so it is left unchanged.
AgencyEN <- read_delim("Agency ALL ENGISH.CSV",";", escape_double = FALSE, trim_ws = TRUE)
# Column layout of the raw export (for reference when indexing by position):
#[1] "StartDate" "EndDate" "Status"
#[4] "IPAddress" "Progress" "Duration (in seconds)"
#[7] "Finished" "RecordedDate" "ResponseId"
#[10] "RecipientLastName" "RecipientFirstName" "RecipientEmail"
#[13] "ExternalReference" "LocationLatitude" "LocationLongitude"
#[16] "DistributionChannel" "UserLanguage" "Q25"
#[19] "Q37_1" "Q18" "Q34_1"
#[22] "D2" "Q35_1" "Q32"
#[25] "Q1" "Q2" "Q6"
#[28] "Q33" "Q26" "Q29"
#[31] "Q19" "Q15_1" "Q21"
#[34] "Q3" "Q10" "Q12"
#[37] "SSID" "PID" "SMS"
#[40] "DAY" "SIG" "RDate"
#[43] "TIME" "RT" "TimeZone"
#[46] "addcode" "RID"
# Give the manually coded columns (positions 48-53) descriptive names.
# NOTE(review): hard-coded positions; verify if the export format changes.
names(AgencyEN)[48] <- "MindRaw"
names(AgencyEN)[49] <- "MindCode"
names(AgencyEN)[50] <- "MindValence"
names(AgencyEN)[51] <- "Congruence"
names(AgencyEN)[52] <- "ActionRaw"
names(AgencyEN)[53] <- "ActionCode"
# Throw out variables: drop the first two rows (presumably Qualtrics
# sub-header rows -- confirm) and metadata columns not used for analysis.
AgencyEN <- AgencyEN[ -c(1:2), -c(2:17, 20, 22, 35, 37, 39, 42, 45:47) ]
# Remove empty rows
library(dplyr)
library(janitor)
AgencyEN <- AgencyEN %>% remove_empty("rows")
# Keep only responses whose PID appears in the participant register.
Participants <- read_delim("a--Participants.csv", ";", escape_double = FALSE, trim_ws = TRUE)
AgencyEN <- AgencyEN[which(as.numeric(AgencyEN$PID) %in% Participants$PID),]
### VARIABLE SETUP
# Collapse the 9-point mood valence item to 5 points
# (1-2 -> 1, 3-4 -> 2, 5 -> 3, 6-7 -> 4, 8-9 -> 5).
colnames(AgencyEN)[colnames(AgencyEN)=="Q25"] <- "MoodValence"
AgencyEN$MoodValence <- as.numeric(AgencyEN$MoodValence)
AgencyEN$MoodValence[which(AgencyEN$MoodValence==1 | AgencyEN$MoodValence==2)] <- 1
AgencyEN$MoodValence[which(AgencyEN$MoodValence==3 | AgencyEN$MoodValence==4)] <- 2
AgencyEN$MoodValence[which(AgencyEN$MoodValence==5)] <- 3
AgencyEN$MoodValence[which(AgencyEN$MoodValence==6 | AgencyEN$MoodValence==7)] <- 4
AgencyEN$MoodValence[which(AgencyEN$MoodValence==8 | AgencyEN$MoodValence==9)] <- 5
colnames(AgencyEN)[colnames(AgencyEN)=="Q37_1"] <- "Arousal"
unique(AgencyEN$Arousal)  # interactive sanity check of raw values
AgencyEN$Arousal <- as.numeric(AgencyEN$Arousal)
colnames(AgencyEN)[colnames(AgencyEN)=="Q34_1"] <- "ClosenessMindTracking"
unique(AgencyEN$ClosenessMindTracking)
colnames(AgencyEN)[colnames(AgencyEN)=="Q35_1"] <- "ClosenessActionTracking"
unique(AgencyEN$ClosenessActionTracking)
colnames(AgencyEN)[colnames(AgencyEN)=="Q32"] <- "Tracking"
AgencyEN$Tracking <- factor(AgencyEN$Tracking, levels = c("1", "2"), labels=c("Yes","No"))
unique(AgencyEN$Tracking)
# Turning characters to numeric
# NOTE(review): hard-coded column positions -- fragile if layout changes.
cols.num <- c(3:5, 7:12)
AgencyEN[cols.num] <- sapply(AgencyEN[cols.num],as.numeric)
# Context variables: where the participant was and who they were with.
colnames(AgencyEN)[colnames(AgencyEN)=="Q3"] <- "Location"
colnames(AgencyEN)[colnames(AgencyEN)=="Q12"] <- "Sociability"
AgencyEN$Location <- factor(AgencyEN$Location,
levels = c("1", "2", "3","4","5","6"),
labels=c("home","work/school","outdoor-public","indoor-public","public-transport","walking"))
AgencyEN$Sociability <- factor(AgencyEN$Sociability,
levels = c("1", "4","5","6","7","8","9","10","11","12"),
labels=c("alone","not together","stranger","colleague","friend","family member","2+ strangers","2+ colleagues","2+ friends","2+ family members"))
# Agency Questions
# Q1/Q2 are reverse-scored (8 - x flips the response scale; presumably a
# 1-7 Likert scale -- TODO confirm against the questionnaire).
AgencyEN$Q1 <- 8-AgencyEN$Q1
AgencyEN$Q2 <- 8-AgencyEN$Q2
# Rename raw Qualtrics question codes to sequential agency item names.
colnames(AgencyEN)[colnames(AgencyEN)=="Q6"] <- "Q3"
colnames(AgencyEN)[colnames(AgencyEN)=="Q33"] <- "Q4"
# Recenter raw Qualtrics codes by subtracting an offset (presumably so the
# scale starts at 1 -- confirm).
AgencyEN$Q4 <- AgencyEN$Q4-4
colnames(AgencyEN)[colnames(AgencyEN)=="Q26"] <- "Others"
AgencyEN$Others <- factor(AgencyEN$Others, levels = c("1", "2"), labels=c("Yes","No"))
colnames(AgencyEN)[colnames(AgencyEN)=="Q29"] <- "Q5"
AgencyEN$Q5 <- AgencyEN$Q5-3
colnames(AgencyEN)[colnames(AgencyEN)=="Q19"] <- "Learnings"
colnames(AgencyEN)[colnames(AgencyEN)=="Q15_1"] <- "TimeLag"
colnames(AgencyEN)[colnames(AgencyEN)=="Q21"] <- "Goal"
AgencyEN$Goal <- factor(AgencyEN$Goal, levels = c("1", "2", "3", "4"), labels=c("Indifferent","Overperform","OnTrack", "Behind"))
# Turning characters to numeric
cols.num <- c(2:5, 7:10, 12, 14, 18:20, 24:26, 28 )
AgencyEN[cols.num] <- sapply(AgencyEN[cols.num],as.numeric)
# Tag every row as coming from the English-language survey.
AgencyEN$Source <- 2
AgencyEN$Source <- factor(AgencyEN$Source, levels = c("1", "2"), labels=c("German","English"))
# Tests
sapply(AgencyEN, class)  # quick interactive check of the final column classes
save(AgencyEN, file="AgencyEN.Rda")
|
#' Simplified Reference Tissue Model 2
#'
#' Function to fit the SRTM2 model of Wu and Carson (2002) to data.
#'
#' @param t_tac Numeric vector of times for each frame in minutes. We use the
#' time halfway through the frame as well as a zero. If a time zero frame is
#' not included, it will be added.
#' @param reftac Numeric vector of radioactivity concentrations in the reference
#' tissue for each frame. We include zero at time zero: if not included, it is
#' added.
#' @param roitac Numeric vector of radioactivity concentrations in the target
#' tissue for each frame. We include zero at time zero: if not included, it is
#' added.
#' @param k2prime Optional. If empty, then the model will fit a value of k2prime.
#' If specified, the model will be fitted with this parameter set (i.e. as a
#' 2 parameter model).
#' @param weights Optional. Numeric vector of the weights assigned to each frame
#' in the fitting. We include zero at time zero: if not included, it is added.
#' If not specified, uniform weights will be used.
#' @param frameStartEnd Optional. This allows one to specify the beginning and
#' final frame to use for modelling, e.g. c(1,20). This is to assess time
#' stability.
#' @param R1.start Optional. Starting parameter for fitting of R1. Default is 1.
#' @param R1.lower Optional. Lower bound for the fitting of R1. Default is 0.
#' @param R1.upper Optional. Upper bound for the fitting of R1. Default is 10.
#' @param k2prime.start Optional. Starting parameter for fitting of k2prime. Default is
#' 0.1.
#' @param k2prime.lower Optional. Lower bound for the fitting of k2prime. Default is 0.001.
#' @param k2prime.upper Optional. Upper bound for the fitting of k2prime. Default is 1.
#' @param bp.start Optional. Starting parameter for fitting of bp. Default is
#' 1.5.
#' @param bp.lower Optional. Lower bound for the fitting of bp. Default is -10.
#' @param bp.upper Optional. Upper bound for the fitting of bp. Default is 15.
#' @param multstart_iter Number of iterations for starting parameters. Default
#' is 1. For more information, see \code{\link[nls.multstart]{nls_multstart}}.
#' If specified as 1 for any parameters, the original starting value will be
#' used, and the multstart_lower and multstart_upper values ignored.
#' @param multstart_lower Optional. Lower bounds for starting parameters.
#' Defaults to the lower bounds. Named list of whichever parameters' starting
#' bounds should be altered.
#' @param multstart_upper Optional. Upper bounds for starting parameters.
#' Defaults to the upper bounds. Named list of whichever parameters' starting
#' bounds should be altered.
#' @param printvals Optional. This displays the parameter values for each
#' iteration of the model. This is useful for debugging and changing starting
#' values and upper and lower bounds for parameters.
#'
#' @return A list with a data frame of the fitted parameters \code{out$par},
#' their percentage standard errors \code{out$par.se}, the model fit object
#' \code{out$fit}, the model weights \code{out$weights}, and a dataframe
#' containing the TACs both of the data and the fitted values \code{out$tacs}.
#'
#' @examples
#'
#' data(simref)
#'
#' t_tac <- simref$tacs[[2]]$Times
#' reftac <- simref$tacs[[2]]$Reference
#' roitac <- simref$tacs[[2]]$ROI1
#' weights <- simref$tacs[[2]]$Weights
#'
#' fit_fitk2prime <- srtm2(t_tac, reftac, roitac)
#' fit_setk2prime <- srtm2(t_tac, reftac, roitac, k2prime=0.1)
#' @author Granville J Matheson, \email{mathesong@@gmail.com}
#'
#' @references Wu Y, Carson RE. Noise reduction in the simplified reference tissue
#' model for neuroreceptor functional imaging. J Cereb Blood Flow Metab.
#' 2002;22:1440-1452.
#'
#'
#'
#' @export
srtm2 <- function(t_tac, reftac, roitac, k2prime = NULL, weights = NULL, frameStartEnd = NULL,
                  R1.start = 1, R1.lower = 0, R1.upper = 10,
                  k2prime.start = 0.1, k2prime.lower = 0.001, k2prime.upper = 1,
                  bp.start = 1.5, bp.lower = -10, bp.upper = 15,
                  multstart_iter = 1, multstart_lower = NULL, multstart_upper = NULL,
                  printvals = FALSE) {

  # Tidying: zero-frame insertion, frame windowing and default weights are
  # delegated to the shared reference-tissue input helper.
  modeldata <- tidyinput_ref(t_tac, reftac, roitac, weights, frameStartEnd)

  # Parameters: k2prime is a free parameter only when the user has not fixed
  # it; otherwise the model reduces to two free parameters (R1, bp).
  if (is.null(k2prime)) {
    start <- c(R1 = R1.start, k2prime = k2prime.start, bp = bp.start)
    lower <- c(R1 = R1.lower, k2prime = k2prime.lower, bp = bp.lower)
    upper <- c(R1 = R1.upper, k2prime = k2prime.upper, bp = bp.upper)
  } else {
    if (length(k2prime) > 1) {
      stop("k2prime must be specified by a single value.")
    }
    start <- c(R1 = R1.start, bp = bp.start)
    lower <- c(R1 = R1.lower, bp = bp.lower)
    upper <- c(R1 = R1.upper, bp = bp.upper)
  }

  multstart_pars <- fix_multstartpars(
    start, lower, upper, multstart_iter,
    multstart_lower, multstart_upper
  )

  multstart_upper <- multstart_pars$multstart_upper
  multstart_lower <- multstart_pars$multstart_lower

  # Solution: when k2prime is fixed, its numeric value is baked into the
  # model formula string so the optimiser only sees the free parameters.
  formula <- paste0("roitac ~ srtm2_model(t_tac, reftac, R1, k2prime",
                    ifelse(is.null(k2prime),
                           yes = "",
                           no = paste0("=", k2prime)),
                    ", bp)")

  if (prod(multstart_iter) == 1) {
    # Single starting point: Levenberg-Marquardt fit
    output <- minpack.lm::nlsLM(
      formula = as.formula(formula),
      data = modeldata,
      start = start, lower = lower, upper = upper,
      weights = weights,
      control = minpack.lm::nls.lm.control(maxiter = 200),
      trace = printvals
    )
  } else {
    # Multiple starting points across the multstart bounds
    output <- nls.multstart::nls_multstart(
      formula = as.formula(formula),
      data = modeldata,
      supp_errors = "Y",
      start_lower = multstart_lower,
      start_upper = multstart_upper,
      iter = multstart_iter, convergence_count = FALSE,
      lower = lower, upper = upper, modelweights = weights
    )
  }

  # Check for parameters hitting limits (compared at 3 decimal places).
  # A parameter matching exactly one bound triggers the warning; matching
  # both (upper == lower, i.e. deliberately pinned) does not.
  limcheck_u <- purrr::map2_lgl(round(upper, 3), round(coef(output), 3), identical)
  limcheck_l <- purrr::map2_lgl(round(lower, 3), round(coef(output), 3), identical)
  limcheck <- (limcheck_u + limcheck_l) == 1

  if (any(limcheck)) {
    warning(
      paste0(
        "Fitted parameters are hitting upper or lower limit bounds. Consider \n",
        "either modifying the upper and lower limit boundaries, or else using \n",
        "multstart when fitting the model (see the function documentation).") )
  }

  # Output: measured and fitted TACs side by side
  tacs <- data.frame(
    Time = modeldata$t_tac,
    Reference = modeldata$reftac,
    Target = modeldata$roitac,
    Target_fitted = as.numeric(fitted(output))
  )

  # Coefficients and their standard errors as one-row data frames
  par <- as.data.frame(as.list(coef(output)))
  par.se <- par
  par.se[1, ] <- purrr::map_dbl(names(coef(output)), ~ get_se(output, .x))
  names(par.se) <- paste0(names(par.se), ".se")

  if (!is.null(k2prime)) {
    # k2prime was user-fixed: report it with zero uncertainty
    par$k2prime <- k2prime
    par.se$k2prime <- 0
  }

  # Derived parameter: k2a = R1 * k2prime / (bp + 1)
  par$k2a <- with(par, (R1 * k2prime) / (bp + 1))
  par.se$k2a.se <- get_se(output, "(R1 * k2prime) / (bp + 1)")

  out <- list(
    par = par, par.se = par.se,
    fit = output, weights = modeldata$weights, tacs = tacs,
    model = "srtm2"
  )

  class(out) <- c("srtm2", "kinfit")

  return(out)
}
#' Model: Simplified Reference Tissue Model 2
#'
#' This is the SRTM2 model itself by which predicted values are generated.
#'
#' @param t_tac Numeric vector of times for each frame in minutes. We use the time halfway through the frame as well as a zero.
#' @param reftac Numeric vector of radioactivity concentrations in the reference tissue for each frame.
#' @param R1 Parameter value for R1
#' @param k2prime Parameter value for k2prime
#' @param bp Parameter value for bp
#'
#' @return A numeric vector of the predicted values of the TAC in the target region.
#'
#' @examples
#' \dontrun{
#' srtm2_model(t_tac, reftac, R1 = 0.9, k2prime = 0.1, bp = 0.1)
#' }
#'
#' @author Granville J Matheson, \email{mathesong@@gmail.com}
#'
#' @references Wu Y, Carson RE. Noise reduction in the simplified reference tissue
#' model for neuroreceptor functional imaging. J Cereb Blood Flow Metab.
#' 2002;22:1440-1452.
#'
#' @export
srtm2_model <- function(t_tac, reftac, R1, k2prime, bp) {
  # Evaluate the model on a fine regular grid, then sample back at t_tac.
  t_fine <- pracma::linspace(min(t_tac), max(t_tac), 1024)
  dt <- t_fine[2] - t_fine[1]
  ref_fine <- pracma::interp1(t_tac, reftac, t_fine, method = "linear")

  # Apparent efflux rate constant from the target tissue
  k2a <- (R1 * k2prime) / (bp + 1)

  # Non-displaceable component plus the convolved bound component
  nondisplaceable <- R1 * ref_fine
  bound <- kinfit_convolve(R1 * (k2prime - k2a) * ref_fine,
                           exp(-k2a * t_fine), dt)

  # Interpolate the predicted target TAC back onto the frame times
  pracma::interp1(t_fine, nondisplaceable + bound, t_tac)
}
#' Plot: Simplified Reference Tissue Model 2
#'
#' Function to visualise the fit of the SRTM2 model to data.
#'
#' @param srtm2out The output object of the SRTM2 fitting procedure.
#' @param roiname Optional. The name of the Target Region to see it on the plot.
#' @param refname Optional. The name of the Reference Region to see it on the plot.
#'
#' @return A ggplot2 object of the plot.
#'
#' @examples
#' data(simref)
#'
#' t_tac <- simref$tacs[[2]]$Times
#' reftac <- simref$tacs[[2]]$Reference
#' roitac <- simref$tacs[[2]]$ROI1
#' weights <- simref$tacs[[2]]$Weights
#'
#' fit <- srtm2(t_tac, reftac, roitac, weights=weights)
#'
#' plot_srtm2fit(fit)
#' @author Granville J Matheson, \email{mathesong@@gmail.com}
#'
#' @import ggplot2
#'
#' @export
plot_srtm2fit <- function(srtm2out, roiname = NULL, refname = NULL) {

  # Default labels for the target and reference regions
  if (is.null(roiname)) {
    roiname <- "ROI"
  }
  if (is.null(refname)) {
    refname <- "Reference"
  }

  # Measured TACs (reference + target), carrying the fit weights so that
  # point size can reflect them.
  measured <- data.frame(
    Time = srtm2out$tacs$Time,
    Reference = srtm2out$tacs$Reference,
    ROI.measured = srtm2out$tacs$Target,
    Weights = weights(srtm2out$fit)
  )

  # Fitted target TAC (local named fitted_df to avoid masking stats::fitted)
  fitted_df <- data.frame(
    Time = srtm2out$tacs$Time,
    ROI.fitted = srtm2out$tacs$Target_fitted,
    Weights = weights(srtm2out$fit)
  )

  measured <- plyr::rename(measured, c(
    "ROI.measured" = paste0(roiname, ".measured"),
    "Reference" = refname
  ))

  fitted_df <- plyr::rename(fitted_df, c("ROI.fitted" = paste0(roiname, ".fitted")))

  # Reshape to long format for ggplot
  tidymeasured <- tidyr::gather(
    measured,
    key = Region, value = Radioactivity,
    -Time, -Weights, factor_key = FALSE
  )

  tidyfitted <- tidyr::gather(
    fitted_df,
    key = Region, value = Radioactivity,
    -Time, -Weights, factor_key = FALSE
  )

  # Fix colour order: reference, measured target, fitted target
  Region <- forcats::fct_inorder(factor(c(tidymeasured$Region, tidyfitted$Region)))

  myColors <- RColorBrewer::brewer.pal(3, "Set1")
  names(myColors) <- levels(Region)
  colScale <- scale_colour_manual(name = "Region", values = myColors)

  # NOTE(review): guides(shape = FALSE) is deprecated in ggplot2 >= 3.3.4 in
  # favour of guides(shape = "none"); kept as FALSE here for compatibility
  # with older ggplot2 versions.
  outplot <- ggplot(tidymeasured, aes(x = Time, y = Radioactivity, colour = Region)) +
    geom_point(data = tidymeasured, aes(shape = "a", size = Weights)) +
    geom_line(data = tidyfitted) +
    guides(shape = FALSE, color = guide_legend(order = 1)) + colScale +
    scale_size(range = c(1, 3)) +
    coord_cartesian(ylim = c(0, max(tidymeasured$Radioactivity)))

  return(outplot)
}
| /R/kinfitr_srtm2.R | no_license | kang2000h/kinfitr | R | false | false | 11,041 | r | #' Simplified Reference Tissue Model 2
#'
#' Function to fit the SRTM2 model of Wu and Carson (2002) to data.
#'
#' @param t_tac Numeric vector of times for each frame in minutes. We use the
#' time halfway through the frame as well as a zero. If a time zero frame is
#' not included, it will be added.
#' @param reftac Numeric vector of radioactivity concentrations in the reference
#' tissue for each frame. We include zero at time zero: if not included, it is
#' added.
#' @param roitac Numeric vector of radioactivity concentrations in the target
#' tissue for each frame. We include zero at time zero: if not included, it is
#' added.
#' @param k2prime Optional. If empty, then the model will fit a value of k2prime.
#' If specified, the model will be fitted with this parameter set (i.e. as a
#' 2 parameter model).
#' @param weights Optional. Numeric vector of the weights assigned to each frame
#' in the fitting. We include zero at time zero: if not included, it is added.
#' If not specified, uniform weights will be used.
#' @param frameStartEnd Optional. This allows one to specify the beginning and
#' final frame to use for modelling, e.g. c(1,20). This is to assess time
#' stability.
#' @param R1.start Optional. Starting parameter for fitting of R1. Default is 1.
#' @param R1.lower Optional. Lower bound for the fitting of R1. Default is 0.
#' @param R1.upper Optional. Upper bound for the fitting of R1. Default is 10.
#' @param k2prime.start Optional. Starting parameter for fitting of k2prime. Default is
#' 0.1.
#' @param k2prime.lower Optional. Lower bound for the fitting of k2prime. Default is 0.001.
#' @param k2prime.upper Optional. Upper bound for the fitting of k2prime. Default is 1.
#' @param bp.start Optional. Starting parameter for fitting of bp. Default is
#' 1.5.
#' @param bp.lower Optional. Lower bound for the fitting of bp. Default is -10.
#' @param bp.upper Optional. Upper bound for the fitting of bp. Default is 15.
#' @param multstart_iter Number of iterations for starting parameters. Default
#' is 1. For more information, see \code{\link[nls.multstart]{nls_multstart}}.
#' If specified as 1 for any parameters, the original starting value will be
#' used, and the multstart_lower and multstart_upper values ignored.
#' @param multstart_lower Optional. Lower bounds for starting parameters.
#' Defaults to the lower bounds. Named list of whichever parameters' starting
#' bounds should be altered.
#' @param multstart_upper Optional. Upper bounds for starting parameters.
#' Defaults to the upper bounds. Named list of whichever parameters' starting
#' bounds should be altered.
#' @param printvals Optional. This displays the parameter values for each
#' iteration of the model. This is useful for debugging and changing starting
#' values and upper and lower bounds for parameters.
#'
#' @return A list with a data frame of the fitted parameters \code{out$par},
#' their percentage standard errors \code{out$par.se}, the model fit object
#' \code{out$fit}, the model weights \code{out$weights}, and a dataframe
#' containing the TACs both of the data and the fitted values \code{out$tacs}.
#'
#' @examples
#'
#' data(simref)
#'
#' t_tac <- simref$tacs[[2]]$Times
#' reftac <- simref$tacs[[2]]$Reference
#' roitac <- simref$tacs[[2]]$ROI1
#' weights <- simref$tacs[[2]]$Weights
#'
#' fit_fitk2prime <- srtm2(t_tac, reftac, roitac)
#' fit_setk2prime <- srtm2(t_tac, reftac, roitac, k2prime=0.1)
#' @author Granville J Matheson, \email{mathesong@@gmail.com}
#'
#' @references Wu Y, Carson RE. Noise reduction in the simplified reference tissue
#' model for neuroreceptor functional imaging. J Cereb Blood Flow Metab.
#' 2002;22:1440-1452.
#'
#'
#'
#' @export
srtm2 <- function(t_tac, reftac, roitac, k2prime = NULL, weights = NULL, frameStartEnd = NULL,
                  R1.start = 1, R1.lower = 0, R1.upper = 10,
                  k2prime.start = 0.1, k2prime.lower = 0.001, k2prime.upper = 1,
                  bp.start = 1.5, bp.lower = -10, bp.upper = 15,
                  multstart_iter = 1, multstart_lower = NULL, multstart_upper = NULL,
                  printvals = FALSE) {

  # Tidying: zero-frame insertion, frame windowing and default weights are
  # delegated to the shared reference-tissue input helper.
  modeldata <- tidyinput_ref(t_tac, reftac, roitac, weights, frameStartEnd)

  # Parameters: k2prime is a free parameter only when the user has not fixed
  # it; otherwise the model reduces to two free parameters (R1, bp).
  if (is.null(k2prime)) {
    start <- c(R1 = R1.start, k2prime = k2prime.start, bp = bp.start)
    lower <- c(R1 = R1.lower, k2prime = k2prime.lower, bp = bp.lower)
    upper <- c(R1 = R1.upper, k2prime = k2prime.upper, bp = bp.upper)
  } else {
    if (length(k2prime) > 1) {
      stop("k2prime must be specified by a single value.")
    }
    start <- c(R1 = R1.start, bp = bp.start)
    lower <- c(R1 = R1.lower, bp = bp.lower)
    upper <- c(R1 = R1.upper, bp = bp.upper)
  }

  multstart_pars <- fix_multstartpars(
    start, lower, upper, multstart_iter,
    multstart_lower, multstart_upper
  )

  multstart_upper <- multstart_pars$multstart_upper
  multstart_lower <- multstart_pars$multstart_lower

  # Solution: when k2prime is fixed, its numeric value is baked into the
  # model formula string so the optimiser only sees the free parameters.
  formula <- paste0("roitac ~ srtm2_model(t_tac, reftac, R1, k2prime",
                    ifelse(is.null(k2prime),
                           yes = "",
                           no = paste0("=", k2prime)),
                    ", bp)")

  if (prod(multstart_iter) == 1) {
    # Single starting point: Levenberg-Marquardt fit
    output <- minpack.lm::nlsLM(
      formula = as.formula(formula),
      data = modeldata,
      start = start, lower = lower, upper = upper,
      weights = weights,
      control = minpack.lm::nls.lm.control(maxiter = 200),
      trace = printvals
    )
  } else {
    # Multiple starting points across the multstart bounds
    output <- nls.multstart::nls_multstart(
      formula = as.formula(formula),
      data = modeldata,
      supp_errors = "Y",
      start_lower = multstart_lower,
      start_upper = multstart_upper,
      iter = multstart_iter, convergence_count = FALSE,
      lower = lower, upper = upper, modelweights = weights
    )
  }

  # Check for parameters hitting limits (compared at 3 decimal places).
  # A parameter matching exactly one bound triggers the warning; matching
  # both (upper == lower, i.e. deliberately pinned) does not.
  limcheck_u <- purrr::map2_lgl(round(upper, 3), round(coef(output), 3), identical)
  limcheck_l <- purrr::map2_lgl(round(lower, 3), round(coef(output), 3), identical)
  limcheck <- (limcheck_u + limcheck_l) == 1

  if (any(limcheck)) {
    warning(
      paste0(
        "Fitted parameters are hitting upper or lower limit bounds. Consider \n",
        "either modifying the upper and lower limit boundaries, or else using \n",
        "multstart when fitting the model (see the function documentation).") )
  }

  # Output: measured and fitted TACs side by side
  tacs <- data.frame(
    Time = modeldata$t_tac,
    Reference = modeldata$reftac,
    Target = modeldata$roitac,
    Target_fitted = as.numeric(fitted(output))
  )

  # Coefficients and their standard errors as one-row data frames
  par <- as.data.frame(as.list(coef(output)))
  par.se <- par
  par.se[1, ] <- purrr::map_dbl(names(coef(output)), ~ get_se(output, .x))
  names(par.se) <- paste0(names(par.se), ".se")

  if (!is.null(k2prime)) {
    # k2prime was user-fixed: report it with zero uncertainty
    par$k2prime <- k2prime
    par.se$k2prime <- 0
  }

  # Derived parameter: k2a = R1 * k2prime / (bp + 1)
  par$k2a <- with(par, (R1 * k2prime) / (bp + 1))
  par.se$k2a.se <- get_se(output, "(R1 * k2prime) / (bp + 1)")

  out <- list(
    par = par, par.se = par.se,
    fit = output, weights = modeldata$weights, tacs = tacs,
    model = "srtm2"
  )

  class(out) <- c("srtm2", "kinfit")

  return(out)
}
#' Model: Simplified Reference Tissue Model 2
#'
#' This is the SRTM2 model itself by which predicted values are generated.
#'
#' @param t_tac Numeric vector of times for each frame in minutes. We use the time halfway through the frame as well as a zero.
#' @param reftac Numeric vector of radioactivity concentrations in the reference tissue for each frame.
#' @param R1 Parameter value for R1
#' @param k2prime Parameter value for k2prime
#' @param bp Parameter value for bp
#'
#' @return A numeric vector of the predicted values of the TAC in the target region.
#'
#' @examples
#' \dontrun{
#' srtm2_model(t_tac, reftac, R1 = 0.9, k2prime = 0.1, bp = 0.1)
#' }
#'
#' @author Granville J Matheson, \email{mathesong@@gmail.com}
#'
#' @references Wu Y, Carson RE. Noise reduction in the simplified reference tissue
#' model for neuroreceptor functional imaging. J Cereb Blood Flow Metab.
#' 2002;22:1440-1452.
#'
#' @export
srtm2_model <- function(t_tac, reftac, R1, k2prime, bp) {
  # Evaluate the model on a fine regular grid, then sample back at t_tac.
  t_fine <- pracma::linspace(min(t_tac), max(t_tac), 1024)
  dt <- t_fine[2] - t_fine[1]
  ref_fine <- pracma::interp1(t_tac, reftac, t_fine, method = "linear")

  # Apparent efflux rate constant from the target tissue
  k2a <- (R1 * k2prime) / (bp + 1)

  # Non-displaceable component plus the convolved bound component
  nondisplaceable <- R1 * ref_fine
  bound <- kinfit_convolve(R1 * (k2prime - k2a) * ref_fine,
                           exp(-k2a * t_fine), dt)

  # Interpolate the predicted target TAC back onto the frame times
  pracma::interp1(t_fine, nondisplaceable + bound, t_tac)
}
#' Plot: Simplified Reference Tissue Model 2
#'
#' Function to visualise the fit of the SRTM2 model to data.
#'
#' @param srtm2out The output object of the SRTM2 fitting procedure.
#' @param roiname Optional. The name of the Target Region to see it on the plot.
#' @param refname Optional. The name of the Reference Region to see it on the plot.
#'
#' @return A ggplot2 object of the plot.
#'
#' @examples
#' data(simref)
#'
#' t_tac <- simref$tacs[[2]]$Times
#' reftac <- simref$tacs[[2]]$Reference
#' roitac <- simref$tacs[[2]]$ROI1
#' weights <- simref$tacs[[2]]$Weights
#'
#' fit <- srtm2(t_tac, reftac, roitac, weights=weights)
#'
#' plot_srtm2fit(fit)
#' @author Granville J Matheson, \email{mathesong@@gmail.com}
#'
#' @import ggplot2
#'
#' @export
plot_srtm2fit <- function(srtm2out, roiname = NULL, refname = NULL) {

  # Default labels for the target and reference regions
  if (is.null(roiname)) {
    roiname <- "ROI"
  }
  if (is.null(refname)) {
    refname <- "Reference"
  }

  # Measured TACs (reference + target), carrying the fit weights so that
  # point size can reflect them.
  measured <- data.frame(
    Time = srtm2out$tacs$Time,
    Reference = srtm2out$tacs$Reference,
    ROI.measured = srtm2out$tacs$Target,
    Weights = weights(srtm2out$fit)
  )

  # Fitted target TAC (local named fitted_df to avoid masking stats::fitted)
  fitted_df <- data.frame(
    Time = srtm2out$tacs$Time,
    ROI.fitted = srtm2out$tacs$Target_fitted,
    Weights = weights(srtm2out$fit)
  )

  measured <- plyr::rename(measured, c(
    "ROI.measured" = paste0(roiname, ".measured"),
    "Reference" = refname
  ))

  fitted_df <- plyr::rename(fitted_df, c("ROI.fitted" = paste0(roiname, ".fitted")))

  # Reshape to long format for ggplot
  tidymeasured <- tidyr::gather(
    measured,
    key = Region, value = Radioactivity,
    -Time, -Weights, factor_key = FALSE
  )

  tidyfitted <- tidyr::gather(
    fitted_df,
    key = Region, value = Radioactivity,
    -Time, -Weights, factor_key = FALSE
  )

  # Fix colour order: reference, measured target, fitted target
  Region <- forcats::fct_inorder(factor(c(tidymeasured$Region, tidyfitted$Region)))

  myColors <- RColorBrewer::brewer.pal(3, "Set1")
  names(myColors) <- levels(Region)
  colScale <- scale_colour_manual(name = "Region", values = myColors)

  # NOTE(review): guides(shape = FALSE) is deprecated in ggplot2 >= 3.3.4 in
  # favour of guides(shape = "none"); kept as FALSE here for compatibility
  # with older ggplot2 versions.
  outplot <- ggplot(tidymeasured, aes(x = Time, y = Radioactivity, colour = Region)) +
    geom_point(data = tidymeasured, aes(shape = "a", size = Weights)) +
    geom_line(data = tidyfitted) +
    guides(shape = FALSE, color = guide_legend(order = 1)) + colScale +
    scale_size(range = c(1, 3)) +
    coord_cartesian(ylim = c(0, max(tidymeasured$Radioactivity)))

  return(outplot)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_transcriptID.R
\name{convert_transcriptID}
\alias{convert_transcriptID}
\title{Convert transcript IDs between different databases}
\usage{
convert_transcriptID(
dat,
db,
biomart_ens = "ensembl",
dat_ens = "hsapiens_gene_ensembl",
dat_filter = "refseq_mrna",
BM_att_ens = c("refseq_mrna", "ensembl_transcript_id", "hgnc_symbol", "ucsc")
)
}
\arguments{
\item{dat}{a data frame including Transcript_version and nucleotide; a
nucleotide value such as "C1768G" denotes ref/CDS position/alt.}
\item{db}{`EnsDb` object. Default is EnsDb.Hsapiens.v75.}
\item{biomart_ens}{selection of BioMart database. Default is "ensembl".}
\item{dat_ens}{A BioMart database includes many datasets; choose the dataset to use within the database.
Default is "hsapiens_gene_ensembl". Alternatives include "drerio_gene_ensembl" and "mmusculus_gene_ensembl".}
\item{dat_filter}{refers to the types of transcripts. Default is "refseq_mrna". The user can also specify "ucsc" as transcript ID.}
\item{BM_att_ens}{defines the values of interests.
Default shows "refseq_mrna", "ensembl_transcript_id", "hgnc_symbol", "ucsc".
The listAttributes function displays all available attributes in the selected dataset.}
}
\value{
a new dataset with converting information
}
\description{
Preprocessing, converting and mapping
}
\examples{
library(EnsDb.Hsapiens.v75)
db=EnsDb.Hsapiens.v75::EnsDb.Hsapiens.v75
dat<-read.csv(system.file("extdata",
"convertID_refseq_data.csv",
package = "TransAT"),
stringsAsFactors = FALSE, encoding = "UTF-8", row.names = NULL, sep = ",")
new_dat<-convert_transcriptID(dat, db, dat_filter = "refseq_mrna")
}
| /man/convert_transcriptID.Rd | no_license | junyu-boston/TransAT | R | false | true | 1,727 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_transcriptID.R
\name{convert_transcriptID}
\alias{convert_transcriptID}
\title{Convert transcript IDs between different databases}
\usage{
convert_transcriptID(
dat,
db,
biomart_ens = "ensembl",
dat_ens = "hsapiens_gene_ensembl",
dat_filter = "refseq_mrna",
BM_att_ens = c("refseq_mrna", "ensembl_transcript_id", "hgnc_symbol", "ucsc")
)
}
\arguments{
\item{dat}{a data frame including Transcript_version and nucleotide; a
nucleotide value such as "C1768G" denotes ref/CDS position/alt.}
\item{db}{`EnsDb` object. Default is EnsDb.Hsapiens.v75.}
\item{biomart_ens}{selection of BioMart database. Default is "ensembl".}
\item{dat_ens}{A BioMart database includes many datasets; choose the dataset to use within the database.
Default is "hsapiens_gene_ensembl". Alternatives include "drerio_gene_ensembl" and "mmusculus_gene_ensembl".}
\item{dat_filter}{refers to the types of transcripts. Default is "refseq_mrna". The user can also specify "ucsc" as transcript ID.}
\item{BM_att_ens}{defines the values of interests.
Default shows "refseq_mrna", "ensembl_transcript_id", "hgnc_symbol", "ucsc".
The listAttributes function displays all available attributes in the selected dataset.}
}
\value{
a new dataset with converting information
}
\description{
Preprocessing, converting and mapping
}
\examples{
library(EnsDb.Hsapiens.v75)
db=EnsDb.Hsapiens.v75::EnsDb.Hsapiens.v75
dat<-read.csv(system.file("extdata",
"convertID_refseq_data.csv",
package = "TransAT"),
stringsAsFactors = FALSE, encoding = "UTF-8", row.names = NULL, sep = ",")
new_dat<-convert_transcriptID(dat, db, dat_filter = "refseq_mrna")
}
|
######################################################################
## plot4.R
## -----------------------
## Description:
## This is the script for extracting the
## "Individual household electric power consumption Data Set"
## and plots a graphic with the Energy sub metering and time.
##
##
######################################################################
plot4 <- function (rds_path = "") {
message("Executing!")
#####################################################
##1.Checks if the rds_path contains file path
#####################################################
if(rds_path == "")
{
message("File path was not specified or is not valid. This program will automatically download the file")
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
message("Loading data into R. Please Wait...")
dataset <- read.table(unz(temp, "household_power_consumption.txt"), sep = ";", header = TRUE)
unlink(temp)
}
else
{
if(!file.exists(rds_path))
{
message("The file path was not specified or is not valid.")
message("Process Finished.")
return(FALSE)
}
message("Loading data into R. Please Wait...")
dataset <- read.table(unz(rds_path, "household_power_consumption.txt"), sep = ";", header = TRUE)
}
#####################################################################
##2.Converts variables 2 and 1 to time and date formats respectively
#####################################################################
dataset$Time <- strptime(paste(dataset$Date, dataset$Time), "%d/%m/%Y %H:%M:%S")
dataset$Date <- as.Date(dataset$Date, "%d/%m/%Y")
comdata<- dataset
#####################################################################
##3.Filters observations with dates between 2007-02-01 (inclusive) and 2007-02-03 (exclusive)
#####################################################################
comdata1 <- comdata[comdata$Time >= "2007-02-01 00:00:00" & comdata$Time < "2007-02-03 00:00:00" ,]
#####################################################################
##4.Converts variables 3-9 to numeric variables
#####################################################################
comdata1$Global_active_power <- as.double(as.character(comdata1$Global_active_power))
comdata1$Global_reactive_power <- as.double(as.character(comdata1$Global_reactive_power))
comdata1$Voltage <- as.double(as.character(comdata1$Voltage))
comdata1$Global_intensity <- as.double(as.character(comdata1$Global_intensity))
comdata1$Sub_metering_1 <- as.integer(as.character(comdata1$Sub_metering_1))
comdata1$Sub_metering_2 <- as.integer(as.character(comdata1$Sub_metering_2))
comdata1$Sub_metering_3 <- as.integer(as.character(comdata1$Sub_metering_3))
comdata2 <<- comdata1
#####################################################################
##5.Creates the png with the four-panel plot
#####################################################################
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2,2), mar= c(4,4,2,1), oma= c(0,0,2,0))
with (comdata1,{
plot(Time,Global_active_power, type='l',ylab = "Global Active Power",xlab= "",cex.axis=0.8, cex.lab=0.8)
plot(Time,Voltage, type='l',ylab = "Voltage",xlab= "datetime",cex.axis=0.8, cex.lab=0.8)
plot(Time,Sub_metering_1,type ="l",ylim = c(0, 40),yaxp = c(0, 30, 3), ylab = "Energy sub metering", xlab ="",cex.axis=0.8, cex.lab=0.8)
lines(Time,Sub_metering_2,col ="RED")
lines(Time,Sub_metering_3,col ="BLUE")
legend("topright",col = c("black","red","blue"), legend = c("Sub_metering_1 ","Sub_metering_2 ","Sub_metering_3 "),lty=c(1,1,1),cex=0.7,bty = "n")
plot(Time,Global_reactive_power, type='l',xlab= "datetime",cex.axis=0.8, cex.lab=0.8)
})
dev.off()
message("Graphic saved as plot4.png in the current working directory")
} | /Files/plot4.R | no_license | ronaldraxon/ExData_Plotting1 | R | false | false | 3,886 | r | ######################################################################
## plot4.R
## -----------------------
## Description:
## This is the script for extracting the
## "Individual household electric power consumption Data Set"
## and plots a graphic with the Energy sub metering and time.
##
##
######################################################################
plot4 <- function (rds_path = "") {
message("Executing!")
#####################################################
##1.Checks if the rds_path contains file path
#####################################################
if(rds_path == "")
{
message("File path was not specified or is not valid. This program will automatically download the file")
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
message("Loading data into R. Please Wait...")
dataset <- read.table(unz(temp, "household_power_consumption.txt"), sep = ";", header = TRUE)
unlink(temp)
}
else
{
if(!file.exists(rds_path))
{
message("The file path was not specified or is not valid.")
message("Process Finished.")
return(FALSE)
}
message("Loading data into R. Please Wait...")
dataset <- read.table(unz(rds_path, "household_power_consumption.txt"), sep = ";", header = TRUE)
}
#####################################################################
##2.Converts variables 2 and 1 to time and date formats respectively
#####################################################################
dataset$Time <- strptime(paste(dataset$Date, dataset$Time), "%d/%m/%Y %H:%M:%S")
dataset$Date <- as.Date(dataset$Date, "%d/%m/%Y")
comdata<- dataset
#####################################################################
##3.Filters observations with dates between 2007-02-01 (inclusive) and 2007-02-03 (exclusive)
#####################################################################
comdata1 <- comdata[comdata$Time >= "2007-02-01 00:00:00" & comdata$Time < "2007-02-03 00:00:00" ,]
#####################################################################
##4.Converts variables 3-9 to numeric variables
#####################################################################
comdata1$Global_active_power <- as.double(as.character(comdata1$Global_active_power))
comdata1$Global_reactive_power <- as.double(as.character(comdata1$Global_reactive_power))
comdata1$Voltage <- as.double(as.character(comdata1$Voltage))
comdata1$Global_intensity <- as.double(as.character(comdata1$Global_intensity))
comdata1$Sub_metering_1 <- as.integer(as.character(comdata1$Sub_metering_1))
comdata1$Sub_metering_2 <- as.integer(as.character(comdata1$Sub_metering_2))
comdata1$Sub_metering_3 <- as.integer(as.character(comdata1$Sub_metering_3))
comdata2 <<- comdata1
#####################################################################
##5.Creates the png with the four-panel plot
#####################################################################
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2,2), mar= c(4,4,2,1), oma= c(0,0,2,0))
with (comdata1,{
plot(Time,Global_active_power, type='l',ylab = "Global Active Power",xlab= "",cex.axis=0.8, cex.lab=0.8)
plot(Time,Voltage, type='l',ylab = "Voltage",xlab= "datetime",cex.axis=0.8, cex.lab=0.8)
plot(Time,Sub_metering_1,type ="l",ylim = c(0, 40),yaxp = c(0, 30, 3), ylab = "Energy sub metering", xlab ="",cex.axis=0.8, cex.lab=0.8)
lines(Time,Sub_metering_2,col ="RED")
lines(Time,Sub_metering_3,col ="BLUE")
legend("topright",col = c("black","red","blue"), legend = c("Sub_metering_1 ","Sub_metering_2 ","Sub_metering_3 "),lty=c(1,1,1),cex=0.7,bty = "n")
plot(Time,Global_reactive_power, type='l',xlab= "datetime",cex.axis=0.8, cex.lab=0.8)
})
dev.off()
message("Graphic saved as plot4.png in the current working directory")
} |
\name{threetwo.dat}
\alias{threetwo.dat}
\docType{data}
\title{Three clusters two outliers}
\description{
These data contain again the two clusters of the 60:80 data, but now with the addition of a third cluster, units 141-158 and two outliers, units 159 and 160. The sizes of the groups are therefore 80, 60, 18 and 2
}
\usage{data(threetwo.dat)}
\format{
A data frame with 160 observations on the following 2 variables.
\describe{
\item{y1}{a numeric vector.}
\item{y2}{a numeric vector.}
}
}
\details{
The scatter plot shows that the second compact cluster of 18 observations is near the longer axis of the dispersed cluster of 80. The two outliers are together, approximately across the centroid of the dispersed group from the cluster of 60.
}
\source{
Atkinson, Riani and Cerioli (2004), p. 588-589; http://www.riani.it/arc.
}
\examples{
data(threetwo.dat)
}
\keyword{datasets}
| /man/threetwo.dat.Rd | no_license | cran/Rfwdmv | R | false | false | 907 | rd | \name{threetwo.dat}
\alias{threetwo.dat}
\docType{data}
\title{Three clusters two outliers}
\description{
These data contain again the two clusters of the 60:80 data, but now with the addition of a third cluster, units 141-158 and two outliers, units 159 and 160. The sizes of the groups are therefore 80, 60, 18 and 2
}
\usage{data(threetwo.dat)}
\format{
A data frame with 160 observations on the following 2 variables.
\describe{
\item{y1}{a numeric vector.}
\item{y2}{a numeric vector.}
}
}
\details{
The scatter plot shows that the second compact cluster of 18 observations is near the longer axis of the dispersed cluster of 80. The two outliers are together, approximately across the centroid of the dispersed group from the cluster of 60.
}
\source{
Atkinson, Riani and Cerioli (2004), p. 588-589; http://www.riani.it/arc.
}
\examples{
data(threetwo.dat)
}
\keyword{datasets}
|
source("functions and packages/functions.R")
source("master_scripts/plot_objects.R")
source("functions and packages/packages.R")
#read in gm data set (no drought) and Cibar(discrimination)-------------------------------------------------------
gmes <- read.csv("calculated_data/gmes_wellwatered.csv")
###get average by id
gm_agg <- summaryBy(Photo+ Cond + gm ~ chamber+id+leaf +light+temp+leaflight+Month, data=gmes, FUN=mean, keep.names=TRUE)
gm_agg$leaf <- gsub("s", "S", gm_agg$leaf)
gm_agg$light <- gsub("high", "Sun-light", gm_agg$light)
gm_agg$light <- gsub("low", "Shade-light", gm_agg$light)
##remove shade-high
gm_sunsha <- gm_agg[gm_agg$leaflight != "shade-high",]
gm_sunsha <- droplevels(gm_sunsha)
##sun-shade dataframes
sundat <- gm_sunsha[gm_sunsha$leaflight =="sun-high",]
shadat <- gm_sunsha[gm_sunsha$leaflight =="shade-low",]
##dfr with lights on
fleckdat <- gm_agg[gm_agg$leaflight == "shade-high",]
fleckdat <- droplevels(fleckdat)
#### GS vs A data: use gam for CI of non-linear relationship between A and gs----------------------------------------
library(mgcv)
#SUN leaves
sunmod <- gam(Photo ~ s(Cond, k=5), data=gm_sunsha, subset=leaflight=="sun-high")
#predict
#get appropriate vector of gs from sun leaves
gsdat <- gm_sunsha[gm_sunsha$leaflight=="sun-high", "Cond"]
#generate sequence and then predict
gssun_seq <- seq(min(gsdat), max(gsdat), length=101)
gssun_pred <- predict(sunmod, newdata=data.frame(Cond=gssun_seq), se.fit=TRUE)
#ci and model fit
sunupr <- gssun_pred$fit + (2*gssun_pred$se.fit)
sunlwr <- gssun_pred$fit - (2*gssun_pred$se.fit)
#SHADE leaves
shamod <- gam(Photo ~ s(Cond, k=5), data=gm_sunsha, subset=leaflight=="shade-low")
#get appropriate vector of cond from shade leaves
gsdat2 <- gm_sunsha[gm_sunsha$leaflight=="shade-low", "Cond"]
#generate sequence and then predict
gssha_seq <- seq(min(gsdat2), max(gsdat2), length=101)
gssha_pred <- predict(shamod, newdata=data.frame(Cond=gssha_seq), type="link", se.fit=TRUE)
shaupr <- gssha_pred$fit + (2*gssha_pred$se.fit)
shalwr <- gssha_pred$fit - (2*gssha_pred$se.fit)
#SUNFLECK leaves
fleckmod <- gam(Photo ~ s(Cond, k=5), data=fleckdat)
#get appropriate vector of cond from sunfleck leaves
gsfleck <- fleckdat[, "Cond"]
#generate sequence and then predict
gsfleck_seq <- seq(min(gsfleck), max(gsfleck), length=101)
gsfleck_pred <- predict(fleckmod, newdata=data.frame(Cond=gsfleck_seq), type="link", se.fit=TRUE)
fleckupr <- gsfleck_pred$fit + (2*gsfleck_pred$se.fit)
flecklwr <- gsfleck_pred$fit - (2*gsfleck_pred$se.fit)
#### Gm vs A data: use gam for CI of non-linear relationship between A and gs----------------------------------------------
##read bootstrapped data previosuly ran from sunshade phys script
agm_sun <- read.csv( "master_scripts/bootstrap_results/agm_sun.csv")
agm_sha <- read.csv( "master_scripts/bootstrap_results/agm_sha.csv")
agm_fleck <- read.csv( "master_scripts/bootstrap_results/agm_fleck.csv")
####PLOTTING: 2 panel figure with gm, gs, A---------------------------------------------------------------------------------
#1: panel Photosynthesis vs gs (gam plots)
windows(10, 12)
par(mfrow=c(2,1))
par(mar=c(4,4,1,1), cex=1.25, las=1, cex.axis=.8, cex.lab=1, mgp=c(2.5,1,0))
plot(Photo~Cond, data=gm_sunsha, subset=leaflight=="sun-high", col=suncol, ylim=c(5,25),
xlim=c(0,.5), xlab=condlab, ylab=satlab, pch=c(16, 17)[pch=gm_sunsha$temp])
lines(gssun_seq, sunupr, lty=2, lwd=2,col=suncol)
lines(gssun_seq, sunlwr, lty=2, lwd=2,col=suncol)
lines(gssun_seq, gssun_pred$fit, lty=1, lwd=2,col=suncol)
#shade
points(Photo~Cond, data=gm_sunsha, subset=leaflight=="shade-low",col=shacol,pch=c(16, 17)[pch=gm_sunsha$temp])
lines(gssha_seq, shaupr, lty=2, lwd=2,col=shacol)
lines(gssha_seq, shalwr, lty=2, lwd=2,col=shacol)
lines(gssha_seq, gssha_pred$fit, lty=1, lwd=2,col=shacol)
#sunfleck
points(Photo~Cond, data=fleckdat, col=lightscol,pch=c(16, 17)[pch=fleckdat$temp])
lines(gsfleck_seq, fleckupr, lty=2, lwd=2,col=lightscol)
lines(gsfleck_seq, flecklwr, lty=2, lwd=2,col=lightscol)
lines(gsfleck_seq, gsfleck_pred$fit, lty=1, lwd=2,col=lightscol)
legend("topleft", alllab, pch=c(16,16,16,16,17), col=allcols,inset = 0.01, bty='n',cex=.8)
text(x=.5, y=24.5, "(a)", cex=1)
####panel2: gm vs A
par(mar=c(4,4,1,1), cex=1.25, las=1, cex.axis=.8, cex.lab=1, mgp=c(2.5,1,0))
plot(Photo~gm, data=sundat, col=suncol, ylim=c(5,25), xlim=c(0,.5), xlab=gmlab, ylab=satlab,
pch=c(16, 17)[pch=gm_sunsha$temp])
points(Photo~gm, data=shadat, col=shacol, pch=c(16, 17)[pch=gm_sunsha$temp])
points(Photo~gm, data=fleckdat, col=lightscol, pch=c(16, 17)[pch=fleckdat$temp])
with(agm_sun, {
lines(gm, lcl, lty=2, lwd=2,col=suncol2)
lines(gm, ucl, lty=2, lwd=2,col=suncol2)
lines(gm, pred, lty=1, lwd=2,col=suncol2)
})
with(agm_sha, {
lines(gm, lcl, lty=2, lwd=2,col=shacol2)
lines(gm, ucl, lty=2, lwd=2,col=shacol2)
lines(gm, pred, lty=1, lwd=2,col=shacol2)
})
with(agm_fleck, {
lines(gm, lcl, lty=2, lwd=2,col=lightscol2)
lines(gm, ucl, lty=2, lwd=2,col=lightscol2)
lines(gm, pred, lty=1, lwd=2,col=lightscol2)
})
text(x=.5, y=24.5, "(b)", cex=1)
dev.copy2pdf(file="master_scripts/paper_figures/gmgs.pdf")
dev.off() | /scripts/gmgs_bootstrapping_plots.R | no_license | CourtneyCampany/WTC3_tree | R | false | false | 5,180 | r | source("functions and packages/functions.R")
source("master_scripts/plot_objects.R")
source("functions and packages/packages.R")
#read in gm data set (no drought) and Cibar(discrimination)-------------------------------------------------------
gmes <- read.csv("calculated_data/gmes_wellwatered.csv")
###get average by id
gm_agg <- summaryBy(Photo+ Cond + gm ~ chamber+id+leaf +light+temp+leaflight+Month, data=gmes, FUN=mean, keep.names=TRUE)
gm_agg$leaf <- gsub("s", "S", gm_agg$leaf)
gm_agg$light <- gsub("high", "Sun-light", gm_agg$light)
gm_agg$light <- gsub("low", "Shade-light", gm_agg$light)
##remove shade-high
gm_sunsha <- gm_agg[gm_agg$leaflight != "shade-high",]
gm_sunsha <- droplevels(gm_sunsha)
##sun-shade dataframes
sundat <- gm_sunsha[gm_sunsha$leaflight =="sun-high",]
shadat <- gm_sunsha[gm_sunsha$leaflight =="shade-low",]
##dfr with lights on
fleckdat <- gm_agg[gm_agg$leaflight == "shade-high",]
fleckdat <- droplevels(fleckdat)
#### GS vs A data: use gam for CI of non-linear relationship between A and gs----------------------------------------
library(mgcv)
#SUN leaves
sunmod <- gam(Photo ~ s(Cond, k=5), data=gm_sunsha, subset=leaflight=="sun-high")
#predict
#get appropriate vector of gs from sun leaves
gsdat <- gm_sunsha[gm_sunsha$leaflight=="sun-high", "Cond"]
#generate sequence and then predict
gssun_seq <- seq(min(gsdat), max(gsdat), length=101)
gssun_pred <- predict(sunmod, newdata=data.frame(Cond=gssun_seq), se.fit=TRUE)
#ci and model fit
sunupr <- gssun_pred$fit + (2*gssun_pred$se.fit)
sunlwr <- gssun_pred$fit - (2*gssun_pred$se.fit)
#SHADE leaves
shamod <- gam(Photo ~ s(Cond, k=5), data=gm_sunsha, subset=leaflight=="shade-low")
#get appropriate vector of cond from shade leaves
gsdat2 <- gm_sunsha[gm_sunsha$leaflight=="shade-low", "Cond"]
#generate sequence and then predict
gssha_seq <- seq(min(gsdat2), max(gsdat2), length=101)
gssha_pred <- predict(shamod, newdata=data.frame(Cond=gssha_seq), type="link", se.fit=TRUE)
shaupr <- gssha_pred$fit + (2*gssha_pred$se.fit)
shalwr <- gssha_pred$fit - (2*gssha_pred$se.fit)
#SUNFLECK leaves
fleckmod <- gam(Photo ~ s(Cond, k=5), data=fleckdat)
#get appropriate vector of cond from sunfleck leaves
gsfleck <- fleckdat[, "Cond"]
#generate sequence and then predict
gsfleck_seq <- seq(min(gsfleck), max(gsfleck), length=101)
gsfleck_pred <- predict(fleckmod, newdata=data.frame(Cond=gsfleck_seq), type="link", se.fit=TRUE)
fleckupr <- gsfleck_pred$fit + (2*gsfleck_pred$se.fit)
flecklwr <- gsfleck_pred$fit - (2*gsfleck_pred$se.fit)
#### Gm vs A data: use gam for CI of non-linear relationship between A and gs----------------------------------------------
##read bootstrapped data previosuly ran from sunshade phys script
agm_sun <- read.csv( "master_scripts/bootstrap_results/agm_sun.csv")
agm_sha <- read.csv( "master_scripts/bootstrap_results/agm_sha.csv")
agm_fleck <- read.csv( "master_scripts/bootstrap_results/agm_fleck.csv")
####PLOTTING: 2 panel figure with gm, gs, A---------------------------------------------------------------------------------
#1: panel Photosynthesis vs gs (gam plots)
windows(10, 12)
par(mfrow=c(2,1))
par(mar=c(4,4,1,1), cex=1.25, las=1, cex.axis=.8, cex.lab=1, mgp=c(2.5,1,0))
plot(Photo~Cond, data=gm_sunsha, subset=leaflight=="sun-high", col=suncol, ylim=c(5,25),
xlim=c(0,.5), xlab=condlab, ylab=satlab, pch=c(16, 17)[pch=gm_sunsha$temp])
lines(gssun_seq, sunupr, lty=2, lwd=2,col=suncol)
lines(gssun_seq, sunlwr, lty=2, lwd=2,col=suncol)
lines(gssun_seq, gssun_pred$fit, lty=1, lwd=2,col=suncol)
#shade
points(Photo~Cond, data=gm_sunsha, subset=leaflight=="shade-low",col=shacol,pch=c(16, 17)[pch=gm_sunsha$temp])
lines(gssha_seq, shaupr, lty=2, lwd=2,col=shacol)
lines(gssha_seq, shalwr, lty=2, lwd=2,col=shacol)
lines(gssha_seq, gssha_pred$fit, lty=1, lwd=2,col=shacol)
#sunfleck
points(Photo~Cond, data=fleckdat, col=lightscol,pch=c(16, 17)[pch=fleckdat$temp])
lines(gsfleck_seq, fleckupr, lty=2, lwd=2,col=lightscol)
lines(gsfleck_seq, flecklwr, lty=2, lwd=2,col=lightscol)
lines(gsfleck_seq, gsfleck_pred$fit, lty=1, lwd=2,col=lightscol)
legend("topleft", alllab, pch=c(16,16,16,16,17), col=allcols,inset = 0.01, bty='n',cex=.8)
text(x=.5, y=24.5, "(a)", cex=1)
####panel2: gm vs A
par(mar=c(4,4,1,1), cex=1.25, las=1, cex.axis=.8, cex.lab=1, mgp=c(2.5,1,0))
plot(Photo~gm, data=sundat, col=suncol, ylim=c(5,25), xlim=c(0,.5), xlab=gmlab, ylab=satlab,
pch=c(16, 17)[pch=gm_sunsha$temp])
points(Photo~gm, data=shadat, col=shacol, pch=c(16, 17)[pch=gm_sunsha$temp])
points(Photo~gm, data=fleckdat, col=lightscol, pch=c(16, 17)[pch=fleckdat$temp])
with(agm_sun, {
lines(gm, lcl, lty=2, lwd=2,col=suncol2)
lines(gm, ucl, lty=2, lwd=2,col=suncol2)
lines(gm, pred, lty=1, lwd=2,col=suncol2)
})
with(agm_sha, {
lines(gm, lcl, lty=2, lwd=2,col=shacol2)
lines(gm, ucl, lty=2, lwd=2,col=shacol2)
lines(gm, pred, lty=1, lwd=2,col=shacol2)
})
with(agm_fleck, {
lines(gm, lcl, lty=2, lwd=2,col=lightscol2)
lines(gm, ucl, lty=2, lwd=2,col=lightscol2)
lines(gm, pred, lty=1, lwd=2,col=lightscol2)
})
text(x=.5, y=24.5, "(b)", cex=1)
dev.copy2pdf(file="master_scripts/paper_figures/gmgs.pdf")
dev.off() |
# Engines API
#
# Allow clients to fetch Engines Analytics through APIs.
#
# The version of the OpenAPI document: 2
# Contact: analytics.api.support@factset.com
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title SPARDateParameters
#' @description SPARDateParameters Class
#' @format An \code{R6Class} generator object
#' @field startdate character
#'
#' @field enddate character
#'
#' @field frequency character
#'
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SPARDateParameters <- R6::R6Class(
'SPARDateParameters',
public = list(
# Start date of the calculation window (length-1 character, or NULL).
`startdate` = NULL,
# End date of the calculation window (length-1 character, or NULL).
`enddate` = NULL,
# Sampling frequency of the calculation (length-1 character, or NULL).
`frequency` = NULL,
# Validate and store the three fields.  Each argument, when supplied,
# must be a single character string.  Extra arguments are captured into
# local.optional.var and otherwise ignored (generator boilerplate kept
# for forward compatibility).
initialize = function(`startdate`, `enddate`, `frequency`, ...){
local.optional.var <- list(...)
if (!missing(`startdate`)) {
stopifnot(is.character(`startdate`), length(`startdate`) == 1)
self$`startdate` <- `startdate`
}
if (!missing(`enddate`)) {
stopifnot(is.character(`enddate`), length(`enddate`) == 1)
self$`enddate` <- `enddate`
}
if (!missing(`frequency`)) {
stopifnot(is.character(`frequency`), length(`frequency`) == 1)
self$`frequency` <- `frequency`
}
},
# Represent the object as a named list containing only the fields that
# are set; suitable for serialisation with jsonlite::toJSON().
toJSON = function() {
SPARDateParametersObject <- list()
if (!is.null(self$`startdate`)) {
SPARDateParametersObject[['startdate']] <-
self$`startdate`
}
if (!is.null(self$`enddate`)) {
SPARDateParametersObject[['enddate']] <-
self$`enddate`
}
if (!is.null(self$`frequency`)) {
SPARDateParametersObject[['frequency']] <-
self$`frequency`
}
SPARDateParametersObject
},
# Populate the fields from a JSON string, skipping keys that are absent
# so existing values are preserved.  Unlike fromJSONString(), this
# method does not return self.
fromJSON = function(SPARDateParametersJson) {
SPARDateParametersObject <- jsonlite::fromJSON(SPARDateParametersJson)
if (!is.null(SPARDateParametersObject$`startdate`)) {
self$`startdate` <- SPARDateParametersObject$`startdate`
}
if (!is.null(SPARDateParametersObject$`enddate`)) {
self$`enddate` <- SPARDateParametersObject$`enddate`
}
if (!is.null(SPARDateParametersObject$`frequency`)) {
self$`frequency` <- SPARDateParametersObject$`frequency`
}
},
# Hand-build a JSON string from the set fields.  Only non-NULL fields
# are emitted; values are interpolated with sprintf, so they are
# assumed not to contain characters needing JSON escaping.
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`startdate`)) {
sprintf(
'"startdate":
"%s"
',
self$`startdate`
)},
if (!is.null(self$`enddate`)) {
sprintf(
'"enddate":
"%s"
',
self$`enddate`
)},
if (!is.null(self$`frequency`)) {
sprintf(
'"frequency":
"%s"
',
self$`frequency`
)}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
paste('{', jsoncontent, '}', sep = "")
},
# Populate all fields from a JSON string (absent keys become NULL,
# overwriting existing values) and return self to allow chaining.
fromJSONString = function(SPARDateParametersJson) {
SPARDateParametersObject <- jsonlite::fromJSON(SPARDateParametersJson)
self$`startdate` <- SPARDateParametersObject$`startdate`
self$`enddate` <- SPARDateParametersObject$`enddate`
self$`frequency` <- SPARDateParametersObject$`frequency`
self
}
)
)
| /R/spar_date_parameters.R | no_license | cran/factset.analyticsapi.engines | R | false | false | 3,313 | r | # Engines API
#
# Allow clients to fetch Engines Analytics through APIs.
#
# The version of the OpenAPI document: 2
# Contact: analytics.api.support@factset.com
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title SPARDateParameters
#' @description SPARDateParameters Class
#' @format An \code{R6Class} generator object
#' @field startdate character
#'
#' @field enddate character
#'
#' @field frequency character
#'
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SPARDateParameters <- R6::R6Class(
'SPARDateParameters',
public = list(
`startdate` = NULL,
`enddate` = NULL,
`frequency` = NULL,
initialize = function(`startdate`, `enddate`, `frequency`, ...){
local.optional.var <- list(...)
if (!missing(`startdate`)) {
stopifnot(is.character(`startdate`), length(`startdate`) == 1)
self$`startdate` <- `startdate`
}
if (!missing(`enddate`)) {
stopifnot(is.character(`enddate`), length(`enddate`) == 1)
self$`enddate` <- `enddate`
}
if (!missing(`frequency`)) {
stopifnot(is.character(`frequency`), length(`frequency`) == 1)
self$`frequency` <- `frequency`
}
},
toJSON = function() {
SPARDateParametersObject <- list()
if (!is.null(self$`startdate`)) {
SPARDateParametersObject[['startdate']] <-
self$`startdate`
}
if (!is.null(self$`enddate`)) {
SPARDateParametersObject[['enddate']] <-
self$`enddate`
}
if (!is.null(self$`frequency`)) {
SPARDateParametersObject[['frequency']] <-
self$`frequency`
}
SPARDateParametersObject
},
fromJSON = function(SPARDateParametersJson) {
SPARDateParametersObject <- jsonlite::fromJSON(SPARDateParametersJson)
if (!is.null(SPARDateParametersObject$`startdate`)) {
self$`startdate` <- SPARDateParametersObject$`startdate`
}
if (!is.null(SPARDateParametersObject$`enddate`)) {
self$`enddate` <- SPARDateParametersObject$`enddate`
}
if (!is.null(SPARDateParametersObject$`frequency`)) {
self$`frequency` <- SPARDateParametersObject$`frequency`
}
},
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`startdate`)) {
sprintf(
'"startdate":
"%s"
',
self$`startdate`
)},
if (!is.null(self$`enddate`)) {
sprintf(
'"enddate":
"%s"
',
self$`enddate`
)},
if (!is.null(self$`frequency`)) {
sprintf(
'"frequency":
"%s"
',
self$`frequency`
)}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
paste('{', jsoncontent, '}', sep = "")
},
fromJSONString = function(SPARDateParametersJson) {
SPARDateParametersObject <- jsonlite::fromJSON(SPARDateParametersJson)
self$`startdate` <- SPARDateParametersObject$`startdate`
self$`enddate` <- SPARDateParametersObject$`enddate`
self$`frequency` <- SPARDateParametersObject$`frequency`
self
}
)
)
|
# Parse R source code and return the getParseData() rows for plain
# function calls: SYMBOL_FUNCTION_CALL tokens that are neither
# namespace-qualified (pkg::fun) nor reached through `$` (obj$fun()).
#
# txt  character vector of R source (one string or one element per line)
#
# keep.source = TRUE is required: under Rscript the keep.source option
# defaults to FALSE, in which case getParseData() returns NULL and this
# function previously failed.
pretty_parse <- function(txt){
  p <- parse(text = txt, keep.source = TRUE)
  p1 <- utils::getParseData(p)
  # Calls whose parent expression also carries a SYMBOL_PACKAGE token are
  # already qualified with pkg:: -- leave them alone.
  rmParent <- p1$parent[p1$token == "SYMBOL_PACKAGE"]
  ret <- p1[p1$token == "SYMBOL_FUNCTION_CALL" & !p1$parent %in% rmParent, ]
  # NOTE: the original tested length(ret) > 0, but length() of a data.frame
  # is its column count and is always positive; nrow() is what was meant.
  if (nrow(ret) > 0) {
    # clean out list functions: drop calls reached via `$` (obj$fun(...)),
    # which are element accesses rather than package functions.
    clean_idx <- sapply(sprintf('\\$%s', ret$text),
                        function(p) !any(grepl(pattern = p, x = txt)))
    if (length(clean_idx) > 0) {
      ret <- ret[clean_idx, ]
    }
  }
  ret
}
#' @importFrom crayon red
#' @importFrom stringi stri_sub
# Rewrite the source lines in `txt` so that every call resolved to a
# non-base namespace becomes namespace-qualified (fun -> pkg::fun).
#
# txt       character vector, one element per source line
# sym.funs  getParseData()-style table with columns text, namespace,
#           line1, col1, col2 (from pretty_parse()/pretty_find())
# nm        file name the result is written to / reported against
# overwrite TRUE: write the modified text back to `nm`;
#           FALSE: print it, with replacements highlighted in red
# force     named list (package -> functions) of forced namespaces
# ignore    named list (package -> functions) to leave untouched
#
# Returns the (possibly colour-annotated) sym.funs table.
pretty_shift <- function(txt, sym.funs, nm, overwrite, force, ignore){
# Apply force/ignore rules and precompute the pkg::fun replacement text.
sym.funs <- pretty_manip(sym.funs, force, ignore)
if(!overwrite){
# Preview mode: colour the replacements so they stand out when printed.
sym.funs$new_text <- crayon::red(sym.funs$new_text)
}
# Only calls that resolved to a real, non-base namespace are rewritten.
idx <- which(!sym.funs$namespace %in% c("base", NA))
sym.funs.i <- split(sym.funs[idx,],sym.funs$line1[idx])
# Several replacements can land on one source line; each inserted "pkg::"
# shifts the columns of replacements to its right.  Compute a cumulative
# per-line column shift before editing.
sym.funs.i.shift <- lapply(sym.funs.i,function(sf){
x <- rep(0,nrow(sf))
if(nrow(sf)>1){
for(i in 2:nrow(sf)){
# x[i] already holds the accumulated shift from earlier replacements,
# so this propagates a running total to the remaining rows.
x[i:nrow(sf)] <- x[i] + (nchar(sf$new_text[i - 1]) - nchar(sf$text[i - 1]))
}
}
sf$col_shift <- x
sf
})
sym.funs.shift <- do.call('rbind',sym.funs.i.shift)
sym.funs.shift$col1 <- sym.funs.shift$col1 + sym.funs.shift$col_shift
sym.funs.shift$col2 <- sym.funs.shift$col2 + sym.funs.shift$col_shift
# In-place substring surgery: overwrite each call name with pkg::name.
# NOTE(review): if idx is empty, 1:length(idx) is c(1, 0) and this loop
# runs with out-of-range indices -- seq_along(idx) would be safer; confirm
# callers never pass tables with no non-base calls.
for(i in 1:length(idx)){
stringi::stri_sub(
str = txt[sym.funs.shift$line1[i]],
from = sym.funs.shift$col1[i],
to = sym.funs.shift$col2[i]) <- sym.funs.shift$new_text[i]
}
if (overwrite) {
cat(txt, sep = "\n", file = nm)
pretty_print(sym.funs,file = nm)
} else {
pretty_print(sym.funs,file = nm)
if(sinew_opts$get('pretty_print'))
writeLines(crayon::white(txt))
}
sym.funs
}
# Prepare the parse-data table for rewriting: apply the user's force and
# ignore rules (via pretty_merge) and precompute the "pkg::fun" replacement
# string for every row.  Adds the columns `action` and `new_text`.
pretty_manip <- function(sym.funs, force, ignore) {
  out <- sym.funs
  out$action <- ''
  # Forced namespaces overwrite whatever pretty_find() resolved.
  if (!is.null(force)) out <- pretty_merge(out, force, 'replace')
  # Ignored functions are dropped from the table entirely.
  if (!is.null(ignore)) out <- pretty_merge(out, ignore, 'remove')
  out$new_text <- paste(out$namespace, out$text, sep = '::')
  out
}
#' @importFrom cli symbol
# Merge user-supplied force/ignore rules into the parse-data table `e1`.
#
# e1      data.frame with (at least) columns text, namespace, action, id
# e2      named list mapping package name -> character vector of function
#         names; a NULL element means "every name in that namespace"
# action  'replace': matched rows get their namespace overwritten with the
#         forced package; 'remove': matched rows are dropped
#
# Matched rows are flagged in `action` with a checkbox glyph so that
# pretty_print() can report the user intervention.  Rows are returned in
# their original `id` order.
#
# BUG FIX: the default was the typo 'relpace', so calling pretty_merge()
# without `action` made switch() return NULL and silently destroyed e1.
pretty_merge <- function(e1, e2, action = 'replace'){
  # Expand NULL rule values to every name in the package's namespace.
  e2 <- sapply(names(e2), function(x){
    if (is.null(e2[[x]])) {
      ls(envir = asNamespace(x))
    } else {
      e2[[x]]
    }
  },
  simplify = FALSE)
  # Left join: rows of e1 gain a force_ns column where a rule matched.
  e1 <- merge(e1, enframe_list(e2), by = 'text', all.x = TRUE)
  e1 <- switch(action,
               'replace' = {
                 e1$namespace[!is.na(e1$force_ns)] <- e1$force_ns[!is.na(e1$force_ns)]
                 e1
               },
               'remove' = {
                 e1[is.na(e1$force_ns), ]
               })
  e1$action[!is.na(e1$force_ns)] <- cli::symbol$checkbox_on
  e1$force_ns <- NULL
  e1[order(e1$id), ]
}
#' @importFrom sos findFn
#' @importFrom utils help.search menu
# Resolve the namespace of each function name in `funs`, filling in the
# `namespace` column of `sym.funs`.
#
# NMPATH   character vector of package names to search first (in order)
# sos      logical; if TRUE, fall back to an sos::findFn web search for
#          names still unresolved after local help.search()
# sym.funs parse-data table whose `namespace` column is filled in
# funs     character vector of function names to resolve
# ask      logical; prompt interactively when help.search() finds a name
#          in more than one package
# askenv   environment used to remember "(*)"-starred menu choices across
#          calls within the same session
pretty_find <- function(NMPATH, sos, sym.funs, funs, ask, askenv){
# Functions defined in the user's global environment shadow packages;
# exclude them from resolution.  search()[1] is ".GlobalEnv".
check_global <- ls(envir = get(search()[1]))
if (length(check_global)>0){
global.funs <- check_global[sapply(check_global, function(x) inherits(get(x),what="function"))]
funs <- funs[!funs %in% global.funs]
}
# Pass 1: exact name lookup in each candidate package, first hit wins.
for (x in NMPATH) {
if (length(funs) == 0) break
found <- funs %in% mf(x, funs)
sym.funs$namespace[sym.funs$text %in% funs[found]] <- x
funs <- funs[!found]
}
# Pass 2: help.search() over installed packages for anything left.
if (length(funs) > 0) {
for (fun in funs) {
suppressWarnings(fun.help <- utils::help.search(sprintf("^%s$", fun), ignore.case = FALSE))
if (nrow(fun.help$matches) > 0) {
if(length(fun.help$matches$Package)>1&ask){
choices <- sprintf('%s::%s',fun.help$matches$Package,fun)
# Choices previously starred with (*) persist in askenv and skip the menu.
persistent_choices <- ls(envir = askenv)
intersect_choices <- intersect(persistent_choices,choices)
if(length(intersect_choices)>0){
# NOTE(review): intersect() can return more than one match here; the
# code assumes a single persisted choice -- confirm.
choice <- intersect_choices
}else{
# Offer each package twice: "pkg::fun(*)" persists the choice,
# plain "pkg::fun" applies it once.
menu_choices <- c(sprintf('%s(*)',choices),choices)
menu_title <- sprintf('Select which namespace to use for "%s"\n(*) if you want it to persist for all subsequent instances\none will omit a namespace',fun)
choice_idx <- utils::menu(choices = menu_choices,title=menu_title)
choice <- menu_choices[choice_idx]
# NOTE(review): the pattern '\\(*\\)$' matches any choice ending in
# ')' (the '*' quantifies '('); '\\(\\*\\)$' was probably intended.
if(grepl('\\(*\\)$',choice)){
clean_choice <- gsub('\\(\\*\\)$','',choice)
assign(clean_choice,TRUE,askenv)
}
}
# Strip everything from "::" onward to keep just the package name.
pkg_choice <- gsub(':(.*?)$','',choice)
}else{
# Non-interactive (or single hit): take the first matching package.
pkg_choice <- fun.help$matches$Package[1]
}
sym.funs$namespace[sym.funs$text %in% fun] <- pkg_choice
funs <- funs[-which(funs%in%fun)]
}
}
}
# Pass 3 (optional): sos web search for names still unresolved.
if (sos & length(funs) > 0) {
for (fun in funs) {
suppressWarnings(fun.sos <- sos::findFn(fun, maxPages = 1, verbose = 0))
if (nrow(fun.sos)) {
sym.funs$namespace[sym.funs$text %in% fun] <- fun.sos$Package[1]
funs <- funs[-match(fun, funs)]
}
}
}
sym.funs
}
# Flatten a named list (package -> character vector of function names) into
# a two-column data.frame with one row per function:
# force_ns = package name, text = function name.
enframe_list <- function(x) {
  rows <- lapply(names(x), function(pkg) {
    data.frame(force_ns = pkg, text = x[[pkg]], stringsAsFactors = FALSE)
  })
  do.call(rbind, rows)
}
#' @importFrom crayon red strip_style
#' @importFrom cli symbol
# Print a console report of the function calls that were (or could not be)
# namespace-qualified in `file`.
#
# obj    parse-data table with columns namespace, new_text, action;
#        rows whose namespace is "base" are never reported
# file   path being reported; anything that is not a .R/.Rmd path is
#        labelled 'text object'
# chunk  optional chunk label appended to the file name (Rmd chunks)
#
# Each distinct replacement is listed once with a tick (resolved) or a red
# cross (unresolved), its instance count in parentheses, and a checkbox
# where the user forced/ignored the namespace.  Returns NULL.
pretty_print <- function(obj, file, chunk = NULL){
  # Reporting can be switched off globally via sinew_opts.
  if (!sinew_opts$get('pretty_print'))
    return(NULL)
  if (nrow(obj) == 0)
    return(NULL)
  if (!grepl('\\.r$|\\.rmd$', tolower(file)))
    file <- 'text object'
  if (!is.null(chunk)) {
    file <- sprintf('%s (%s)', file, chunk)
  }
  # base functions are never rewritten, so never reported.
  obj <- obj[!obj$namespace %in% c("base"), ]
  if (nrow(obj) == 0)
    return(NULL)
  obj$new_text <- crayon::strip_style(obj$new_text)
  # BUG FIX: was obj$namespac (missing the final 'e'), which only worked
  # through data.frame `$` partial matching and breaks on tibbles.
  obj$symbol <- ifelse(is.na(obj$namespace), crayon::red(cli::symbol$cross), cli::symbol$tick)
  obj$new_text <- gsub('^NA::', '', obj$new_text)
  # Count occurrences of each replacement, then report each one once.
  tbl <- table(obj$new_text)
  counts <- setNames(as.numeric(tbl), names(tbl))
  obj <- obj[!duplicated(obj$new_text), ]
  obj$counts <- NA
  obj$counts[match(obj$new_text, names(counts))] <- counts
  obj$out_text <- sprintf(' %s %s (%s) %s',
                          obj$symbol,
                          numpad(obj$new_text),
                          numpad(obj$counts),
                          obj$action
  )
  cat(
    sprintf("\nfunctions changed in '%s':\n\n%s: found, %s: not found, (): instances, %s: user intervention\n\n%s\n\n",
            file,
            cli::symbol$tick,
            crayon::red(cli::symbol$cross),
            cli::symbol$checkbox_on,
            paste0(obj$out_text, collapse = '\n')
    )
  )
}
# Pad every element of x to a common display width: numeric/integer vectors
# get leading zeros, character vectors get trailing spaces.  Any other type
# errors with "object 'ret' not found", matching the original behaviour.
numpad <- function(x) {
  width <- max(nchar(as.character(x)))
  if (inherits(x, c('numeric', 'integer'))) {
    fmt <- paste0('%0', width, 'd')
    ret <- sprintf(fmt, x)
  }
  if (inherits(x, 'character')) {
    padding <- strrep(' ', width - nchar(x))
    ret <- paste0(x, padding)
  }
  ret
}
# Return the subset of names in `pat` that package `x` exposes on its
# search-path environment ("package:x").  The package is attached on demand;
# any failure (unknown package, attach error) yields character(0).
#
# x    a single package name
# pat  character vector of candidate function names
mf <- function(x, pat) {
  ns <- try(
    {
      # Attach the package unless it is both loaded and on the search path;
      # ls() below needs the "package:x" environment to exist.
      # (scalar condition, so || instead of the original elementwise |)
      if (!isNamespaceLoaded(x) || !x %in% basename(searchpaths())) {
        attachNamespace(x)   # was assigned to an unused variable `y`
      }
      ls(
        name = sprintf('package:%s', x),
        pattern = sprintf("^(%s)$", paste0(pat, collapse = "|"))
      )
    },
    silent = TRUE
  )
  # inherits() is the idiomatic, multi-class-safe test for try() failure
  # (was class(ns) == "try-error").
  if (inherits(ns, "try-error")) {
    ns <- vector("character")
  }
  ns
}
| /R/pretty_utils.R | permissive | moodymudskipper/sinew | R | false | false | 7,411 | r | pretty_parse <- function(txt){
p <- parse(text = txt)
p1 <- utils::getParseData(p)
rmParent <- p1$parent[p1$token == "SYMBOL_PACKAGE"]
ret <- p1[p1$token == "SYMBOL_FUNCTION_CALL" & !p1$parent %in% rmParent, ]
if(length(ret)>0){
#clean out list functions
clean_idx <- sapply(sprintf('\\$%s',ret$text),function(p) !any(grepl(pattern = p,x=txt)))
if(length(clean_idx)>0){
ret <- ret[clean_idx,]
}
}
ret
}
#' @importFrom crayon red
#' @importFrom stringi stri_sub
# Rewrite function calls in `txt` to their namespace-qualified form and report.
#
# txt:       the source file split into lines.
# sym.funs:  parse table (columns text, namespace, line1, col1, col2, ...)
#            produced upstream from getParseData().
# nm:        path of the file being processed.
# overwrite: if TRUE, write the rewritten lines back to `nm`; otherwise only
#            print the (red-highlighted) replacements.
# force, ignore: user override lists, passed through to pretty_manip().
#
# Returns the manipulated sym.funs table.
pretty_shift <- function(txt, sym.funs, nm, overwrite, force, ignore){
  # Apply user overrides and build the "pkg::fun" replacement text.
  sym.funs <- pretty_manip(sym.funs, force, ignore)
  if(!overwrite){
    sym.funs$new_text <- crayon::red(sym.funs$new_text)
  }
  # Only rewrite calls resolved to a known, non-base namespace.
  idx <- which(!sym.funs$namespace %in% c("base", NA))
  sym.funs.i <- split(sym.funs[idx,],sym.funs$line1[idx])
  # Each replacement lengthens its line, so later matches on the same source
  # line must be shifted right by the accumulated growth of earlier ones.
  sym.funs.i.shift <- lapply(sym.funs.i,function(sf){
    x <- rep(0,nrow(sf))
    if(nrow(sf)>1){
      for(i in 2:nrow(sf)){
        x[i:nrow(sf)] <- x[i] + (nchar(sf$new_text[i - 1]) - nchar(sf$text[i - 1]))
      }
    }
    sf$col_shift <- x
    sf
  })
  sym.funs.shift <- do.call('rbind',sym.funs.i.shift)
  sym.funs.shift$col1 <- sym.funs.shift$col1 + sym.funs.shift$col_shift
  sym.funs.shift$col2 <- sym.funs.shift$col2 + sym.funs.shift$col_shift
  # Splice each replacement into its line in place.
  # NOTE(review): if idx is empty, 1:length(idx) is c(1, 0) and this loop still
  # iterates -- presumably callers only reach here with matches; confirm.
  for(i in 1:length(idx)){
    stringi::stri_sub(
      str = txt[sym.funs.shift$line1[i]],
      from = sym.funs.shift$col1[i],
      to = sym.funs.shift$col2[i]) <- sym.funs.shift$new_text[i]
  }
  if (overwrite) {
    # Persist the rewritten file, then report what changed.
    cat(txt, sep = "\n", file = nm)
    pretty_print(sym.funs,file = nm)
  } else {
    # Dry run: report, and optionally echo the rewritten text to the console.
    pretty_print(sym.funs,file = nm)
    if(sinew_opts$get('pretty_print'))
      writeLines(crayon::white(txt))
  }
  sym.funs
}
# Prepare the parse table for rewriting: apply the user's force/ignore
# overrides, then compose the namespace-qualified replacement text
# (e.g. "dplyr::mutate") for every remaining call.
pretty_manip <- function(sym.funs, force, ignore) {
  # Every row starts with an empty user-intervention marker.
  sym.funs$action <- ""
  if (!is.null(force)) {
    sym.funs <- pretty_merge(sym.funs, force, "replace")
  }
  if (!is.null(ignore)) {
    sym.funs <- pretty_merge(sym.funs, ignore, "remove")
  }
  sym.funs$new_text <- paste0(sym.funs$namespace, "::", sym.funs$text)
  sym.funs
}
#' @importFrom cli symbol
# Merge user-supplied namespace directives into the parsed-call table `e1`.
#
# e1:     data.frame of parsed calls (columns text, namespace, action, id, ...).
# e2:     named list; names are package names, values are function names, or
#         NULL meaning "every object of that package's namespace".
# action: 'replace' to override the detected namespace with the user's choice,
#         'remove' to drop the matching rows entirely.
#
# Returns e1 in its original parse order, with force_ns applied and matched
# rows marked as user interventions.
pretty_merge <- function(e1,e2,action = 'replace'){
  # BUG FIX: the default used to be the misspelled 'relpace', which made
  # switch() return NULL and crashed any caller relying on the default.
  # Expand NULL entries to the package's full namespace listing.
  e2 <- sapply(names(e2),function(x){
    if(is.null(e2[[x]])){
      ls(envir = asNamespace(x))
    }else{
      e2[[x]]
    }
  },
  simplify = FALSE)
  # Attach the user's package choice (force_ns) to matching function names.
  e1 <- merge(e1,enframe_list(e2),by = 'text',all.x = TRUE)
  e1 <- switch(action,
               'replace'={
                 e1$namespace[!is.na(e1$force_ns)] <- e1$force_ns[!is.na(e1$force_ns)]
                 e1
               },
               'remove'={
                 e1[is.na(e1$force_ns),]
               })
  # Mark rows touched by a user directive for the summary printout.
  e1$action[!is.na(e1$force_ns)] <- cli::symbol$checkbox_on
  e1$force_ns <- NULL
  # merge() reorders rows; restore the original parse order.
  e1[order(e1$id),]
}
#' @importFrom sos findFn
#' @importFrom utils help.search menu
# Resolve the package (namespace) defining each function name in `funs`.
#
# NMPATH:   character vector of package names to search first, in order.
# sos:      logical; if TRUE, fall back to an sos::findFn lookup for any names
#           still unresolved after the local searches.
# sym.funs: parse table whose `namespace` column is filled in here.
# funs:     function names needing a namespace.
# ask:      logical; if TRUE and help.search() returns several candidate
#           packages, prompt the user to choose interactively.
# askenv:   environment remembering "persist" menu choices across calls.
#
# Returns `sym.funs` with resolved namespaces filled in.
pretty_find <- function(NMPATH, sos, sym.funs, funs, ask, askenv){
  # Functions defined in the user's global environment shadow package
  # versions, so they never need a namespace prefix.
  check_global <- ls(envir = get(search()[1]))
  if (length(check_global)>0){
    global.funs <- check_global[sapply(check_global, function(x) inherits(get(x),what="function"))]
    funs <- funs[!funs %in% global.funs]
  }
  # First pass: exact matches among the packages listed in NMPATH.
  for (x in NMPATH) {
    if (length(funs) == 0) break
    found <- funs %in% mf(x, funs)
    sym.funs$namespace[sym.funs$text %in% funs[found]] <- x
    funs <- funs[!found]
  }
  # Second pass: consult the local help index for anything still unresolved.
  if (length(funs) > 0) {
    for (fun in funs) {
      suppressWarnings(fun.help <- utils::help.search(sprintf("^%s$", fun), ignore.case = FALSE))
      if (nrow(fun.help$matches) > 0) {
        # Several candidate packages and interactive mode: ask the user.
        if(length(fun.help$matches$Package)>1&ask){
          choices <- sprintf('%s::%s',fun.help$matches$Package,fun)
          # Reuse a choice the user previously marked as persistent.
          persistent_choices <- ls(envir = askenv)
          intersect_choices <- intersect(persistent_choices,choices)
          if(length(intersect_choices)>0){
            choice <- intersect_choices
          }else{
            # Each candidate is offered twice: "pkg::fun(*)" persists the
            # choice for later calls, plain "pkg::fun" applies it once.
            menu_choices <- c(sprintf('%s(*)',choices),choices)
            menu_title <- sprintf('Select which namespace to use for "%s"\n(*) if you want it to persist for all subsequent instances\none will omit a namespace',fun)
            choice_idx <- utils::menu(choices = menu_choices,title=menu_title)
            choice <- menu_choices[choice_idx]
            # NOTE(review): '\\(*\\)$' makes the escaped "(" optional, so it
            # matches every choice; the pattern '\\(\\*\\)$' looks intended --
            # confirm before relying on the non-persistent branch.
            if(grepl('\\(*\\)$',choice)){
              clean_choice <- gsub('\\(\\*\\)$','',choice)
              assign(clean_choice,TRUE,askenv)
            }
          }
          # Strip "::fun" (and any trailing marker) to recover the package.
          pkg_choice <- gsub(':(.*?)$','',choice)
        }else{
          pkg_choice <- fun.help$matches$Package[1]
        }
        sym.funs$namespace[sym.funs$text %in% fun] <- pkg_choice
        funs <- funs[-which(funs%in%fun)]
      }
    }
  }
  # Final pass: optional online lookup via the sos package.
  if (sos & length(funs) > 0) {
    for (fun in funs) {
      suppressWarnings(fun.sos <- sos::findFn(fun, maxPages = 1, verbose = 0))
      if (nrow(fun.sos)) {
        sym.funs$namespace[sym.funs$text %in% fun] <- fun.sos$Package[1]
        funs <- funs[-match(fun, funs)]
      }
    }
  }
  sym.funs
}
# Flatten a named list into a two-column lookup table: one row per element
# value (`text`), keyed by the name of the list slot it came from (`force_ns`).
enframe_list <- function(x) {
  pieces <- lapply(names(x), function(nm) {
    data.frame(force_ns = nm, text = x[[nm]], stringsAsFactors = FALSE)
  })
  # Stack the per-slot frames; an empty input list yields NULL, as before.
  do.call(rbind, pieces)
}
#' @importFrom crayon red strip_style
#' @importFrom cli symbol
# Print a per-file summary of the namespace substitutions that were made.
#
# obj:   data.frame of substitutions (columns new_text, namespace, action, ...).
# file:  path of the processed file; only used for the header line.
# chunk: optional chunk label (Rmd input), appended to the file name.
#
# Called for its console side effect; returns NULL when nothing is printed.
pretty_print <- function(obj,file,chunk=NULL){
  if(!sinew_opts$get('pretty_print'))
    return(NULL)
  if(nrow(obj)==0)
    return(NULL)
  # Non-file input (e.g. a character vector) gets a generic label.
  if(!grepl('\\.r$|\\.rmd$',tolower(file)))
    file <- 'text object'
  if(!is.null(chunk)){
    file <- sprintf('%s (%s)',file,chunk)
  }
  # base functions are never prefixed, so they are not reported.
  obj <- obj[!obj$namespace %in% c("base"),]
  if(nrow(obj)==0)
    return(NULL)
  obj$new_text <- crayon::strip_style(obj$new_text)
  # BUG FIX: was `obj$namespac`, which only worked through `$` partial
  # matching on data.frames.
  obj$symbol <- ifelse(is.na(obj$namespace),crayon::red(cli::symbol$cross),cli::symbol$tick)
  obj$new_text <- gsub('^NA::','',obj$new_text)
  tbl <- table(obj$new_text)
  counts <- setNames(as.numeric(tbl),names(tbl))
  obj <- obj[!duplicated(obj$new_text),]
  # BUG FIX: look instance counts up by name. The previous
  # `obj$counts[match(obj$new_text, names(counts))] <- counts` wrote counts to
  # the wrong rows whenever the deduplicated order was not an involution of
  # the alphabetical table() order.
  obj$counts <- as.numeric(counts[obj$new_text])
  obj$out_text <- sprintf(' %s %s (%s) %s',
                          obj$symbol,
                          numpad(obj$new_text),
                          numpad(obj$counts),
                          obj$action
  )
  cat(
    sprintf("\nfunctions changed in '%s':\n\n%s: found, %s: not found, (): instances, %s: user intervention\n\n%s\n\n",
            file,
            cli::symbol$tick,
            crayon::red(cli::symbol$cross),
            cli::symbol$checkbox_on,
            paste0(obj$out_text,collapse = '\n')
    )
  )
}
# Pad every element of `x` to a common display width.
#
# Numeric/integer input is zero-padded on the left (e.g. 1 -> "001"); anything
# else is coerced to character and space-padded on the right. The width is the
# widest element's character representation.
#
# Returns a character vector the same length as `x`.
numpad <- function(x){
  pad <- max(nchar(as.character(x)))
  if (inherits(x, c('numeric', 'integer'))) {
    ret <- sprintf(paste0('%0', pad, 'd'), x)
  } else {
    # Previously only 'character' input reached a defined `ret`; any other
    # class errored with "object 'ret' not found". Coercing generalizes the
    # function without changing the numeric or character behaviour.
    x <- as.character(x)
    ret <- sprintf('%s%s', x, strrep(' ', pad - nchar(x)))
  }
  ret
}
# List the objects exported by package `x` whose names exactly match one of
# the names in `pat`.
#
# Attaches the package first if its namespace is not loaded or it is not on
# the search path; any failure (e.g. package not installed) yields
# character(0) instead of an error.
mf <- function(x, pat) {
  ns <- try(
    {
      # `||` short-circuits on these scalar conditions (was vectorized `|`).
      if (!isNamespaceLoaded(x) || !x %in% basename(searchpaths())) {
        attachNamespace(x)
      }
      ls(
        name = sprintf('package:%s', x),
        pattern = sprintf("^(%s)$", paste0(pat, collapse = "|"))
      )
    },
    silent = TRUE
  )
  # inherits() is the robust check for try() failure; `class(ns) == ...`
  # breaks when class() returns more than one element.
  if (inherits(ns, "try-error")) {
    ns <- vector("character")
  }
  ns
}
|
packages<-c("textdata","dplyr","gutenbergr","tidytext","wordcloud","readr","tibble")
install.packages(setdiff(packages, rownames(installed.packages())))
library(tidytext)
library(dplyr)
library(gutenbergr)
library(stringr)
library(wordcloud)
library(tidyr)
library(ggplot2)
library(readr)
library(tibble)
create_stop_words <- function(stop_words_csv) {
  # Read the project-specific stop words and tag them with their own lexicon
  # label so they can be told apart from tidytext's standard lexicons.
  custom <- read_csv(stop_words_csv) %>%
    add_column(lexicon = "horror_novels")
  # Append the custom words to tidytext's built-in `stop_words` table.
  bind_rows(stop_words, custom)
}
create_tidy_books <- function(gutenberg_collection){
  # Tokenize a gutenbergr download into one-word-per-row "tidy" form.
  #
  # gutenberg_collection: data frame with at least gutenberg_id and text
  #   columns (as returned by gutenbergr's download functions).
  #
  # Per book (gutenberg_id) this adds:
  #   - linenumber: the line's position within that book, and
  #   - chapter: a running counter incremented whenever a line starts with
  #     "chapter" followed by an arabic or roman numeral (case-insensitive),
  # then unnests so each row holds a single word (the text column is consumed).
  result <- gutenberg_collection %>%
    group_by(gutenberg_id) %>%
    mutate(linenumber = row_number(),
           chapter = cumsum(str_detect(text,
                                       regex("^chapter [\\divxlc]",
                                             ignore_case = TRUE)))) %>%
    ungroup()%>%
    unnest_tokens(word, text)
  return(result)
}
# Render a word cloud for one book and save it as ../results/<title>.jpeg.
#
# id:         gutenberg_id of the book to plot.
# title:      book title, used only to name the output file.
# tidy_books: one-word-per-row data frame (see create_tidy_books).
create_wordcloud <- function(id, title, tidy_books) {
  filename <- paste("../results/", title, ".jpeg", sep = "")
  jpeg(file = filename)
  # Close the graphics device even if the pipeline below errors; previously a
  # failure left the jpeg device dangling open.
  on.exit(dev.off(), add = TRUE)
  tidy_books %>%
    filter(gutenberg_id == id) %>%
    anti_join(stop_words) %>%
    count(word) %>%
    with(wordcloud(word, n, max.words = 100, min.freq = 10, scale = c(4, .5)))
}
# Net sentiment per book in chunks of `chunklength` source lines.
#
# book_collection: tidy one-word-per-row data frame with columns title,
#                  linenumber and word (see create_tidy_books).
# chunklength:     number of lines pooled into one sentiment index.
#
# Returns one row per (title, index) with positive/negative word counts and
# their difference in `sentiment`.
get_net_sentiment <- function(book_collection, chunklength) {
  # BUG FIX: the body previously referenced the global `tidy_horror` instead
  # of the `book_collection` argument, silently ignoring whatever was passed.
  result <- book_collection %>%
    inner_join(get_sentiments("bing")) %>%
    count(title, index = linenumber %/% chunklength, sentiment) %>%
    spread(sentiment, n, fill = 0) %>%
    mutate(sentiment = positive - negative)
  return(result)
}
| /src/sentiment_analysis.R | no_license | CS-510-Fall-2020/MidtermProjectTristanTran | R | false | false | 1,591 | r | packages<-c("textdata","dplyr","gutenbergr","tidytext","wordcloud","readr","tibble")
install.packages(setdiff(packages, rownames(installed.packages())))
library(tidytext)
library(dplyr)
library(gutenbergr)
library(stringr)
library(wordcloud)
library(tidyr)
library(ggplot2)
library(readr)
library(tibble)
create_stop_words <- function(stop_words_csv) {
  # Read the project-specific stop words and tag them with their own lexicon
  # label so they can be told apart from tidytext's standard lexicons.
  custom <- read_csv(stop_words_csv) %>%
    add_column(lexicon = "horror_novels")
  # Append the custom words to tidytext's built-in `stop_words` table.
  bind_rows(stop_words, custom)
}
create_tidy_books <- function(gutenberg_collection){
  # Tokenize a gutenbergr download into one-word-per-row "tidy" form.
  #
  # gutenberg_collection: data frame with at least gutenberg_id and text
  #   columns (as returned by gutenbergr's download functions).
  #
  # Per book (gutenberg_id) this adds:
  #   - linenumber: the line's position within that book, and
  #   - chapter: a running counter incremented whenever a line starts with
  #     "chapter" followed by an arabic or roman numeral (case-insensitive),
  # then unnests so each row holds a single word (the text column is consumed).
  result <- gutenberg_collection %>%
    group_by(gutenberg_id) %>%
    mutate(linenumber = row_number(),
           chapter = cumsum(str_detect(text,
                                       regex("^chapter [\\divxlc]",
                                             ignore_case = TRUE)))) %>%
    ungroup()%>%
    unnest_tokens(word, text)
  return(result)
}
# Render a word cloud for one book and save it as ../results/<title>.jpeg.
#
# id:         gutenberg_id of the book to plot.
# title:      book title, used only to name the output file.
# tidy_books: one-word-per-row data frame (see create_tidy_books).
create_wordcloud <- function(id, title, tidy_books) {
  filename <- paste("../results/", title, ".jpeg", sep = "")
  jpeg(file = filename)
  # Close the graphics device even if the pipeline below errors; previously a
  # failure left the jpeg device dangling open.
  on.exit(dev.off(), add = TRUE)
  tidy_books %>%
    filter(gutenberg_id == id) %>%
    anti_join(stop_words) %>%
    count(word) %>%
    with(wordcloud(word, n, max.words = 100, min.freq = 10, scale = c(4, .5)))
}
# Net sentiment per book in chunks of `chunklength` source lines.
#
# book_collection: tidy one-word-per-row data frame with columns title,
#                  linenumber and word (see create_tidy_books).
# chunklength:     number of lines pooled into one sentiment index.
#
# Returns one row per (title, index) with positive/negative word counts and
# their difference in `sentiment`.
get_net_sentiment <- function(book_collection, chunklength) {
  # BUG FIX: the body previously referenced the global `tidy_horror` instead
  # of the `book_collection` argument, silently ignoring whatever was passed.
  result <- book_collection %>%
    inner_join(get_sentiments("bing")) %>%
    count(title, index = linenumber %/% chunklength, sentiment) %>%
    spread(sentiment, n, fill = 0) %>%
    mutate(sentiment = positive - negative)
  return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{figure2a}
\alias{figure2a}
\title{miR34a asRNA expression in HCT116 and HEK293t upon doxorubicin treatment.}
\format{A tibble with 48 rows and 6 variables:
\describe{
\item{'Biological Replicate'}{Numeric; Indicates the biological replicate of the sample.}
\item{'Cell line'}{Character; Indicating the cell line the data corresponds to.}
\item{Treatment}{Character; Indicates the treatment type.}
\item{gene}{Character; indicates the gene that the Ct values correspond to.}
\item{Ct1}{Numeric; Ct value corresponding to the first technical replicate.}
\item{Ct2}{Numeric; Ct value corresponding to the 2nd technical replicate.}
...
}}
\description{
All cell lines were cultured at 5\% CO2 and 37 °C with HEK293T cells cultured
in DMEM high glucose (Hyclone) and HCT116 cells in McCoy’s 5a
(Life Technologies). All growth media were supplemented with 10\%
heat-inactivated FBS and 50 μg/ml of streptomycin and 50 μg/ml of penicillin.
Cells were plated at 300,000 cells per well in a 6-well plate and cultured
overnight. The following day cells were treated with 0, 100, 200, or
500 ng/ml doxorubicin for 24hrs. RNA was extracted using the RNeasy mini kit
(Qiagen) and subsequently treated with DNase (Ambion Turbo DNA-free, Life
Technologies). 500ng RNA was used for cDNA synthesis using MuMLV (Life
Technologies) and a 1:1 mix of oligo(dT) and random nanomers. QPCR was
carried out using KAPA 2G SYBRGreen (Kapa Biosystems) using the Applied
Biosystems 7900HT machine with the cycling conditions: 95 °C for 3 min,
95 °C for 3 s, 60 °C for 30 s.
}
\examples{
getData('Figure 2a')
}
| /man/figure2a.Rd | no_license | GranderLab/miR34a_asRNA_project | R | false | true | 1,694 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{figure2a}
\alias{figure2a}
\title{miR34a asRNA expression in HCT116 and HEK293t upon doxorubicin treatment.}
\format{A tibble with 48 rows and 6 variables:
\describe{
\item{'Biological Replicate'}{Numeric; Indicates the biological replicate of the sample.}
\item{'Cell line'}{Character; Indicating the cell line the data corresponds to.}
\item{Treatment}{Character; Indicates the treatment type.}
\item{gene}{Character; indicates the gene that the Ct values correspond to.}
\item{Ct1}{Numeric; Ct value corresponding to the first technical replicate.}
\item{Ct2}{Numeric; Ct value corresponding to the 2nd technical replicate.}
...
}}
\description{
All cell lines were cultured at 5% CO2 and 37° C with HEK293T cells cultured
in DMEM high glucose (Hyclone) and HCT116 cells in McCoy’s 5a
(Life Technologies). All growth mediums were supplemented with 10%
heat-inactivated FBS and 50 μg/ml of streptomycin and 50 μg/ml of penicillin.
Cells were plated at 300,000 cells per well in a 6-well plate and cultured
overnight. The following day cells were treated with 0, 100, 200, or
500 ng/ml doxorubicin for 24hrs. RNA was extracted using the RNeasy mini kit
(Qiagen) and subsequently treated with DNase (Ambion Turbo DNA-free, Life
Technologies). 500ng RNA was used for cDNA synthesis using MuMLV (Life
Technologies) and a 1:1 mix of oligo(dT) and random nanomers. QPCR was
carried out using KAPA 2G SYBRGreen (Kapa Biosystems) using the Applied
Biosystems 7900HT machine with the cycling conditions: 95 °C for 3 min,
95 °C for 3 s, 60 °C for 30 s.
}
\examples{
getData('Figure 2a')
}
|
#Load the data file
library(readxl)
Loan <- read_excel("C:/Users/Vaibhav-PC/Downloads/Project 2/data.xlsx")
#Work on a copy so the original download never has to be re-read.
Loan1 = Loan
#Distribution of the target variable loan_default.
summary(Loan1$loan_default)
#UniqueID is a row identifier with no predictive value, so drop it.
Loan1 = Loan1[-1]
#Employment.Type arrives as a character vector but is categorical, so
#convert it to a factor.
Loan1$Employment.Type = as.factor(Loan1$Employment.Type)
#To see the distribution of each type of employment.
summary(Loan1$Employment.Type)
#MobileNo_Avl_Flag is constant (min and max are both 1), so it carries no
#information for modelling and is dropped.
Loan1 = Loan1[-13]
#Converting AVERAGE.ACCT.AGE and CREDIT.HISTORY.LENGTH to numeric years.
library(stringr)
# Parse durations of the form "2yrs 6mon" into fractional years. The two
# space-separated tokens are stripped of letters; a missing token counts as 0.
# Vectorized; replaces two near-identical copy-pasted conversion loops.
duration_to_years <- function(x) {
  parts <- strsplit(as.character(x), " ", fixed = TRUE)
  token <- function(i) vapply(parts, function(p) p[i], character(1))
  to_num <- function(v) {
    v <- gsub("[a-zA-Z]", "", v)
    v[is.na(v)] <- "0"
    as.numeric(v)
  }
  to_num(token(1)) + to_num(token(2)) / 12
}
Loan1$AVERAGE.ACCT.AGE = duration_to_years(Loan1$AVERAGE.ACCT.AGE)
Loan1$CREDIT.HISTORY.LENGTH = duration_to_years(Loan1$CREDIT.HISTORY.LENGTH)
#Using date of birth and disbursal date to calculate age at time of disbursal.
#Then removing date of birth and disbursal date as they are not needed anymore.
Loan1$Date.of.Birth = as.Date(Loan1$Date.of.Birth)
Loan1$DisbursalDate = as.Date(Loan1$DisbursalDate)
library(eeptools)
Loan1$Age = age_calc(Loan1$Date.of.Birth, Loan1$DisbursalDate, units = "years")
#Presumably columns 8 and 10 are Date.of.Birth and DisbursalDate after the
#earlier column drops -- TODO confirm.
Loan1 = Loan1[-c(8,10)]
#NOTE(review): write_xlsx() comes from the writexl package, which is never
#attached in this script -- this call will fail unless library(writexl) is
#added; confirm and load it.
write_xlsx(Loan1, "D://Final_Data.xlsx")
#Outlier removal: keep only rows whose value lies strictly within
#mean +/- 2*sd of the named column. which() reproduces the original subset()
#behaviour of dropping rows where the comparison evaluates to NA.
trim_outliers <- function(df, col) {
  v <- df[[col]]
  lt <- mean(v) - 2 * sd(v)
  ut <- mean(v) + 2 * sd(v)
  df[which(v < ut & v > lt), ]
}
#Outliers removal from disbursed_amount
Loan2 = trim_outliers(Loan1, "disbursed_amount")
#Outliers removal from asset_cost
Loan3 = trim_outliers(Loan2, "asset_cost")
#Making -ve values in PRI.CURRENT.BALANCE zero before trimming outliers.
Loan3$PRI.CURRENT.BALANCE = ifelse(Loan3$PRI.CURRENT.BALANCE < 0, 0, Loan3$PRI.CURRENT.BALANCE)
Loan3 = trim_outliers(Loan3, "PRI.CURRENT.BALANCE")
#Outlier removals in PRI.SANCTIONED.AMOUNT
Loan3 = trim_outliers(Loan3, "PRI.SANCTIONED.AMOUNT")
#Outlier removals in PRI.DISBURSED.AMOUNT
Loan3 = trim_outliers(Loan3, "PRI.DISBURSED.AMOUNT")
#Removal of -ve values and outliers from SEC.CURRENT.BALANCE
Loan3$SEC.CURRENT.BALANCE = ifelse(Loan3$SEC.CURRENT.BALANCE < 0, 0, Loan3$SEC.CURRENT.BALANCE)
Loan3 = trim_outliers(Loan3, "SEC.CURRENT.BALANCE")
#outlier removal from SEC.SANCTIONED.AMOUNT
Loan3 = trim_outliers(Loan3, "SEC.SANCTIONED.AMOUNT")
#outlier removal from SEC.DISBURSED.AMOUNT
Loan3 = trim_outliers(Loan3, "SEC.DISBURSED.AMOUNT")
#outlier removal from PRIMARY.INSTAL.AMT
Loan3 = trim_outliers(Loan3, "PRIMARY.INSTAL.AMT")
#outlier removal from SEC.INSTAL.AMT
Loan3 = trim_outliers(Loan3, "SEC.INSTAL.AMT")
#Scaling of data frame as it contains numeric variables with huge variations in range.
Loan4 = scale(Loan3[c(1,2,3,16,18:36,38)])
Loan4 = as.data.frame(Loan4)
Loan4 = cbind(Loan4, Loan3[c(4:15,17,37)])
#Omit observations with NA's; this also drops rows whose Employment.Type was
#blank in the Excel source.
Loan5 = na.omit(Loan4)
#PERFORM_CNS.SCORE.DESCRIPTION merely bins PERFORM_CNS.SCORE into categories,
#so exclude it from model creation.
Loan5 = Loan5[-37]
#Model Creation
library(caret)
set.seed(1)
intrain = createDataPartition(Loan5$loan_default, p = 0.8, list = FALSE)
Train = Loan5[intrain,]
Test = Loan5[-intrain,]
#BUG FIX: the response must be referenced as a bare column name. With the old
#formula `Train$loan_default ~ .` the `.` expanded to every column of Train,
#including loan_default itself, so the outcome leaked into the predictors.
model0 = glm(loan_default ~ ., data = Train, family = binomial(link = "logit"))
library(MASS)
#Stepwise model selection by AIC, in both directions.
step0 = stepAIC(model0, direction = "both")
summary(step0)
#Predict default probabilities on the hold-out set (column 37 = loan_default).
Pred = predict(step0, newdata = Test[,-37], type = "response")
#Classification cut-off of 0.4 on the predicted default probability.
Pred1 = ifelse(Pred < 0.4, 0, 1)
#Create a confusion matrix
library(e1071)
a = table(Test$loan_default, Pred1, dnn = list("actual", "predicted"))
a
caret::confusionMatrix(a)
| /Final Project 2.R | no_license | VaibhavBajaj-hub/Data-analytics-capstone | R | false | false | 6,512 | r | #Load the data file
library(readxl)
Loan <- read_excel("C:/Users/Vaibhav-PC/Downloads/Project 2/data.xlsx")
#Work on a copy so the original download never has to be re-read.
Loan1 = Loan
#Distribution of the target variable loan_default.
summary(Loan1$loan_default)
#UniqueID is a row identifier with no predictive value, so drop it.
Loan1 = Loan1[-1]
#Employment.Type arrives as a character vector but is categorical, so
#convert it to a factor.
Loan1$Employment.Type = as.factor(Loan1$Employment.Type)
#To see the distribution of each type of employment.
summary(Loan1$Employment.Type)
#MobileNo_Avl_Flag is constant (min and max are both 1), so it carries no
#information for modelling and is dropped.
Loan1 = Loan1[-13]
#Converting AVERAGE.ACCT.AGE and CREDIT.HISTORY.LENGTH to numeric years.
library(stringr)
# Parse durations of the form "2yrs 6mon" into fractional years. The two
# space-separated tokens are stripped of letters; a missing token counts as 0.
# Vectorized; replaces two near-identical copy-pasted conversion loops.
duration_to_years <- function(x) {
  parts <- strsplit(as.character(x), " ", fixed = TRUE)
  token <- function(i) vapply(parts, function(p) p[i], character(1))
  to_num <- function(v) {
    v <- gsub("[a-zA-Z]", "", v)
    v[is.na(v)] <- "0"
    as.numeric(v)
  }
  to_num(token(1)) + to_num(token(2)) / 12
}
Loan1$AVERAGE.ACCT.AGE = duration_to_years(Loan1$AVERAGE.ACCT.AGE)
Loan1$CREDIT.HISTORY.LENGTH = duration_to_years(Loan1$CREDIT.HISTORY.LENGTH)
#Using date of birth and disbursal date to calculate age at time of disbursal.
#Then removing date of birth and disbursal date as they are not needed anymore.
Loan1$Date.of.Birth = as.Date(Loan1$Date.of.Birth)
Loan1$DisbursalDate = as.Date(Loan1$DisbursalDate)
library(eeptools)
Loan1$Age = age_calc(Loan1$Date.of.Birth, Loan1$DisbursalDate, units = "years")
#Presumably columns 8 and 10 are Date.of.Birth and DisbursalDate after the
#earlier column drops -- TODO confirm.
Loan1 = Loan1[-c(8,10)]
#NOTE(review): write_xlsx() comes from the writexl package, which is never
#attached in this script -- this call will fail unless library(writexl) is
#added; confirm and load it.
write_xlsx(Loan1, "D://Final_Data.xlsx")
#Outlier removal: keep only rows whose value lies strictly within
#mean +/- 2*sd of the named column. which() reproduces the original subset()
#behaviour of dropping rows where the comparison evaluates to NA.
trim_outliers <- function(df, col) {
  v <- df[[col]]
  lt <- mean(v) - 2 * sd(v)
  ut <- mean(v) + 2 * sd(v)
  df[which(v < ut & v > lt), ]
}
#Outliers removal from disbursed_amount
Loan2 = trim_outliers(Loan1, "disbursed_amount")
#Outliers removal from asset_cost
Loan3 = trim_outliers(Loan2, "asset_cost")
#Making -ve values in PRI.CURRENT.BALANCE zero before trimming outliers.
Loan3$PRI.CURRENT.BALANCE = ifelse(Loan3$PRI.CURRENT.BALANCE < 0, 0, Loan3$PRI.CURRENT.BALANCE)
Loan3 = trim_outliers(Loan3, "PRI.CURRENT.BALANCE")
#Outlier removals in PRI.SANCTIONED.AMOUNT
Loan3 = trim_outliers(Loan3, "PRI.SANCTIONED.AMOUNT")
#Outlier removals in PRI.DISBURSED.AMOUNT
Loan3 = trim_outliers(Loan3, "PRI.DISBURSED.AMOUNT")
#Removal of -ve values and outliers from SEC.CURRENT.BALANCE
Loan3$SEC.CURRENT.BALANCE = ifelse(Loan3$SEC.CURRENT.BALANCE < 0, 0, Loan3$SEC.CURRENT.BALANCE)
Loan3 = trim_outliers(Loan3, "SEC.CURRENT.BALANCE")
#outlier removal from SEC.SANCTIONED.AMOUNT
Loan3 = trim_outliers(Loan3, "SEC.SANCTIONED.AMOUNT")
#outlier removal from SEC.DISBURSED.AMOUNT
Loan3 = trim_outliers(Loan3, "SEC.DISBURSED.AMOUNT")
#outlier removal from PRIMARY.INSTAL.AMT
Loan3 = trim_outliers(Loan3, "PRIMARY.INSTAL.AMT")
#outlier removal from SEC.INSTAL.AMT
Loan3 = trim_outliers(Loan3, "SEC.INSTAL.AMT")
#Scaling of data frame as it contains numeric variables with huge variations in range.
Loan4 = scale(Loan3[c(1,2,3,16,18:36,38)])
Loan4 = as.data.frame(Loan4)
Loan4 = cbind(Loan4, Loan3[c(4:15,17,37)])
#Omit observations with NA's; this also drops rows whose Employment.Type was
#blank in the Excel source.
Loan5 = na.omit(Loan4)
#PERFORM_CNS.SCORE.DESCRIPTION merely bins PERFORM_CNS.SCORE into categories,
#so exclude it from model creation.
Loan5 = Loan5[-37]
#Model Creation
library(caret)
set.seed(1)
intrain = createDataPartition(Loan5$loan_default, p = 0.8, list = FALSE)
Train = Loan5[intrain,]
Test = Loan5[-intrain,]
#BUG FIX: the response must be referenced as a bare column name. With the old
#formula `Train$loan_default ~ .` the `.` expanded to every column of Train,
#including loan_default itself, so the outcome leaked into the predictors.
model0 = glm(loan_default ~ ., data = Train, family = binomial(link = "logit"))
library(MASS)
#Stepwise model selection by AIC, in both directions.
step0 = stepAIC(model0, direction = "both")
summary(step0)
#Predict default probabilities on the hold-out set (column 37 = loan_default).
Pred = predict(step0, newdata = Test[,-37], type = "response")
#Classification cut-off of 0.4 on the predicted default probability.
Pred1 = ifelse(Pred < 0.4, 0, 1)
#Create a confusion matrix
library(e1071)
a = table(Test$loan_default, Pred1, dnn = list("actual", "predicted"))
a
caret::confusionMatrix(a)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selection.R
\name{selection}
\alias{selection}
\title{Full Bayesian Models to handle missingness in Economic Evaluations (Selection Models)}
\usage{
selection(
data,
model.eff,
model.cost,
model.me = me ~ 1,
model.mc = mc ~ 1,
dist_e,
dist_c,
type,
prob = c(0.025, 0.975),
n.chains = 2,
n.iter = 20000,
n.burnin = floor(n.iter/2),
inits = NULL,
n.thin = 1,
ppc = FALSE,
save_model = FALSE,
prior = "default",
...
)
}
\arguments{
\item{data}{A data frame in which to find the variables supplied in \code{model.eff}, \code{model.cost} (model formulas for effects and costs)
and \code{model.me}, \code{model.mc} (model formulas for the missing effect and cost models). Among these,
effectiveness, cost and treatment indicator (only two arms) variables must always be provided and named 'e', 'c' and 't', respectively.}
\item{model.eff}{A formula expression in conventional \code{R} linear modelling syntax. The response must be a health economic
effectiveness outcome ('e') whose name must correspond to that used in \code{data}. Any covariates in the model must be provided on the right-hand side of the formula.
If there are no covariates, \code{1} should be specified on the right hand side of the formula. By default, covariates are placed on the "location" parameter of the distribution through a linear model.
Random effects can also be specified for each model parameter. See details for how these can be specified.}
\item{model.cost}{A formula expression in conventional \code{R} linear modelling syntax. The response must be a health economic
cost outcome ('c') whose name must correspond to that used in \code{data}. Any covariates in the model must be provided on the right-hand side of the formula.
If there are no covariates, \code{1} should be specified on the right hand side of the formula. By default, covariates are placed on the "location" parameter of the distribution through a linear model.
A joint bivariate distribution for effects and costs can be specified by including 'e' on the right-hand side of the formula for the costs model.
Random effects can also be specified for each model parameter. See details for how these can be specified.}
\item{model.me}{A formula expression in conventional \code{R} linear modelling syntax. The response must be indicated with the
term 'me'(missing effects) and any covariates must be provided on the right-hand side of the formula. If there are no covariates, \code{1} should be specified on the right hand side of the formula.
By default, covariates are placed on the "probability" parameter for the missing effects through a logistic-linear model.
Random effects can also be specified for each model parameter. See details for how these can be specified.}
\item{model.mc}{A formula expression in conventional \code{R} linear modelling syntax. The response must be indicated with the term 'mc'(missing costs) and any covariates must be provided on the right-hand side of the formula.
If there are no covariates, \code{1} should be specified on the right hand side of the formula. By default, covariates are placed on the "probability" parameter for the missing costs through a logistic-linear model.
Random effects can also be specified for each model parameter. See details for how these can be specified.}
\item{dist_e}{Distribution assumed for the effects. Currently available choices are: Normal ('norm'), Beta ('beta'), Gamma ('gamma'), Exponential ('exp'),
Weibull ('weibull'), Logistic ('logis'), Poisson ('pois'), Negative Binomial ('nbinom') or Bernoulli ('bern').}
\item{dist_c}{Distribution assumed for the costs. Currently available choices are: Normal ('norm'), Gamma ('gamma') or LogNormal ('lnorm').}
\item{type}{Type of missingness mechanism assumed. Choices are Missing At Random (MAR) and Missing Not At Random (MNAR).}
\item{prob}{A numeric vector of probabilities within the range (0,1), representing the upper and lower
CI sample quantiles to be calculated and returned for the imputed values.}
\item{n.chains}{Number of chains.}
\item{n.iter}{Number of iterations.}
\item{n.burnin}{Number of warmup iterations.}
\item{inits}{A list with elements equal to the number of chains selected; each element of the list is itself a list of starting values for the
\code{JAGS} model, or a function creating (possibly random) initial values. If \code{inits} is \code{NULL}, \code{JAGS}
will generate initial values for all the model parameters.}
\item{n.thin}{Thinning interval.}
\item{ppc}{Logical. If \code{ppc} is \code{TRUE}, the estimates of the parameters that can be used to generate replications from the model are saved.}
\item{save_model}{Logical. If \code{save_model} is \code{TRUE}, a \code{txt} file containing the model code is printed
in the current working directory.}
\item{prior}{A list containing the hyperprior values provided by the user. Each element of this list must be a vector of length two
containing the user-provided hyperprior values and must be named with the name of the corresponding parameter. For example, the hyperprior
values for the standard deviation effect parameters can be provided using the list \code{prior = list('sigma.prior.e' = c(0, 100))}.
For more information about how to provide prior hypervalues for different types of parameters and models see details.
If \code{prior} is set to 'default', the default values will be used.}
\item{...}{Additional arguments that can be provided by the user. Examples are \code{center = TRUE} to center all the covariates in the model
or the additional arguments that can be provided to the function \code{\link[BCEA]{bcea}} to summarise the health economic evaluation results.}
}
\value{
An object of the class 'missingHE' containing the following elements
\describe{
\item{data_set}{A list containing the original data set provided in \code{data} (see Arguments), the number of observed and missing individuals
, the total number of individuals by treatment arm and the indicator vectors for the missing values}
\item{model_output}{A list containing the output of a \code{JAGS} model generated from the functions \code{\link[R2jags]{jags}}, and
the posterior samples for the main parameters of the model and the imputed values}
\item{cea}{A list containing the output of the economic evaluation performed using the function \code{\link[BCEA]{bcea}}}
\item{type}{A character variable that indicate which type of missingness mechanism has been used to run the model,
either \code{MAR} or \code{MNAR} (see details)}
}
}
\description{
Full Bayesian cost-effectiveness models to handle missing data in the outcomes under different missing data
mechanism assumptions, using alternative parametric distributions for the effect and cost variables and
using a selection model approach to identify the model. The analysis is performed using the \code{BUGS} language,
which is implemented in the software \code{JAGS} using the function \code{\link[R2jags]{jags}}. The output is stored in an object of class 'missingHE'.
}
\details{
Depending on the distributions specified for the outcome variables in the arguments \code{dist_e} and
\code{dist_c} and the type of missingness mechanism specified in the argument \code{type}, different selection models
are built and run in the background by the function \code{selection}. These models consist in logistic regressions that are used to estimate
the probability of missingness in one or both the outcomes. A simple example can be used to show how selection models are specified.
Consider a data set comprising a response variable \eqn{y} and a set of centered covariate \eqn{X_j}. For each subject in the trial \eqn{i = 1, ..., n}
we define an indicator variable \eqn{m_i} taking value \code{1} if the \eqn{i}-th individual is associated with a missing value and \code{0} otherwise.
This is modelled as:
\deqn{m_i ~ Bernoulli(\pi_i)}
\deqn{logit(\pi_i) = \gamma_0 + \sum\gamma_j X_j + \delta(y)}
where
\itemize{
\item \eqn{\pi_i} is the individual probability of a missing value in \eqn{y}
\item \eqn{\gamma_0} represents the marginal probability of a missing value in \eqn{y} on the logit scale.
\item \eqn{\gamma_j} represents the impact on the probability of a missing value in \eqn{y} of the centered covariates \eqn{X_j}.
\item \eqn{\delta} represents the impact on the probability of a missing value in \eqn{y} of the missing value itself.
}
When \eqn{\delta = 0} the model assumes a 'MAR' mechanism, while when \eqn{\delta != 0} the mechanism is 'MNAR'. For the parameters indexing the missingness model,
the default prior distributions assumed are the following:
\itemize{
\item \eqn{\gamma_0 ~ Logistic(0, 1)}
\item \eqn{\gamma_j ~ Normal(0, 0.01)}
\item \eqn{\delta ~ Normal(0, 1)}
}
When user-defined hyperprior values are supplied via the argument \code{prior} in the function \code{selection}, the elements of this list (see Arguments)
must be vectors of length two containing the user-provided hyperprior values and must take specific names according to the parameters they are associated with.
Specifically, the names for the parameters indexing the model which are accepted by \strong{missingHE} are the following:
\itemize{
\item location parameters \eqn{\alpha_0} and \eqn{\beta_0}: "mean.prior.e"(effects) and/or "mean.prior.c"(costs)
\item auxiliary parameters \eqn{\sigma}: "sigma.prior.e"(effects) and/or "sigma.prior.c"(costs)
\item covariate parameters \eqn{\alpha_j} and \eqn{\beta_j}: "alpha.prior"(effects) and/or "beta.prior"(costs)
\item marginal probability of missing values \eqn{\gamma_0}: "p.prior.e"(effects) and/or "p.prior.c"(costs)
\item covariate parameters in the missingness model \eqn{\gamma_j} (if covariate data provided): "gamma.prior.e"(effects) and/or "gamma.prior.c"(costs)
\item mnar parameter \eqn{\delta}: "delta.prior.e"(effects) and/or "delta.prior.c"(costs)
}
For simplicity, here we have assumed that the set of covariates \eqn{X_j} used in the models for the effects/costs and in the
model of the missing effect/cost values is the same. However, it is possible to specify different sets of covariates for each model
using the arguments in the function \code{selection} (see Arguments).
For each model, random effects can also be specified for each parameter by adding the term + (x | z) to each model formula,
where x is the fixed regression coefficient for which also the random effects are desired and z is the clustering variable across which
the random effects are specified (must be the name of a factor variable in the dataset). Multiple random effects can be specified using the
notation + (x1 + x2 | site) for each covariate that was included in the fixed effects formula. Random intercepts are included by default in the models
if a random effects are specified but they can be removed by adding the term 0 within the random effects formula, e.g. + (0 + x | z).
}
\examples{
# Quick example to run using a subset of the MenSS dataset
MenSS.subset <- MenSS[50:100, ]
# Run the model using the selection function assuming a SCAR mechanism
# Use only 100 iterations to run a quick check
model.selection <- selection(data = MenSS.subset, model.eff = e ~ 1,model.cost = c ~ 1,
model.me = me ~ 1, model.mc = mc ~ 1, dist_e = "norm", dist_c = "norm",
type = "MAR", n.chains = 2, n.iter = 100, ppc = TRUE)
# Print the results of the JAGS model
print(model.selection)
#
# Use dic information criterion to assess model fit
pic.dic <- pic(model.selection, criterion = "dic", module = "total")
pic.dic
#
# Extract regression coefficient estimates
coef(model.selection)
#
\dontshow{
# Use waic information criterion to assess model fit
pic.waic <- pic(model.selection, criterion = "waic", module = "total")
pic.waic
}
# Assess model convergence using graphical tools
# Produce histograms of the posterior samples for the mean effects
diag.hist <- diagnostic(model.selection, type = "histogram", param = "mu.e")
#
# Compare observed effect data with imputations from the model
# using plots (posterior means and credible intervals)
p1 <- plot(model.selection, class = "scatter", outcome = "effects")
#
# Summarise the CEA information from the model
summary(model.selection)
\donttest{
# Further examples which take longer to run
model.selection <- selection(data = MenSS, model.eff = e ~ u.0,model.cost = c ~ e,
model.me = me ~ u.0, model.mc = mc ~ 1, dist_e = "norm", dist_c = "norm",
type = "MAR", n.chains = 2, n.iter = 500, ppc = FALSE)
#
# Print results for all imputed values
print(model.selection, value.mis = TRUE)
# Use looic to assess model fit
pic.looic<-pic(model.selection, criterion = "looic", module = "total")
pic.looic
# Show density plots for all parameters
diag.hist <- diagnostic(model.selection, type = "denplot", param = "all")
# Plots of imputations for all data
p1 <- plot(model.selection, class = "scatter", outcome = "all")
# Summarise the CEA results
summary(model.selection)
}
#
#
}
\references{
Daniels, MJ. Hogan, JW. \emph{Missing Data in Longitudinal Studies: strategies for Bayesian modelling and sensitivity analysis}, CRC/Chapman Hall.
Baio, G.(2012). \emph{Bayesian Methods in Health Economics}. CRC/Chapman Hall, London.
Gelman, A. Carlin, JB., Stern, HS. Rubin, DB.(2003). \emph{Bayesian Data Analysis, 2nd edition}, CRC Press.
Plummer, M. \emph{JAGS: A program for analysis of Bayesian graphical models using Gibbs sampling.} (2003).
}
\seealso{
\code{\link[R2jags]{jags}}, \code{\link[BCEA]{bcea}}
}
\author{
Andrea Gabrio
}
\keyword{CEA}
\keyword{JAGS}
\keyword{Models}
\keyword{Selection}
\keyword{data}
\keyword{missing}
| /man/selection.Rd | no_license | Diarmuid78/missingHE | R | false | true | 13,694 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selection.R
\name{selection}
\alias{selection}
\title{Full Bayesian Models to handle missingness in Economic Evaluations (Selection Models)}
\usage{
selection(
data,
model.eff,
model.cost,
model.me = me ~ 1,
model.mc = mc ~ 1,
dist_e,
dist_c,
type,
prob = c(0.025, 0.975),
n.chains = 2,
n.iter = 20000,
n.burnin = floor(n.iter/2),
inits = NULL,
n.thin = 1,
ppc = FALSE,
save_model = FALSE,
prior = "default",
...
)
}
\arguments{
\item{data}{A data frame in which to find the variables supplied in \code{model.eff}, \code{model.cost} (model formulas for effects and costs)
and \code{model.me}, \code{model.mc} (model formulas for the missing effect and cost models). Among these,
effectiveness, cost and treatment indicator (only two arms) variables must always be provided and named 'e', 'c' and 't', respectively.}
\item{model.eff}{A formula expression in conventional \code{R} linear modelling syntax. The response must be a health economic
effectiveness outcome ('e') whose name must correspond to that used in \code{data}. Any covariates in the model must be provided on the right-hand side of the formula.
If there are no covariates, \code{1} should be specified on the right hand side of the formula. By default, covariates are placed on the "location" parameter of the distribution through a linear model.
Random effects can also be specified for each model parameter. See details for how these can be specified.}
\item{model.cost}{A formula expression in conventional \code{R} linear modelling syntax. The response must be a health economic
cost outcome ('c') whose name must correspond to that used in \code{data}. Any covariates in the model must be provided on the right-hand side of the formula.
If there are no covariates, \code{1} should be specified on the right hand side of the formula. By default, covariates are placed on the "location" parameter of the distribution through a linear model.
A joint bivariate distribution for effects and costs can be specified by including 'e' on the right-hand side of the formula for the costs model.
Random effects can also be specified for each model parameter. See details for how these can be specified.}
\item{model.me}{A formula expression in conventional \code{R} linear modelling syntax. The response must be indicated with the
term 'me'(missing effects) and any covariates must be provided on the right-hand side of the formula. If there are no covariates, \code{1} should be specified on the right hand side of the formula.
By default, covariates are placed on the "probability" parameter for the missing effects through a logistic-linear model.
Random effects can also be specified for each model parameter. See details for how these can be specified.}
\item{model.mc}{A formula expression in conventional \code{R} linear modelling syntax. The response must be indicated with the term 'mc'(missing costs) and any covariates must be provided on the right-hand side of the formula.
If there are no covariates, \code{1} should be specified on the right hand side of the formula. By default, covariates are placed on the "probability" parameter for the missing costs through a logistic-linear model.
Random effects can also be specified for each model parameter. See details for how these can be specified.}
\item{dist_e}{Distribution assumed for the effects. Current available choices are: Normal ('norm'), Beta ('beta'), Gamma ('gamma'), Exponential ('exp'),
Weibull ('weibull'), Logistic ('logis'), Poisson ('pois'), Negative Binomial ('nbinom') or Bernoulli ('bern').}
\item{dist_c}{Distribution assumed for the costs. Current available choices are: Normal ('norm'), Gamma ('gamma') or LogNormal ('lnorm').}
\item{type}{Type of missingness mechanism assumed. Choices are Missing At Random (MAR) and Missing Not At Random (MNAR).}
\item{prob}{A numeric vector of probabilities within the range (0,1), representing the upper and lower
CI sample quantiles to be calculated and returned for the imputed values.}
\item{n.chains}{Number of chains.}
\item{n.iter}{Number of iterations.}
\item{n.burnin}{Number of warmup iterations.}
\item{inits}{A list with elements equal to the number of chains selected; each element of the list is itself a list of starting values for the
\code{JAGS} model, or a function creating (possibly random) initial values. If \code{inits} is \code{NULL}, \code{JAGS}
will generate initial values for all the model parameters.}
\item{n.thin}{Thinning interval.}
\item{ppc}{Logical. If \code{ppc} is \code{TRUE}, the estimates of the parameters that can be used to generate replications from the model are saved.}
\item{save_model}{Logical. If \code{save_model} is \code{TRUE}, a \code{txt} file containing the model code is printed
in the current working directory.}
\item{prior}{A list containing the hyperprior values provided by the user. Each element of this list must be a vector of length two
containing the user-provided hyperprior values and must be named with the name of the corresponding parameter. For example, the hyperprior
values for the standard deviation effect parameters can be provided using the list \code{prior = list('sigma.prior.e' = c(0, 100))}.
For more information about how to provide prior hypervalues for different types of parameters and models see details.
If \code{prior} is set to 'default', the default values will be used.}
\item{...}{Additional arguments that can be provided by the user. Examples are \code{center = TRUE} to center all the covariates in the model
or the additional arguments that can be provided to the function \code{\link[BCEA]{bcea}} to summarise the health economic evaluation results.}
}
\value{
An object of the class 'missingHE' containing the following elements
\describe{
\item{data_set}{A list containing the original data set provided in \code{data} (see Arguments), the number of observed and missing individuals
, the total number of individuals by treatment arm and the indicator vectors for the missing values}
\item{model_output}{A list containing the output of a \code{JAGS} model generated from the functions \code{\link[R2jags]{jags}}, and
the posterior samples for the main parameters of the model and the imputed values}
\item{cea}{A list containing the output of the economic evaluation performed using the function \code{\link[BCEA]{bcea}}}
\item{type}{A character variable that indicate which type of missingness mechanism has been used to run the model,
either \code{MAR} or \code{MNAR} (see details)}
}
}
\description{
Full Bayesian cost-effectiveness models to handle missing data in the outcomes under different missing data
mechanism assumptions, using alternative parametric distributions for the effect and cost variables and
using a selection model approach to identify the model. The analysis is performed using the \code{BUGS} language,
which is implemented in the software \code{JAGS} using the function \code{\link[R2jags]{jags}}. The output is stored in an object of class 'missingHE'.
}
\details{
Depending on the distributions specified for the outcome variables in the arguments \code{dist_e} and
\code{dist_c} and the type of missingness mechanism specified in the argument \code{type}, different selection models
are built and run in the background by the function \code{selection}. These models consist in logistic regressions that are used to estimate
the probability of missingness in one or both the outcomes. A simple example can be used to show how selection models are specified.
Consider a data set comprising a response variable \eqn{y} and a set of centered covariate \eqn{X_j}. For each subject in the trial \eqn{i = 1, ..., n}
we define an indicator variable \eqn{m_i} taking value \code{1} if the \eqn{i}-th individual is associated with a missing value and \code{0} otherwise.
This is modelled as:
\deqn{m_i ~ Bernoulli(\pi_i)}
\deqn{logit(\pi_i) = \gamma_0 + \sum\gamma_j X_j + \delta(y)}
where
\itemize{
\item \eqn{\pi_i} is the individual probability of a missing value in \eqn{y}
\item \eqn{\gamma_0} represents the marginal probability of a missing value in \eqn{y} on the logit scale.
\item \eqn{\gamma_j} represents the impact on the probability of a missing value in \eqn{y} of the centered covariates \eqn{X_j}.
\item \eqn{\delta} represents the impact on the probability of a missing value in \eqn{y} of the missing value itself.
}
When \eqn{\delta = 0} the model assumes a 'MAR' mechanism, while when \eqn{\delta != 0} the mechanism is 'MNAR'. For the parameters indexing the missingness model,
the default prior distributions assumed are the following:
\itemize{
\item \eqn{\gamma_0 ~ Logistic(0, 1)}
\item \eqn{\gamma_j ~ Normal(0, 0.01)}
\item \eqn{\delta ~ Normal(0, 1)}
}
When user-defined hyperprior values are supplied via the argument \code{prior} in the function \code{selection}, the elements of this list (see Arguments)
must be vectors of length two containing the user-provided hyperprior values and must take specific names according to the parameters they are associated with.
Specifically, the names for the parameters indexing the model which are accepted by \strong{missingHE} are the following:
\itemize{
\item location parameters \eqn{\alpha_0} and \eqn{\beta_0}: "mean.prior.e"(effects) and/or "mean.prior.c"(costs)
\item auxiliary parameters \eqn{\sigma}: "sigma.prior.e"(effects) and/or "sigma.prior.c"(costs)
\item covariate parameters \eqn{\alpha_j} and \eqn{\beta_j}: "alpha.prior"(effects) and/or "beta.prior"(costs)
\item marginal probability of missing values \eqn{\gamma_0}: "p.prior.e"(effects) and/or "p.prior.c"(costs)
\item covariate parameters in the missingness model \eqn{\gamma_j} (if covariate data provided): "gamma.prior.e"(effects) and/or "gamma.prior.c"(costs)
\item mnar parameter \eqn{\delta}: "delta.prior.e"(effects) and/or "delta.prior.c"(costs)
}
For simplicity, here we have assumed that the set of covariates \eqn{X_j} used in the models for the effects/costs and in the
model of the missing effect/cost values is the same. However, it is possible to specify different sets of covariates for each model
using the arguments in the function \code{selection} (see Arguments).
For each model, random effects can also be specified for each parameter by adding the term + (x | z) to each model formula,
where x is the fixed regression coefficient for which also the random effects are desired and z is the clustering variable across which
the random effects are specified (must be the name of a factor variable in the dataset). Multiple random effects can be specified using the
notation + (x1 + x2 | site) for each covariate that was included in the fixed effects formula. Random intercepts are included by default in the models
if a random effects are specified but they can be removed by adding the term 0 within the random effects formula, e.g. + (0 + x | z).
}
\examples{
# Quick example to run using a subset of the MenSS dataset
MenSS.subset <- MenSS[50:100, ]
# Run the model using the selection function assuming a SCAR mechanism
# Use only 100 iterations to run a quick check
model.selection <- selection(data = MenSS.subset, model.eff = e ~ 1,model.cost = c ~ 1,
model.me = me ~ 1, model.mc = mc ~ 1, dist_e = "norm", dist_c = "norm",
type = "MAR", n.chains = 2, n.iter = 100, ppc = TRUE)
# Print the results of the JAGS model
print(model.selection)
#
# Use dic information criterion to assess model fit
pic.dic <- pic(model.selection, criterion = "dic", module = "total")
pic.dic
#
# Extract regression coefficient estimates
coef(model.selection)
#
\dontshow{
# Use waic information criterion to assess model fit
pic.waic <- pic(model.selection, criterion = "waic", module = "total")
pic.waic
}
# Assess model convergence using graphical tools
# Produce histograms of the posterior samples for the mean effects
diag.hist <- diagnostic(model.selection, type = "histogram", param = "mu.e")
#
# Compare observed effect data with imputations from the model
# using plots (posterior means and credible intervals)
p1 <- plot(model.selection, class = "scatter", outcome = "effects")
#
# Summarise the CEA information from the model
summary(model.selection)
\donttest{
# Further examples which take longer to run
model.selection <- selection(data = MenSS, model.eff = e ~ u.0,model.cost = c ~ e,
model.me = me ~ u.0, model.mc = mc ~ 1, dist_e = "norm", dist_c = "norm",
type = "MAR", n.chains = 2, n.iter = 500, ppc = FALSE)
#
# Print results for all imputed values
print(model.selection, value.mis = TRUE)
# Use looic to assess model fit
pic.looic<-pic(model.selection, criterion = "looic", module = "total")
pic.looic
# Show density plots for all parameters
diag.hist <- diagnostic(model.selection, type = "denplot", param = "all")
# Plots of imputations for all data
p1 <- plot(model.selection, class = "scatter", outcome = "all")
# Summarise the CEA results
summary(model.selection)
}
#
#
}
\references{
Daniels, MJ. Hogan, JW. \emph{Missing Data in Longitudinal Studies: strategies for Bayesian modelling and sensitivity analysis}, CRC/Chapman Hall.
Baio, G.(2012). \emph{Bayesian Methods in Health Economics}. CRC/Chapman Hall, London.
Gelman, A. Carlin, JB., Stern, HS. Rubin, DB.(2003). \emph{Bayesian Data Analysis, 2nd edition}, CRC Press.
Plummer, M. \emph{JAGS: A program for analysis of Bayesian graphical models using Gibbs sampling.} (2003).
}
\seealso{
\code{\link[R2jags]{jags}}, \code{\link[BCEA]{bcea}}
}
\author{
Andrea Gabrio
}
\keyword{CEA}
\keyword{JAGS}
\keyword{Models}
\keyword{Selection}
\keyword{data}
\keyword{missing}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genotypeQC.R
\name{removedParentIdsMiss}
\alias{removedParentIdsMiss}
\title{Reset paternal and maternal codes}
\usage{
removedParentIdsMiss(plink, inputPrefix, outputPrefix)
}
\arguments{
\item{plink}{an executable program in either the current working directory
or somewhere in the command path.}
\item{inputPrefix}{the prefix of the input PLINK binary files.}
\item{outputPrefix}{the prefix of the output PLINK binary files.}
}
\value{
The output PLINK binary files.
}
\description{
Reset paternal and maternal codes of non-founders if parents not present.
Replace the paternal ID and maternal ID of subjects (childs) by the
value zero if the paternal ID and the maternal ID do not belong to any
subject (parent) with the same family ID as the child.
}
\details{
Do make sure that all your family relationships are correct
in your input data before applying this function. By default,
if parental IDs are provided for a sample,
they are not treated as a founder even if neither parent is
in the dataset. With no modifiers, --make-founders clears
both parental IDs whenever at least one parent is not in the dataset,
and the affected samples are now considered as founders.
}
\examples{
## In the current working directory
bedFile <- system.file("extdata", "genoUpdatedData.bed", package="Gimpute")
bimFile <- system.file("extdata", "genoUpdatedData.bim", package="Gimpute")
famFile <- system.file("extdata", "genoUpdatedData.fam", package="Gimpute")
system(paste("scp", bedFile, bimFile, famFile, "."))
inputPrefix <- "genoUpdatedData"
outputPrefix <- "2_07_removedParentIdsMiss"
## Not run: Requires an executable program PLINK, e.g.
## plink <- "/home/tools/plink"
## removedParentIdsMiss(plink, inputPrefix, outputPrefix)
}
\author{
Junfang Chen
}
| /man/removedParentIdsMiss.Rd | no_license | transbioZI/Gimpute | R | false | true | 1,851 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genotypeQC.R
\name{removedParentIdsMiss}
\alias{removedParentIdsMiss}
\title{Reset paternal and maternal codes}
\usage{
removedParentIdsMiss(plink, inputPrefix, outputPrefix)
}
\arguments{
\item{plink}{an executable program in either the current working directory
or somewhere in the command path.}
\item{inputPrefix}{the prefix of the input PLINK binary files.}
\item{outputPrefix}{the prefix of the output PLINK binary files.}
}
\value{
The output PLINK binary files.
}
\description{
Reset paternal and maternal codes of non-founders if parents not present.
Replace the paternal ID and maternal ID of subjects (childs) by the
value zero if the paternal ID and the maternal ID do not belong to any
subject (parent) with the same family ID as the child.
}
\details{
Do make sure that all your family relationships are correct
in your input data before applying this function. By default,
if parental IDs are provided for a sample,
they are not treated as a founder even if neither parent is
in the dataset. With no modifiers, --make-founders clears
both parental IDs whenever at least one parent is not in the dataset,
and the affected samples are now considered as founders.
}
\examples{
## In the current working directory
bedFile <- system.file("extdata", "genoUpdatedData.bed", package="Gimpute")
bimFile <- system.file("extdata", "genoUpdatedData.bim", package="Gimpute")
famFile <- system.file("extdata", "genoUpdatedData.fam", package="Gimpute")
system(paste("scp", bedFile, bimFile, famFile, "."))
inputPrefix <- "genoUpdatedData"
outputPrefix <- "2_07_removedParentIdsMiss"
## Not run: Requires an executable program PLINK, e.g.
## plink <- "/home/tools/plink"
## removedParentIdsMiss(plink, inputPrefix, outputPrefix)
}
\author{
Junfang Chen
}
|
# ---- Setup: packages, working directory, and input data ----
library(dplyr)
library(tidyr)
library(lubridate)
library(cdcfluview)
library(MASS)
library(ggplot2)
library(mgcv)
library(zoo)
### Set your working directory as the "[1] Create_dataframe" folder throughout all the code
setwd("[1] Create_dataframe")
source("mortality.R") ## downloads and arranges mortality data (used below as `mortality`)
source("flumarker.R") ## creates weekly flu incidence proxies for later model adjustment
source("../data/misc/keep_state.R") ## defines `keep_state`: states with sufficient cause-specific info
load('combined_flu.rda')
### combined_flu has a weekly flu incidence proxy (flumarker) for each week and state, 2014-present
### later on, we consider that flu is zero during COVID-19, March 2020 to December 2021
statepop <- read.csv("../data/population/State Population till 2022.csv", stringsAsFactors = FALSE)
state.abrv <-read.csv("../data/misc/state abbreviations.csv", stringsAsFactors = FALSE)
# Merge weekly mortality with the flu proxy, state population, and state
# abbreviations, keep states with usable cause-specific data through
# September 2022, and derive natural-cause deaths (all-cause minus external).
combine1 <- mortality %>%
  left_join(combined_flu, by = c("region", "year", "week")) %>%
  merge(statepop, by = c("region", "year")) %>%
  merge(state.abrv, by = "region") %>%
  filter(region %in% keep_state, date <= "2022-09-30") %>%
  arrange(region, date) %>%
  mutate(season = as.factor(season),
         # Weeks with no flu report are treated as zero incidence.
         flumarker = coalesce(flumarker, 0),
         natural = all_cause - external) %>%
  dplyr::select(region, abbreviation, location, season, week, date, population, flumarker,
                all_cause, alzheimers, cancer, cerebrovascular, diabetes, heart_disease,
                resp.covid, resp.covid.ex, external, natural,
                covid.mort, covid.mort.ex, covid.multiple)
##################### Update flumarker######################################################
# Clean the flumarker indicator.
# na.spline previously overestimated flumarker in December because there were
# too few data points, so a seasonal GAM is used instead.
# For each state, impute missing pre-pandemic flumarker values from a seasonal
# GAM (smooth over week-of-season plus a season effect, fitted on the log
# scale) and set flumarker to 0 from March 2020 onward when missing.
# NOTE(review): combine1 above already replaces NA flumarker with 0, so the
# is.na() branches below may never trigger -- confirm the intended order of
# these two cleaning steps.
combine2 <- lapply(split(combine1, combine1$region), function(x) {
  # Add a within-season week counter (1..n), the input to the GAM smoother.
  x2 <- x %>%
    arrange(date) %>%
    group_by(season) %>%
    mutate(seasonweek=1:n()) %>%
    ungroup %>%
    mutate(season=as.factor(season))
  # Fit only on observed, pre-COVID weeks (before March 2020).
  gfit <- gam(log(flumarker+1)~s(seasonweek)+season, data=filter(x2, date<"2020-03-01", !is.na(flumarker)))
  pred <- predict(gfit, newdata=filter(x2, date<"2020-03-01"))
  flumarker <- x2$flumarker
  # Impute missing pre-COVID values from the GAM (back-transformed), floored at 0.
  flumarker2 <- flumarker[x2$date < "2020-03-01"]
  flumarker2[is.na(flumarker2)] <- exp(pred[is.na(flumarker2)])-1
  flumarker2[flumarker2 < 0] <- 0
  # Splice the imputed pre-COVID stretch back in place; this relies on
  # arrange(date) above so pre-COVID rows occupy positions 1..length(flumarker2).
  flumarker[1:length(flumarker2)] <- flumarker2
  # From March 2020 on, missing flu incidence is assumed to be zero.
  flumarker[x2$date >= "2020-03-01"][is.na(flumarker[x2$date >= "2020-03-01"])] <- 0
  x2$flumarker <- flumarker
  x2
}) %>%
  bind_rows
### Take a centered moving average of weekly counts to stabilize death counts
kma <- 5 ## moving-average window, in weeks

# Outcome columns to smooth; each gains a companion "<name>.roll" column.
roll_cols <- c("all_cause", "alzheimers", "cancer", "cerebrovascular",
               "diabetes", "heart_disease", "resp.covid", "resp.covid.ex",
               "external", "natural", "covid.mort", "covid.mort.ex",
               "covid.multiple")

df_all5 <- combine2 %>%
  group_by(region) %>%
  # One across() call replaces thirteen near-identical rollmean() lines;
  # fill = "extend" repeats the first/last smoothed value at the series edges.
  mutate(across(all_of(roll_cols),
                ~ rollmean(x = .x, k = kma, align = "center", fill = "extend"),
                .names = "{.col}.roll")) %>%
  dplyr::select(region, abbreviation, location, season, week, date, population, flumarker,
                all_cause, all_cause.roll,
                alzheimers, alzheimers.roll,
                cancer, cancer.roll,
                cerebrovascular, cerebrovascular.roll,
                diabetes, diabetes.roll,
                heart_disease, heart_disease.roll,
                resp.covid, resp.covid.roll, resp.covid.ex.roll,
                external, external.roll,
                natural, natural.roll,
                covid.mort, covid.mort.roll, covid.mort.ex.roll,
                covid.multiple, covid.multiple.roll) %>%
  ungroup()

save("df_all5", file = "../data/df_all5.rda")
| /[1] Create_dataframe/[1.1] Create dataframe.R | permissive | viboudc/DirectIndirectCOVID19MortalityEstimation | R | false | false | 4,763 | r | library(dplyr)
library(tidyr)
library(lubridate)
library(cdcfluview)
library(MASS)
library(ggplot2)
library(mgcv)
library(zoo)
### Set your working directory as the "[1] Create_dataframe" folder throughout all the code
setwd("[1] Create_dataframe")
source("mortality.R") ## downloads and arranges mortality data (used below as `mortality`)
source("flumarker.R") ## creates weekly flu incidence proxies for later model adjustment
source("../data/misc/keep_state.R") ## defines `keep_state`: states with sufficient cause-specific info
load('combined_flu.rda')
### combined_flu has a weekly flu incidence proxy (flumarker) for each week and state, 2014-present
### later on, we consider that flu is zero during COVID-19, March 2020 to December 2021
statepop <- read.csv("../data/population/State Population till 2022.csv", stringsAsFactors = FALSE)
state.abrv <-read.csv("../data/misc/state abbreviations.csv", stringsAsFactors = FALSE)
# Merge weekly mortality with the flu proxy, state population, and state
# abbreviations, keep states with usable cause-specific data through
# September 2022, and derive natural-cause deaths (all-cause minus external).
combine1 <- mortality %>%
  left_join(combined_flu, by = c("region", "year", "week")) %>%
  merge(statepop, by = c("region", "year")) %>%
  merge(state.abrv, by = "region") %>%
  filter(region %in% keep_state, date <= "2022-09-30") %>%
  arrange(region, date) %>%
  mutate(season = as.factor(season),
         # Weeks with no flu report are treated as zero incidence.
         flumarker = coalesce(flumarker, 0),
         natural = all_cause - external) %>%
  dplyr::select(region, abbreviation, location, season, week, date, population, flumarker,
                all_cause, alzheimers, cancer, cerebrovascular, diabetes, heart_disease,
                resp.covid, resp.covid.ex, external, natural,
                covid.mort, covid.mort.ex, covid.multiple)
##################### Update flumarker######################################################
# Clean the flumarker indicator.
# na.spline previously overestimated flumarker in December because there were
# too few data points, so a seasonal GAM is used instead.
combine2 <- lapply(split(combine1, combine1$region), function(x) {
x2 <- x %>%
arrange(date) %>%
group_by(season) %>%
mutate(seasonweek=1:n()) %>%
ungroup %>%
mutate(season=as.factor(season))
gfit <- gam(log(flumarker+1)~s(seasonweek)+season, data=filter(x2, date<"2020-03-01", !is.na(flumarker)))
pred <- predict(gfit, newdata=filter(x2, date<"2020-03-01"))
flumarker <- x2$flumarker
flumarker2 <- flumarker[x2$date < "2020-03-01"]
flumarker2[is.na(flumarker2)] <- exp(pred[is.na(flumarker2)])-1
flumarker2[flumarker2 < 0] <- 0
flumarker[1:length(flumarker2)] <- flumarker2
flumarker[x2$date >= "2020-03-01"][is.na(flumarker[x2$date >= "2020-03-01"])] <- 0
x2$flumarker <- flumarker
x2
}) %>%
bind_rows
### Take a moving average of weekly counts to stabililize deaths counts
kma=5 ## 5 wk moving avg
df_all5 <- combine2 %>%
group_by(region) %>%
mutate(all_cause.roll = rollmean(x=all_cause, k=kma, align='center', fill='extend'),
alzheimers.roll = rollmean(x=alzheimers, k=kma, align='center', fill='extend'),
cancer.roll = rollmean(x=cancer, k=kma, align='center', fill='extend'),
cerebrovascular.roll = rollmean(x=cerebrovascular, k=kma, align='center', fill='extend'),
diabetes.roll = rollmean(x=diabetes, k=kma, align='center', fill='extend'),
heart_disease.roll = rollmean(x=heart_disease, k=kma, align='center', fill='extend'),
resp.covid.roll = rollmean(x=resp.covid, k=kma, align='center', fill='extend'),
resp.covid.ex.roll = rollmean(x=resp.covid.ex, k=kma, align='center', fill='extend'),
external.roll = rollmean(x=external, k=kma, align='center', fill='extend'),
natural.roll = rollmean(x=natural, k=kma, align='center', fill='extend'),
covid.mort.roll = rollmean(x=covid.mort, k=kma, align='center', fill='extend'),
covid.mort.ex.roll = rollmean(x=covid.mort.ex, k=kma, align='center', fill='extend'),
covid.multiple.roll = rollmean(x=covid.multiple, k=kma, align='center', fill='extend')
) %>%
dplyr::select(region, abbreviation, location, season, week, date, population, flumarker,
all_cause, all_cause.roll,
alzheimers, alzheimers.roll,
cancer, cancer.roll,
cerebrovascular, cerebrovascular.roll,
diabetes,diabetes.roll,
heart_disease, heart_disease.roll,
resp.covid, resp.covid.roll, resp.covid.ex.roll,
external, external.roll,
natural, natural.roll,
covid.mort, covid.mort.roll, covid.mort.ex.roll,
covid.multiple, covid.multiple.roll) %>%
ungroup()
save('df_all5', file="../data/df_all5.rda")
|
## Homework 6 - Analyzing congressional speech
# Before starting on the questions we need to do a little setup
# Import the 'textir' library
library(textir)
# Pull in the congress109 data
data(congress109)
# This brings in two sets: congress109Counts and congress109Ideology
# congress109Counts has representatives as the rows and speech n-grams
# as the columns with each intersection representing a count
# congress109Ideology has information on each representative including
# party, state, chamber, repshare => the share of the representatives
# district that voted for George Bush in 2004, cs1, cs2 => show a
# measure of how strongly a given representative votes along party lines
## Q1 - Fit K -means to speech text for K in 5,10,15,20,25. Use BIC to
## choose the K and interpret the selected model.
# Following the w8there example, we scale the counts by computing the
# frequency they appear relative to the average.
# (scale() then centers/rescales each n-gram frequency column)
cong.counts.scaled <- scale(as.matrix( congress109Counts/rowSums(congress109Counts) ))
# Compute k-means for 5,10,15,20,25 groups
# (seed fixed so the random k-means starts are reproducible)
set.seed(823)
kfit.ccs <- lapply(c(5,10,15,20,25), function(k) kmeans(cong.counts.scaled,k))
# Use the kIC script to choose the appropriate # of clusters
source("../Utility Scripts/kIC.R")
kbic.ccs <- sapply(kfit.ccs,kIC,"B")
# Let's plot to see what it looks like
kaicc.ccs <- sapply(kfit.ccs,kIC)
## plot 'em
# png('aic_bic_vs_k.png')
plot(c(5,10,15,20,25),kaicc.ccs, xlab="K", ylab="IC",
     main="IC vs Number of clusters (K)",ylim=range(c(kaicc.ccs,kbic.ccs)),xlim=c(5,25),
     bty="n", type="l", lwd=2)
# which.min() returns the index (1..5) into the K grid, so multiply by 5
# to convert it back to the corresponding K value
abline(v=which.min(kaicc.ccs)*5,lty=2)
lines(c(5,10,15,20,25),kbic.ccs, col=4, lwd=2)
abline(v=which.min(kbic.ccs)*5,col=4,lty=2)
legend(6,600000,c("AICc","AICc Min","BIC","BIC Min"),lty=c(1,2,1,2),col=c("black","black","blue","blue"))
# dev.off()
# Not a good picture as aicc appears to select a very complex model with
# >25 clusters and bic looks to select potentially no models at all.
# Within the bounds of this problem, we use BIC to select 5 clusters.
# kfit.ccs[[1]] is the K = 5 fit (first element of the K grid above)
kfit.ccs.5c <- kfit.ccs[[1]]
# cluster sizes: number of representatives assigned to each of the 5 clusters
kfit.ccs.5c.slices <- unlist(lapply(1:5, function(x) length(kfit.ccs.5c$cluster[kfit.ccs.5c$cluster==x])))
# build pie-chart labels of the form "Cluster i - <count> ( <pct>%)"
lbls <- c("Cluster 1 - ", "Cluster 2 - ", "Cluster 3 - ", "Cluster 4 - ", "Cluster 5 - ")
pct <- round(kfit.ccs.5c.slices/sum(kfit.ccs.5c.slices)*100)
lbls <- paste(lbls, kfit.ccs.5c.slices)
lbls <- paste(lbls, " (")
lbls <- paste(lbls, pct) # add percents to labels
lbls <- paste(lbls,"%)",sep="") # add % to labels
pie(kfit.ccs.5c.slices,labels = lbls, col=rainbow(length(lbls)),
    main="Share of Representatives by Cluster",clockwise=TRUE,cex=0.75)
# party composition within each cluster
tapply(congress109Ideology$party,kfit.ccs.5c$cluster,table)
# So about 2/3 of cluster 4 are republican and the rest are democrat
# Let's try to get a plot like something we have seen in class showing
# reps plotted in two dimensions and color coded by cluster.
# png('cluster_plot.png')
plot(congress109Ideology$repshare, congress109Ideology$cs1,col=rainbow(5)[kfit.ccs.5c$cluster],type="n",
     main="Share of District Voting for George Bush in 2004 vs\n Voting Record of Representative",xlab="Share for George Bush",ylab="Voting Record")
text(congress109Ideology$repshare, congress109Ideology$cs1, labels=congress109Ideology$party, col=rainbow(5)[kfit.ccs.5c$cluster])
legend(0.1,0.8,c("Cluster 1", "Cluster 2", "Cluster 3", "Cluster 4", "Cluster 5"),lty=c(1,1,1,1,1),col=rainbow(5))
# dev.off()
# inspect cluster 4's centroid: n-grams sorted by their (scaled) weight
cluster4.centers <- kfit.ccs.5c$centers[4,]
cluster4.centers.ordered <- cluster4.centers[order(cluster4.centers,decreasing=TRUE)]
## Q2
# Bring in the topic maps
library(maptpx)
# convert the counts to a sparse simple triplet matrix for topics()
c109.stm <- as.simple_triplet_matrix(congress109Counts)
c109.tpcs <- topics(c109.stm,K=5*(1:5), verb=1)
# Bayes factor is maximized at K=10 so we select that for our topic model
# Print the most frequently used words by topic
# (top 10 highest-probability n-grams per topic, from the theta matrix)
lapply(1:10, function(x) rownames(c109.tpcs$theta)[order(c109.tpcs$theta[,x], decreasing=TRUE)[1:10]])
# Show a word cloud to visualize common words from each topic
library(wordcloud)
wordcloud(row.names(c109.tpcs$theta),
          freq=c109.tpcs$theta[,1], min.freq=0.006, col="maroon")
wordcloud(row.names(c109.tpcs$theta),
          freq=c109.tpcs$theta[,2], min.freq=0.006, col="navy")
| /hw6_congspeech.r | no_license | bd-41201/hw6 | R | false | false | 4,203 | r | ## Homework 6 - Analyzing congressional speech
# Before starting on the questions we need to do a little setup
# Import the 'textir' library
library(textir)
# Pull in the congress109 data
data(congress109)
# This brings in two sets: congress109Counts and congress109Ideology
# congress109Counts has representatives as the rows and speech n-grams
# as the columns with each intersection representing a count
# congress109Ideology has information on each representative including
# party, state, chamber, repshare => the share of the representatives
# district that voted for George Bush in 2004, cs1, cs2 => show a
# measure of how strongly a given representative votes along party lines
## Q1 - Fit K -means to speech text for K in 5,10,15,20,25. Use BIC to
## choose the K and interpret the selected model.
# Following the w8there example, we scale the counts by computing the
# frequency they appear relative to the average.
# (scale() then centers/rescales each n-gram frequency column)
cong.counts.scaled <- scale(as.matrix( congress109Counts/rowSums(congress109Counts) ))
# Compute k-means for 5,10,15,20,25 groups
# (seed fixed so the random k-means starts are reproducible)
set.seed(823)
kfit.ccs <- lapply(c(5,10,15,20,25), function(k) kmeans(cong.counts.scaled,k))
# Use the kIC script to choose the appropriate # of clusters
source("../Utility Scripts/kIC.R")
kbic.ccs <- sapply(kfit.ccs,kIC,"B")
# Let's plot to see what it looks like
kaicc.ccs <- sapply(kfit.ccs,kIC)
## plot 'em
# png('aic_bic_vs_k.png')
plot(c(5,10,15,20,25),kaicc.ccs, xlab="K", ylab="IC",
     main="IC vs Number of clusters (K)",ylim=range(c(kaicc.ccs,kbic.ccs)),xlim=c(5,25),
     bty="n", type="l", lwd=2)
# which.min() returns the index (1..5) into the K grid, so multiply by 5
# to convert it back to the corresponding K value
abline(v=which.min(kaicc.ccs)*5,lty=2)
lines(c(5,10,15,20,25),kbic.ccs, col=4, lwd=2)
abline(v=which.min(kbic.ccs)*5,col=4,lty=2)
legend(6,600000,c("AICc","AICc Min","BIC","BIC Min"),lty=c(1,2,1,2),col=c("black","black","blue","blue"))
# dev.off()
# Not a good picture as aicc appears to select a very complex model with
# >25 clusters and bic looks to select potentially no models at all.
# Within the bounds of this problem, we use BIC to select 5 clusters.
# kfit.ccs[[1]] is the K = 5 fit (first element of the K grid above)
kfit.ccs.5c <- kfit.ccs[[1]]
# cluster sizes: number of representatives assigned to each of the 5 clusters
kfit.ccs.5c.slices <- unlist(lapply(1:5, function(x) length(kfit.ccs.5c$cluster[kfit.ccs.5c$cluster==x])))
# build pie-chart labels of the form "Cluster i - <count> ( <pct>%)"
lbls <- c("Cluster 1 - ", "Cluster 2 - ", "Cluster 3 - ", "Cluster 4 - ", "Cluster 5 - ")
pct <- round(kfit.ccs.5c.slices/sum(kfit.ccs.5c.slices)*100)
lbls <- paste(lbls, kfit.ccs.5c.slices)
lbls <- paste(lbls, " (")
lbls <- paste(lbls, pct) # add percents to labels
lbls <- paste(lbls,"%)",sep="") # add % to labels
pie(kfit.ccs.5c.slices,labels = lbls, col=rainbow(length(lbls)),
    main="Share of Representatives by Cluster",clockwise=TRUE,cex=0.75)
# party composition within each cluster
tapply(congress109Ideology$party,kfit.ccs.5c$cluster,table)
# So about 2/3 of cluster 4 are republican and the rest are democrat
# Let's try to get a plot like something we have seen in class showing
# reps plotted in two dimensions and color coded by cluster.
# png('cluster_plot.png')
plot(congress109Ideology$repshare, congress109Ideology$cs1,col=rainbow(5)[kfit.ccs.5c$cluster],type="n",
     main="Share of District Voting for George Bush in 2004 vs\n Voting Record of Representative",xlab="Share for George Bush",ylab="Voting Record")
text(congress109Ideology$repshare, congress109Ideology$cs1, labels=congress109Ideology$party, col=rainbow(5)[kfit.ccs.5c$cluster])
legend(0.1,0.8,c("Cluster 1", "Cluster 2", "Cluster 3", "Cluster 4", "Cluster 5"),lty=c(1,1,1,1,1),col=rainbow(5))
# dev.off()
# inspect cluster 4's centroid: n-grams sorted by their (scaled) weight
cluster4.centers <- kfit.ccs.5c$centers[4,]
cluster4.centers.ordered <- cluster4.centers[order(cluster4.centers,decreasing=TRUE)]
## Q2
# Bring in the topic maps
library(maptpx)
# convert the counts to a sparse simple triplet matrix for topics()
c109.stm <- as.simple_triplet_matrix(congress109Counts)
c109.tpcs <- topics(c109.stm,K=5*(1:5), verb=1)
# Bayes factor is maximized at K=10 so we select that for our topic model
# Print the most frequently used words by topic
# (top 10 highest-probability n-grams per topic, from the theta matrix)
lapply(1:10, function(x) rownames(c109.tpcs$theta)[order(c109.tpcs$theta[,x], decreasing=TRUE)[1:10]])
# Show a word cloud to visualize common words from each topic
library(wordcloud)
wordcloud(row.names(c109.tpcs$theta),
          freq=c109.tpcs$theta[,1], min.freq=0.006, col="maroon")
wordcloud(row.names(c109.tpcs$theta),
          freq=c109.tpcs$theta[,2], min.freq=0.006, col="navy")
|
setwd("~/GitHub/phd/ra/R files")
source("helpful.functions.R")
library(readxl)
library(writexl)
library(tidyverse)
library(mice)
library(rjags)
setwd("C:/Users/ms19g661/Desktop")
# xlsx files
mydata <- read_excel("ra_dataset.xlsx")
BSRBR <- mydata %>% filter(study == "BSRBR")
SCQM <- mydata %>% filter(study == "SCQM")
TOWARD <- mydata %>% filter(study == "TOWARD")
REFLEX <- mydata %>% filter(study == "REFLEX")
#################
# first stage analysis
# second stage analysis
setwd("C:/Users/ms19g661/Desktop/RData")
load("REFLEX-ApproachI-bayesLASSO.RData")
load("TOWARD-ApproachI-bayesLASSO.RData")
load("BSRBR-ApproachI-bayesLASSO.RData")
load("SCQM-ApproachI-bayesLASSO.Rdata")
# precomputed first-stage summary for TOWARD2: coefficient vector y, its
# covariance Omega, and the covariate means/sds used to unstandardize them
y_TOWARD2 <- c(4.9625, 0.1060, -0.0278, 0.0143, -0.0066, 0.1309, -0.0560, 0.0974, 0.1670, 0.4551,
               -0.0196, 0.0416, -0.0129, -0.0222, -0.0291, -0.0275, -0.0015, -0.0100, -0.0043, -1.6943)
Omega_TOWARD2 <- as.matrix(read_excel("Omega_TOWARD2_bayesLASSO.xlsx", col_names = FALSE))
X_mean <- c(0.83, 52.33, 9.10, 27.74, 0.82, 1.59, 1.53, 46.39, 6.54)
X_sd <- c(0.38, 12.11, 8.18, 6.44, 0.38, 1.46, 0.61, 24.74, 0.96)
setwd("C:/Users/ms19g661/Documents/GitHub/phd/ra/JAGS files") #set the location to where JAGS file exists
###############################################################################
### apparent performance
#find summary mean and covariance matrix for each study
r1 <- summarize_each_study(samples_BSRBR)
r1 <- unstandardize_coefficients(r1, BSRBR)
r2 <- summarize_each_study(samples_SCQM)
r2 <- unstandardize_coefficients(r2, SCQM)
r3 <- summarize_each_study(samples_REFLEX)
r3 <- unstandardize_coefficients(r3, REFLEX)
r4 <- summarize_each_study(samples_TOWARD)
r4 <- unstandardize_coefficients(r4, TOWARD)
r5 <- list(y = y_TOWARD2, Omega = Omega_TOWARD2)
r5 <- unstandardize_coefficients(r5, X_mean = X_mean, X_sd = X_sd)
y <- list(y1 = r1[[1]], y2 = r2[[1]], y3 = r3[[1]], y4 = r4[[1]], y5 = r5[[1]])
Sigma <- list(Sigma1 = r1[[2]], Sigma2 = r2[[2]], Sigma3 = r3[[2]], Sigma4 = r4[[2]], Sigma5 = r5[[2]])
#internal validation
result <- secondStage(y = y, Sigma = Sigma, jags_file = "second stage-ApproachII.txt")
prediction_SCQM <- findPrediction(SCQM, result)
prediction_BSRBR <- findPrediction(BSRBR, result)
performance_SCQM <- findPerformance(prediction_SCQM)
performance_BSRBR <- findPerformance(prediction_BSRBR)
apparent_performance_SCQM <- unlist(lapply(performance_SCQM, mean))
apparent_performance_BSRBR <- unlist(lapply(performance_BSRBR, mean))
##### Finding optimism: SCQM
set.seed(1)
optimism <- matrix(NA, nrow = 200, ncol = 9)
colnames(optimism) <- c("mse", "bias", "mse1", "bias1", "mse2", "bias2", "mse3", "bias3", "rsquared")
for(ii in 1:200){
  SCQM_bootstrap <- SCQM[sample(1:dim(SCQM)[1], replace = TRUE),]
  samples_SCQM_bootstrap <- firstStage(SCQM_bootstrap, "first stage-bayesLASSO.txt", mm = 1)
  # BUG FIX: use a loop-local name (r2_boot) instead of overwriting r2.
  # Previously r2 was clobbered here, so the BSRBR optimism loop below ran
  # against the *last bootstrap* SCQM summary instead of the original one.
  r2_boot <- summarize_each_study(samples_SCQM_bootstrap)
  r2_boot <- unstandardize_coefficients(r2_boot, SCQM_bootstrap)
  y <- list(y1 = r1[[1]], y2 = r2_boot[[1]], y3 = r3[[1]], y4 = r4[[1]], y5 = r5[[1]])
  Sigma <- list(Sigma1 = r1[[2]], Sigma2 = r2_boot[[2]], Sigma3 = r3[[2]], Sigma4 = r4[[2]], Sigma5 = r5[[2]])
  result <- secondStage(y = y, Sigma = Sigma, jags_file = "second stage-ApproachII.txt", n.iter = 10000)
  prediction_SCQM_bootstrap <- findPrediction(SCQM_bootstrap, result)
  performance_SCQM_bootstrap <- findPerformance(prediction_SCQM_bootstrap)
  prediction_SCQM_test <- findPrediction(SCQM, result)
  performance_SCQM_test <- findPerformance(prediction_SCQM_test)
  # optimism = (performance on the bootstrap sample) - (performance on the full data)
  optimism[ii,] <- mapply('-',lapply(performance_SCQM_bootstrap, mean),lapply(performance_SCQM_test, mean),SIMPLIFY=TRUE)
}
optimism_averaged <- apply(optimism, 2, mean, na.rm = TRUE)
optimism_corrected_performance_SCQM <- apparent_performance_SCQM - optimism_averaged
##### Finding optimism: BSRBR
set.seed(1)
optimism2 <- matrix(NA, nrow = 200, ncol = 9)
colnames(optimism2) <- c("mse", "bias", "mse1", "bias1", "mse2", "bias2", "mse3", "bias3", "rsquared")
for(ii in 1:200){
  BSRBR_bootstrap <- BSRBR[sample(1:dim(BSRBR)[1], replace = TRUE),]
  samples_BSRBR_bootstrap <- firstStage(BSRBR_bootstrap, "first stage-bayesLASSO.txt", mm = 1)
  # BUG FIX: loop-local r1_boot keeps the original BSRBR summary (r1) intact
  r1_boot <- summarize_each_study(samples_BSRBR_bootstrap)
  r1_boot <- unstandardize_coefficients(r1_boot, BSRBR_bootstrap)
  y <- list(y1 = r1_boot[[1]], y2 = r2[[1]], y3 = r3[[1]], y4 = r4[[1]], y5 = r5[[1]])
  Sigma <- list(Sigma1 = r1_boot[[2]], Sigma2 = r2[[2]], Sigma3 = r3[[2]], Sigma4 = r4[[2]], Sigma5 = r5[[2]])
  result <- secondStage(y = y, Sigma = Sigma, jags_file = "second stage-ApproachII.txt", n.iter = 10000)
  prediction_BSRBR_bootstrap <- findPrediction(BSRBR_bootstrap, result)
  performance_BSRBR_bootstrap <- findPerformance(prediction_BSRBR_bootstrap)
  prediction_BSRBR_test <- findPrediction(BSRBR, result)
  performance_BSRBR_test <- findPerformance(prediction_BSRBR_test)
  optimism2[ii,] <- mapply('-',lapply(performance_BSRBR_bootstrap, mean),lapply(performance_BSRBR_test, mean),SIMPLIFY=TRUE)
}
optimism2_averaged <- apply(optimism2, 2, mean, na.rm = TRUE)
optimism_corrected_performance_BSRBR <- apparent_performance_BSRBR - optimism2_averaged
| /ra/additional analyses/Approach IIb - optimism-corrected.R | no_license | MikeJSeo/phd | R | false | false | 5,156 | r | setwd("~/GitHub/phd/ra/R files")
source("helpful.functions.R")
library(readxl)
library(writexl)
library(tidyverse)
library(mice)
library(rjags)
setwd("C:/Users/ms19g661/Desktop")
# xlsx files
mydata <- read_excel("ra_dataset.xlsx")
BSRBR <- mydata %>% filter(study == "BSRBR")
SCQM <- mydata %>% filter(study == "SCQM")
TOWARD <- mydata %>% filter(study == "TOWARD")
REFLEX <- mydata %>% filter(study == "REFLEX")
#################
# first stage analysis
# second stage analysis
setwd("C:/Users/ms19g661/Desktop/RData")
load("REFLEX-ApproachI-bayesLASSO.RData")
load("TOWARD-ApproachI-bayesLASSO.RData")
load("BSRBR-ApproachI-bayesLASSO.RData")
load("SCQM-ApproachI-bayesLASSO.Rdata")
# precomputed first-stage summary for TOWARD2: coefficient vector y, its
# covariance Omega, and the covariate means/sds used to unstandardize them
y_TOWARD2 <- c(4.9625, 0.1060, -0.0278, 0.0143, -0.0066, 0.1309, -0.0560, 0.0974, 0.1670, 0.4551,
               -0.0196, 0.0416, -0.0129, -0.0222, -0.0291, -0.0275, -0.0015, -0.0100, -0.0043, -1.6943)
Omega_TOWARD2 <- as.matrix(read_excel("Omega_TOWARD2_bayesLASSO.xlsx", col_names = FALSE))
X_mean <- c(0.83, 52.33, 9.10, 27.74, 0.82, 1.59, 1.53, 46.39, 6.54)
X_sd <- c(0.38, 12.11, 8.18, 6.44, 0.38, 1.46, 0.61, 24.74, 0.96)
setwd("C:/Users/ms19g661/Documents/GitHub/phd/ra/JAGS files") #set the location to where JAGS file exists
###############################################################################
### apparent performance
#find summary mean and covariance matrix for each study
r1 <- summarize_each_study(samples_BSRBR)
r1 <- unstandardize_coefficients(r1, BSRBR)
r2 <- summarize_each_study(samples_SCQM)
r2 <- unstandardize_coefficients(r2, SCQM)
r3 <- summarize_each_study(samples_REFLEX)
r3 <- unstandardize_coefficients(r3, REFLEX)
r4 <- summarize_each_study(samples_TOWARD)
r4 <- unstandardize_coefficients(r4, TOWARD)
r5 <- list(y = y_TOWARD2, Omega = Omega_TOWARD2)
r5 <- unstandardize_coefficients(r5, X_mean = X_mean, X_sd = X_sd)
y <- list(y1 = r1[[1]], y2 = r2[[1]], y3 = r3[[1]], y4 = r4[[1]], y5 = r5[[1]])
Sigma <- list(Sigma1 = r1[[2]], Sigma2 = r2[[2]], Sigma3 = r3[[2]], Sigma4 = r4[[2]], Sigma5 = r5[[2]])
#internal validation
result <- secondStage(y = y, Sigma = Sigma, jags_file = "second stage-ApproachII.txt")
prediction_SCQM <- findPrediction(SCQM, result)
prediction_BSRBR <- findPrediction(BSRBR, result)
performance_SCQM <- findPerformance(prediction_SCQM)
performance_BSRBR <- findPerformance(prediction_BSRBR)
apparent_performance_SCQM <- unlist(lapply(performance_SCQM, mean))
apparent_performance_BSRBR <- unlist(lapply(performance_BSRBR, mean))
##### Finding optimism: SCQM
set.seed(1)
optimism <- matrix(NA, nrow = 200, ncol = 9)
colnames(optimism) <- c("mse", "bias", "mse1", "bias1", "mse2", "bias2", "mse3", "bias3", "rsquared")
for(ii in 1:200){
  SCQM_bootstrap <- SCQM[sample(1:dim(SCQM)[1], replace = TRUE),]
  samples_SCQM_bootstrap <- firstStage(SCQM_bootstrap, "first stage-bayesLASSO.txt", mm = 1)
  # BUG FIX: use a loop-local name (r2_boot) instead of overwriting r2.
  # Previously r2 was clobbered here, so the BSRBR optimism loop below ran
  # against the *last bootstrap* SCQM summary instead of the original one.
  r2_boot <- summarize_each_study(samples_SCQM_bootstrap)
  r2_boot <- unstandardize_coefficients(r2_boot, SCQM_bootstrap)
  y <- list(y1 = r1[[1]], y2 = r2_boot[[1]], y3 = r3[[1]], y4 = r4[[1]], y5 = r5[[1]])
  Sigma <- list(Sigma1 = r1[[2]], Sigma2 = r2_boot[[2]], Sigma3 = r3[[2]], Sigma4 = r4[[2]], Sigma5 = r5[[2]])
  result <- secondStage(y = y, Sigma = Sigma, jags_file = "second stage-ApproachII.txt", n.iter = 10000)
  prediction_SCQM_bootstrap <- findPrediction(SCQM_bootstrap, result)
  performance_SCQM_bootstrap <- findPerformance(prediction_SCQM_bootstrap)
  prediction_SCQM_test <- findPrediction(SCQM, result)
  performance_SCQM_test <- findPerformance(prediction_SCQM_test)
  # optimism = (performance on the bootstrap sample) - (performance on the full data)
  optimism[ii,] <- mapply('-',lapply(performance_SCQM_bootstrap, mean),lapply(performance_SCQM_test, mean),SIMPLIFY=TRUE)
}
optimism_averaged <- apply(optimism, 2, mean, na.rm = TRUE)
optimism_corrected_performance_SCQM <- apparent_performance_SCQM - optimism_averaged
##### Finding optimism: BSRBR
set.seed(1)
optimism2 <- matrix(NA, nrow = 200, ncol = 9)
colnames(optimism2) <- c("mse", "bias", "mse1", "bias1", "mse2", "bias2", "mse3", "bias3", "rsquared")
for(ii in 1:200){
  BSRBR_bootstrap <- BSRBR[sample(1:dim(BSRBR)[1], replace = TRUE),]
  samples_BSRBR_bootstrap <- firstStage(BSRBR_bootstrap, "first stage-bayesLASSO.txt", mm = 1)
  # BUG FIX: loop-local r1_boot keeps the original BSRBR summary (r1) intact
  r1_boot <- summarize_each_study(samples_BSRBR_bootstrap)
  r1_boot <- unstandardize_coefficients(r1_boot, BSRBR_bootstrap)
  y <- list(y1 = r1_boot[[1]], y2 = r2[[1]], y3 = r3[[1]], y4 = r4[[1]], y5 = r5[[1]])
  Sigma <- list(Sigma1 = r1_boot[[2]], Sigma2 = r2[[2]], Sigma3 = r3[[2]], Sigma4 = r4[[2]], Sigma5 = r5[[2]])
  result <- secondStage(y = y, Sigma = Sigma, jags_file = "second stage-ApproachII.txt", n.iter = 10000)
  prediction_BSRBR_bootstrap <- findPrediction(BSRBR_bootstrap, result)
  performance_BSRBR_bootstrap <- findPerformance(prediction_BSRBR_bootstrap)
  prediction_BSRBR_test <- findPrediction(BSRBR, result)
  performance_BSRBR_test <- findPerformance(prediction_BSRBR_test)
  optimism2[ii,] <- mapply('-',lapply(performance_BSRBR_bootstrap, mean),lapply(performance_BSRBR_test, mean),SIMPLIFY=TRUE)
}
optimism2_averaged <- apply(optimism2, 2, mean, na.rm = TRUE)
optimism_corrected_performance_BSRBR <- apparent_performance_BSRBR - optimism2_averaged
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ManageVariables.R
\name{ManageVariables}
\alias{ManageVariables}
\title{GUI: Variable Manager}
\usage{
ManageVariables(cols, vars, query, changelog, parent = NULL)
}
\arguments{
\item{cols}{list.
See \sQuote{Value} section}
\item{vars}{list.
See \sQuote{Value} section}
\item{query}{character.
See \sQuote{Value} section}
\item{changelog}{data.frame.
See \sQuote{Value} section}
\item{parent}{tkwin.
\acronym{GUI} parent window}
}
\value{
Returns an object of class list with components \code{cols} and \code{vars}.
The \code{cols} object is a list whose length is equal to the current number of data variables.
Each component in \code{cols} is linked to a specific variable,
and contains the following components:
\item{name}{variable name}
\item{format}{conversion specification format (optional)}
\item{id}{unique identifier that is created from \code{name}.}
\item{fun}{expression evaluated when computing the variable's vector of values.}
\item{index}{variable's component index number in the \code{data.raw} data table, see \code{\link{ImportText}}.
Only required for variables directly linked to data columns in \code{data.raw}.}
\item{class}{data class of the vector object.}
\item{summary}{summary of the variable's descriptive statistics (see \code{\link{summary}}).}
\item{comments}{user comments}
The \code{vars} object is a list with components:
\item{x, y, z, sort.on}{the index number of the corresponding state variable in \code{cols}.
These indexes are updated to reflect the removal and (or) reordering of variables in \code{cols}.}
\item{query}{if required, variable names are updated.}
\item{changelog}{if required, names in the \code{variable} component are updated.}
}
\description{
A graphical user interface (\acronym{GUI}) for managing variables in the data table.
}
\details{
This \acronym{GUI} lets you:
(1) specify the names and format of variables;
(2) add new variables based on user defined functions, see \code{\link{EditFunction}};
(3) display data in a spreadsheet, see \code{\link{EditData}}; and
(4) remove and (or) reorder variables in the data table.
}
\examples{
\dontrun{
Data(replace.all = obj)
ManageVariables(obj$cols, obj$vars, obj$query, obj$changelog)
}
}
\author{
J.C. Fisher, U.S. Geological Survey, Idaho Water Science Center
}
\keyword{misc}
| /man/ManageVariables.Rd | permissive | cran/RSurvey | R | false | true | 2,515 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ManageVariables.R
\name{ManageVariables}
\alias{ManageVariables}
\title{GUI: Variable Manager}
\usage{
ManageVariables(cols, vars, query, changelog, parent = NULL)
}
\arguments{
\item{cols}{list.
See \sQuote{Value} section}
\item{vars}{list.
See \sQuote{Value} section}
\item{query}{character.
See \sQuote{Value} section}
\item{changelog}{data.frame.
See \sQuote{Value} section}
\item{parent}{tkwin.
\acronym{GUI} parent window}
}
\value{
Returns an object of class list with components \code{cols} and \code{vars}.
The \code{cols} object is a list whose length is equal to the current number of data variables.
Each component in \code{cols} is linked to a specific variable,
and contains the following components:
\item{name}{variable name}
\item{format}{conversion specification format (optional)}
\item{id}{unique identifier that is created from \code{name}.}
\item{fun}{expression evaluated when computing the variable's vector of values.}
\item{index}{variable's component index number in the \code{data.raw} data table, see \code{\link{ImportText}}.
Only required for variables directly linked to data columns in \code{data.raw}.}
\item{class}{data class of the vector object.}
\item{summary}{summary of the variable's descriptive statistics (see \code{\link{summary}}).}
\item{comments}{user comments}
The \code{vars} object is a list with components:
\item{x, y, z, sort.on}{the index number of the corresponding state variable in \code{cols}.
These indexes are updated to reflect the removal and (or) reordering of variables in \code{cols}.}
\item{query}{if required, variable names are updated.}
\item{changelog}{if required, names in the \code{variable} component are updated.}
}
\description{
A graphical user interface (\acronym{GUI}) for managing variables in the data table.
}
\details{
This \acronym{GUI} lets you:
(1) specify the names and format of variables;
(2) add new variables based on user defined functions, see \code{\link{EditFunction}};
(3) display data in a spreadsheet, see \code{\link{EditData}}; and
(4) remove and (or) reorder variables in the data table.
}
\examples{
\dontrun{
Data(replace.all = obj)
ManageVariables(obj$cols, obj$vars, obj$query, obj$changelog)
}
}
\author{
J.C. Fisher, U.S. Geological Survey, Idaho Water Science Center
}
\keyword{misc}
|
#' Calculate the Jaccard index for two sets of character vectors
#'
#' Note: despite the historical function name, this computes the Jaccard
#' *index* (similarity; 1 means identical sets, 0 means disjoint sets),
#' not a distance.
#'
#' @param set1 character vector 1
#' @param set2 character vector 2
#'
#' @return Jaccard index: |intersection| / |union|. NaN if both sets are empty.
#' @export
#'
#' @examples
#' JaccardSets(sample(LETTERS, 10), sample(LETTERS, 10))
JaccardSets<- function(set1, set2){
  # union(set1, set2) is equivalent to unique(c(set1, set2))
  length(intersect(set1, set2))/length(union(set1, set2))
}
#' Calculate pair-wise Jaccard index for @@ident slots from two Seurat objects
#'
#' Computes the Jaccard index between every cluster in \code{ident1} and every
#' cluster in \code{ident2}, e.g. seurat_obj1@ident and seurat_obj2@ident.
#'
#' @param ident1 a named factor vector. names are the cell names, the values are
#' the cluster id.
#' @param ident2 a named factor vector. names are the cell names, the values are
#' the cluster id.
#'
#' @return a matrix of pair-wise Jaccard indices. Rows are clusters from ident1,
#' columns are clusters from ident2
#' @export
#'
#' @examples
#' \dontrun{
#' PairWiseJaccardSets(pbmc@@ident, pbmc_small@@ident)
#'}
#'
PairWiseJaccardSets<- function(ident1, ident2){
  # group cell names by their cluster id
  cells_by_cluster1<- split(names(ident1), ident1)
  cells_by_cluster2<- split(names(ident2), ident2)
  res<- matrix(NA_real_, nrow = length(cells_by_cluster1), ncol = length(cells_by_cluster2),
               dimnames = list(names(cells_by_cluster1), names(cells_by_cluster2)))
  # fill one row at a time: row i holds the Jaccard index of cluster i of
  # ident1 against every cluster of ident2
  for (row_cluster in names(cells_by_cluster1)){
    res[row_cluster, ]<- vapply(cells_by_cluster2,
                                function(other_cells) JaccardSets(cells_by_cluster1[[row_cluster]], other_cells),
                                numeric(1))
  }
  return(res)
}
#' Calculate pair-wise overlapping cluster identities for @@ident slots from two Seurat objects
#'
#' Counts, for every pair of clusters (one from \code{ident1}, one from
#' \code{ident2}), how many cell names the two clusters share,
#' e.g. for seurat_obj1@ident and seurat_obj2@ident.
#'
#' @param ident1 a named factor vector. names are the cell names, the values are
#' the cluster id.
#' @param ident2 a named factor vector. names are the cell names, the values are
#' the cluster id.
#'
#' @return A matrix of pairwise number of common cell identities for each cluster.
#' Rows are clusters from ident1, columns are clusters from ident2.
#' @export
#'
#' @examples
#' \dontrun{
#' PairWiseOverlappingIdents(pbmc@@ident, pbmc_small@@ident)
#' }
PairWiseOverlappingIdents<- function(ident1, ident2){
  ident1.list<- split(names(ident1), ident1)
  ident2.list<- split(names(ident2), ident2)
  # preallocate the result instead of growing it with rbind() inside the loop,
  # and use base vapply() rather than purrr so the function is dependency-free;
  # this also returns a valid 0 x k matrix when ident1 has no clusters
  res<- matrix(0, nrow = length(ident1.list), ncol = length(ident2.list),
               dimnames = list(names(ident1.list), names(ident2.list)))
  for (i in seq_along(ident1.list)){
    res[i, ]<- vapply(ident2.list,
                      function(cells) length(intersect(ident1.list[[i]], cells)),
                      integer(1))
  }
  return(res)
}
#' Match two run of cluster ids with highest Jaccard index
#'
#' For every cluster in \code{ident1}, find the cluster in \code{ident2} with
#' which it shares the highest Jaccard index.
#'
#' @param ident1 a named factor vector. names are the cell names, the values are
#' the cluster id.
#' @param ident2 a named factor vector. names are the cell names, the values are
#' the cluster id.
#'
#' @return A tibble with two columns, column 1 is the cluster ids from ident1, column2
#' is the cluster ids from ident2.
#' @export
#'
#' @examples
#' \dontrun{
#' MatchClusters(pbmc@@ident, pbmc_small@@ident)
#' }
MatchClusters<- function(ident1, ident2){
  jaccard_mat<- PairWiseJaccardSets(ident1, ident2)
  # for each row (a cluster of ident1), pick the column name (cluster of
  # ident2) with the largest Jaccard index; ties resolve to the first
  # column, exactly as which.max() does
  best_match<- apply(jaccard_mat, 1, function(jaccard_row) colnames(jaccard_mat)[which.max(jaccard_row)])
  return(tibble::tibble(ident1 = names(best_match), ident2 = best_match))
}
#' Assign highest Jaccard index for each cluster of the subsampled data set before
#' reclustering with the cluster identities of subsampled data set after reclustering
#'
#' @param idents1 A list of cluster identity copied from the original data sets.
#' idents1 is a list of the cluster identity from the subsampled data sets before reclustering.
#' @param idents2 A list of cluster identity from the subsampled data sets.
#' idents2 is a list of the cluster identity from the subsampled data sets after reclustering.
#' The order of identities in idents1 and idents2 should correspond to each other.
#'
#' @return A data frame with dimension of #number of subsampling * #number of clusters in the
#' original data set. Each row is one subsampling round; each entry is the highest
#' Jaccard index that the original cluster achieved against any recluster.
#' @export
#'
#' @examples
#' data(idents)
#' AssignHighestJaccard(idents, idents)
AssignHighestJaccard<- function(idents1, idents2){
  # one pairwise Jaccard matrix per subsampling round
  mat_list<- purrr::map2(idents1, idents2, ~PairWiseJaccardSets(ident1 = .x, ident2 = .y))
  # for each original cluster (row), keep its best match across reclusters
  SelectHighestJaccard<- function(mat){
    apply(mat, 1, max)
  }
  # or use the anonymous function
  mat_max<- purrr::map(mat_list, SelectHighestJaccard)
  # stack the per-round vectors into one row per subsampling round
  mats<- purrr::reduce(mat_max, dplyr::bind_rows)
  return(mats)
}
#' Assign stable cluster
#'
#' Decide, for every cluster of the original data set, whether it is "stable"
#' by summarizing the highest Jaccard indices across all subsampling rounds.
#'
#' @param idents1 A list of cluster identity copied from the original data sets.
#' idents1 is a list of the cluster identity from the subsampled data sets before reclustering.
#' @param idents2 A list of cluster identity from the subsampled data sets.
#' idents2 is a list of the cluster identity from the subsampled data sets after reclustering.
#' The order of identities in idents1 and idents2 should correspond to each other.
#' @param method what way to summarize the jaccard index across all simulations
#' to determine whether a cluster is stable or not. options are "jaccard_mean",
#' "jaccard_median" and "jaccard_percent" (all lower case, matching the code).
#' @param jaccard_cutoff Cutoff of the jaccard index to determine whether a cluster is stable or not.
#' it is the mean or median cutoff when the method is "jaccard_mean" or "jaccard_median" and it is
#' the cutoff for every subsampling when the method is "jaccard_percent"
#' @param percent_cutoff The percentage of jaccard index greater than jaccard_cutoff. Used
#' when method is "jaccard_percent". specify 0.6 when you mean 60%.
#'
#' @return A list containing the raw data for jaccard index for all simulations,
#' TRUE or FALSE of stable cluster for each cluster and a number of stable clusters.
#' A cluster is deemed as stable if the median (or mean) jaccard index is > cutoff.
#' In addition, a stable_index is calculated, which is the percentage of jaccard index >
#' cutoff across all the subsampling. e.g. for 100 times subsampling, 0.8 means 80% of the
#' time, the jaccard index is > cutoff. Sometimes, we see a bimodal distribution of the
#' 100 jaccard indices; the percentage is then a better measurement than the mean or median of the
#' 100 jaccard indices.
#'
#' @export
#'
#' @examples
#'
#' data(idents)
#'
#' AssignStableCluster(idents, idents)
#'
AssignStableCluster<- function(idents1, idents2,
                               method = "jaccard_median",
                               jaccard_cutoff = 0.6,
                               percent_cutoff = 0.6){
  mats<- AssignHighestJaccard(idents1, idents2)
  # per-cluster fraction of subsampling rounds whose best Jaccard index
  # exceeds the cutoff (used directly by the "jaccard_percent" method and
  # always returned as stable_index)
  stable_index<- (mats > jaccard_cutoff) %>%
    as.data.frame() %>%
    dplyr::summarise_all(mean) %>%
    unlist()
  # summarize the per-round Jaccard indices with the mean, the median, or the
  # percentage above the cutoff, depending on the chosen method
  if (method == "jaccard_mean"){
    stable_cluster<- mats %>%
      dplyr::summarise_all(mean) %>%
      dplyr::mutate_all(~ifelse(.x > jaccard_cutoff, TRUE, FALSE)) %>%
      unlist()
    number_of_stable_cluster<- sum(stable_cluster)
  } else if (method == "jaccard_median"){
    stable_cluster<- mats %>%
      dplyr::summarise_all(median) %>%
      dplyr::mutate_all(~ifelse(.x > jaccard_cutoff, TRUE, FALSE)) %>%
      unlist()
    number_of_stable_cluster<- sum(stable_cluster)
  } else if (method == "jaccard_percent"){
    number_of_stable_cluster<- sum(stable_index > percent_cutoff)
    stable_cluster<- stable_index > percent_cutoff
  } else {
    stop("please specify jaccard_mean, jaccard_median or jaccard_percent
         for method")
  }
  return(list(jaccardIndex = mats, stable_cluster = stable_cluster,
              number_of_stable_cluster = number_of_stable_cluster,
              stable_index = stable_index))
}
#' Calculate the percentage of cells in stable clusters in the full data set
#'
#' @param ident A named factor vector. Names are the cell names, the values are
#' the cluster id from the full data set. (Roxygen fix: the parameter names had
#' stray trailing periods and did not match the signature.)
#' @param stable_cluster A logical vector, one entry per original cluster,
#' indicating whether it is stable or not, calculated from
#' \code{\link{AssignStableCluster}}
#'
#' @return The fraction of cells that belong to a stable cluster.
#' @export
#'
#' @examples
CalculatePercentCellInStable<- function(ident, stable_cluster){
  # number of cells per cluster, named by cluster id;
  # base vapply() replaces purrr::map_int (no package needed for a length count)
  number_of_cells_each_cluster<- vapply(split(names(ident), ident), length, integer(1))
  # cells in stable clusters over all cells
  percent_cell_in_stable<- sum(number_of_cells_each_cluster[stable_cluster])/sum(number_of_cells_each_cluster)
  return(percent_cell_in_stable)
}
#' Bootstrap for a fully processed Seurat object
#'
#' Repeatedly subsamples cells, re-runs the preprocessing/reclustering pipeline
#' on each subsample, and collects the resulting cluster identities.
#'
#' @param object A fully processed Seurat object.
#' @param n Number of times you want to bootstrap.
#' @param rate A number between 0 and 1 for subsampling the cells.
#' @param ... Other parameters passed to \code{\link{PreprocessSubsetData}}
#'
#' @return A list of lists containing the ident from the subsetted reclustered
#' seurat objects.
#' @export
#'
#' @examples
#'
# # see https://github.com/satijalab/seurat/issues/457
# # parallelize Seurat functions. The authors decided to go with the future framework.
# scClusterBoot<- function(object, n = 4, workers = 4, rate = 0.8, ...){
#         multicoreParam <- BiocParallel::MulticoreParam(workers = workers)
#         BiocParallel::register(multicoreParam)
#         # the parameter n is not used inside the function
#         GetProcessedSubsetDataCluster<- function(n, ...){
#                 object<- RandomSubsetData(object, rate = rate)
#                 object<- PreprocessSubsetData(object, ...)
#                 return(list(ident = object@ident, pc.sig = object@meta.data$pc.sig))
#         }
#         boot_clusters<- BiocParallel::bplapply(1:n, GetProcessedSubsetDataCluster)
#         return(boot_clusters)
# }
# scClusterBoot<- function(object, n = 4, workers = 4, rate = 0.8, ...){
#         future::plan(multiprocess)
#         # the parameter n is not used inside the function
#         GetProcessedSubsetDataCluster<- function(n, ...){
#                 object<- RandomSubsetData(object, rate = rate)
#                 object<- PreprocessSubsetData(object, ...)
#                 return(list(ident = object@ident, pc.sig = object@meta.data$pc.sig))
#         }
#         boot_clusters<- future.apply::future_lapply(1:n, GetProcessedSubsetDataCluster)
#         return(boot_clusters)
# }
scClusterBoot<- function(object, n = 4, rate = 0.8, ...){
  # worker for one bootstrap iteration; `i` is the iteration index (unused)
  GetProcessedSubsetDataCluster<- function(i, ...){
    object<- RandomSubsetData(object, rate = rate)
    object<- PreprocessSubsetData(object, ...)
    return(list(ident = object@ident, pc.sig = object@meta.data$pc.sig))
  }
  # BUG FIX: the inner function's own `...` shadowed the outer one, so the
  # caller's extra arguments were silently dropped; forward them via lapply.
  # seq_len(n) is also safe for n = 0 (1:n would yield c(1, 0)).
  boot_clusters<- lapply(seq_len(n), GetProcessedSubsetDataCluster, ...)
  return(boot_clusters)
}
| /R/scclusterboot.R | permissive | crazyhottommy/scclusteval | R | false | false | 11,085 | r |
#' Calculate the Jaccard index for two sets of character vectors
#'
#' Note: despite the historical name, this returns the Jaccard similarity
#' (|intersection| / |union|), not a distance.
#'
#' @param set1 character vector 1
#' @param set2 character vector 2
#'
#' @return the Jaccard index, a number between 0 and 1
#' @export
#'
#' @examples
#' JaccardSets(sample(LETTERS, 10), sample(LETTERS, 10))
JaccardSets<- function(set1, set2){
  n_shared <- length(intersect(set1, set2))
  n_union <- length(unique(c(set1, set2)))
  n_shared / n_union
}
#' Calculate pair-wise Jaccard index for @@ident slots from two Seurat objects
#'
#' Calculate pair-wise Jaccard indices for two named factor vectors, e.g.
#' seurat_obj1@ident and seurat_obj2@ident
#'
#' @param ident1 a named factor vector. names are the cell names, the values are
#' the cluster id.
#' @param ident2 a named factor vector. names are the cell names, the values are
#' the cluster id.
#'
#' @return a matrix of pair-wise Jaccard indices. Rows are clusters from ident1,
#' columns are clusters from ident2
#' @export
#'
#' @examples
#' \dontrun{
#' PairWiseJaccardSets(pbmc@@ident, pbmc_small@@ident)
#'}
#'
PairWiseJaccardSets<- function(ident1, ident2){
  # group cell names by cluster id for each identity vector
  ident1.list<- split(names(ident1), ident1)
  ident2.list<- split(names(ident2), ident2)
  res<- matrix(nrow = length(ident1.list), ncol = length(ident2.list),
               dimnames = list(names(ident1.list), names(ident2.list)))
  for (i in seq_along(ident1.list)){
    # base vapply() replaces purrr::map_dbl (same result, no purrr dependency)
    res[i, ]<- vapply(ident2.list,
                      function(cells) JaccardSets(ident1.list[[i]], cells),
                      numeric(1))
  }
  return(res)
}
#' Calculate pair-wise overlapping cluster identities for @@ident slots from two Seurat objects
#'
#' Calculate pair-wise overlapping cluster identities for two named factor vectors, e.g.
#' seurat_obj1@ident and seurat_obj2@ident
#' @param ident1 a named factor vector. names are the cell names, the values are
#' the cluster id.
#' @param ident2 a named factor vector. names are the cell names, the values are
#' the cluster id.
#'
#' @return A matrix of pairwise number of common cell identities for each cluster.
#' Rows are clusters from ident1, columns are clusters from ident2.
#' @export
#'
#' @examples
#' \dontrun{
#' PairWiseOverlappingIdents(pbmc@@ident, pbmc_small@@ident)
#' }
PairWiseOverlappingIdents<- function(ident1, ident2){
  ident1.list<- split(names(ident1), ident1)
  ident2.list<- split(names(ident2), ident2)
  # Preallocate the result instead of growing it with rbind() inside the loop
  # (rbind-in-a-loop copies the whole matrix every iteration, i.e. O(n^2)).
  res<- matrix(nrow = length(ident1.list), ncol = length(ident2.list),
               dimnames = list(names(ident1.list), names(ident2.list)))
  for (i in seq_along(ident1.list)){
    # number of shared cells between cluster i of ident1 and every ident2 cluster;
    # base vapply() replaces purrr::map_dbl
    res[i, ]<- vapply(ident2.list,
                      function(cells) length(intersect(ident1.list[[i]], cells)),
                      numeric(1))
  }
  return(res)
}
#' Match two runs of cluster ids with highest Jaccard index
#'
#' @param ident1 a named factor vector. names are the cell names, the values are
#' the cluster id.
#' @param ident2 a named factor vector. names are the cell names, the values are
#' the cluster id.
#'
#' @return A tibble with two columns: column 1 holds the cluster ids from
#' ident1, column 2 the best-matching (highest Jaccard) cluster id from ident2.
#' @export
#'
#' @examples
#' \dontrun{
#' MatchClusters(pbmc@@ident, pbmc_small@@ident)
#' }
MatchClusters<- function(ident1, ident2){
  jaccard_mat<- PairWiseJaccardSets(ident1, ident2)
  # for each ident1 cluster (row), pick the ident2 cluster with the largest index
  best_match <- apply(jaccard_mat, 1,
                      function(row) colnames(jaccard_mat)[which.max(row)])
  tibble::tibble(ident1 = names(best_match), ident2 = best_match)
}
#' Assign highest Jaccard index for each cluster of the subsampled data set before
#' reclustering with the cluster identities of the subsampled data set after reclustering
#'
#' @param idents1 A list of cluster identity copied from the original data sets.
#' idents1 is a list of the cluster identity from the subsampled data sets before reclustering.
#' @param idents2 A list of cluster identity from the subsampled data sets.
#' idents2 is a list of the cluster identity from the subsampled data sets after reclustering.
#' The order of identities in idents1 and idents2 should correspond to each other.
#'
#' @return A matrix with dimension #number of subsamplings x #number of clusters
#' in the original data set.
#' @export
#'
#' @examples
AssignHighestJaccard<- function(idents1, idents2){
  # one jaccard matrix per subsampling: original clusters x reclustered clusters
  jaccard_mats <- Map(PairWiseJaccardSets, idents1, idents2)
  # for every original cluster (row) keep only the best-matching reclustered cluster
  best_per_cluster <- lapply(jaccard_mats, function(mat) apply(mat, 1, max))
  # stack: rows = subsamplings, columns = original clusters
  mats<- purrr::reduce(best_per_cluster, dplyr::bind_rows)
  return(mats)
}
#' Assign stable cluster
#'
#' Summarises the per-subsample best-match Jaccard indices from
#' \code{AssignHighestJaccard} and flags each original cluster as stable or not.
#'
#' @param idents1 A list of cluster identity copied from the original data sets.
#' idents1 is a list of the cluster identity from the subsampled data sets before reclustering.
#' @param idents2 A list of cluster identity from the subsampled data sets.
#' idents2 is a list of the cluster identity from the subsampled data sets after reclustering.
#' The order of identities in idents1 and idents2 should correspond to each other.
#' @param method How to summarize the jaccard index across all simulations
#' to determine whether a cluster is stable or not. Options are "jaccard_mean",
#' "jaccard_median" and "jaccard_percent" (lower case; any other value errors).
#' @param jaccard_cutoff Cutoff of the jaccard index to determine a cluster is stable or not.
#' It is the mean or median cutoff when the method is "jaccard_mean" or "jaccard_median" and it is
#' the cutoff for every subsampling when the method is "jaccard_percent".
#' @param percent_cutoff The percentage of jaccard index greater than jaccard_cutoff. Used
#' when method is "jaccard_percent". Specify 0.6 when you mean 60%.
#'
#' @return A list containing: jaccardIndex, the raw jaccard indices for all
#' simulations (rows = subsamplings, columns = original clusters);
#' stable_cluster, TRUE/FALSE per cluster; number_of_stable_cluster; and
#' stable_index, the percentage of subsamplings whose jaccard index is
#' > jaccard_cutoff (e.g. for 100 subsamplings, 0.8 means 80% of the time the
#' jaccard index exceeded the cutoff). When the distribution of the jaccard
#' indices is bimodal, this percentage is a better measurement than the mean
#' or median.
#'
#' @export
#'
#' @examples
#'
#' data(idents)
#'
#' AssignStableCluster(idents, idents)
#'
AssignStableCluster<- function(idents1, idents2,
                               method = "jaccard_median",
                               jaccard_cutoff = 0.6,
                               percent_cutoff = 0.6){
  # rows = subsamplings, columns = original clusters; each entry is the
  # best-match jaccard index for that cluster in that subsampling
  mats<- AssignHighestJaccard(idents1, idents2)
  # per-cluster fraction of subsamplings exceeding the cutoff
  stable_index<- (mats > jaccard_cutoff) %>%
    as.data.frame() %>%
    dplyr::summarise_all(mean) %>%
    unlist()
  if (method == "jaccard_mean"){
    # a cluster is stable when its mean jaccard index across subsamplings > cutoff
    stable_cluster<- mats %>%
      dplyr::summarise_all(mean) %>%
      dplyr::mutate_all(~ifelse(.x > jaccard_cutoff, TRUE, FALSE)) %>%
      unlist()
    number_of_stable_cluster<- sum(stable_cluster)
  } else if (method == "jaccard_median"){
    # same, but using the median across subsamplings
    stable_cluster<- mats %>%
      dplyr::summarise_all(median) %>%
      dplyr::mutate_all(~ifelse(.x > jaccard_cutoff, TRUE, FALSE)) %>%
      unlist()
    number_of_stable_cluster<- sum(stable_cluster)
  } else if (method == "jaccard_percent"){
    # stable when the fraction of above-cutoff subsamplings exceeds percent_cutoff
    number_of_stable_cluster<- sum(stable_index > percent_cutoff)
    stable_cluster<- stable_index > percent_cutoff
  } else {
    stop("please specify jaccard_mean, jaccard_median or jaccard_percent
for method")
  }
  return(list(jaccardIndex = mats, stable_cluster = stable_cluster,
              number_of_stable_cluster = number_of_stable_cluster,
              stable_index = stable_index))
}
#' Calculate the percentage of cells in stable clusters in the full data set
#'
#' @param ident A named factor vector. Names are the cell names, the values are
#' the cluster id from the full data set. (Roxygen fix: the parameter names had
#' stray trailing periods and did not match the signature.)
#' @param stable_cluster A logical vector, one entry per original cluster,
#' indicating whether it is stable or not, calculated from
#' \code{\link{AssignStableCluster}}
#'
#' @return The fraction of cells that belong to a stable cluster.
#' @export
#'
#' @examples
CalculatePercentCellInStable<- function(ident, stable_cluster){
  # number of cells per cluster, named by cluster id;
  # base vapply() replaces purrr::map_int (no package needed for a length count)
  number_of_cells_each_cluster<- vapply(split(names(ident), ident), length, integer(1))
  # cells in stable clusters over all cells
  percent_cell_in_stable<- sum(number_of_cells_each_cluster[stable_cluster])/sum(number_of_cells_each_cluster)
  return(percent_cell_in_stable)
}
#' Bootstrap for a fully processed Seurat object
#'
#' Repeatedly subsamples cells, re-runs the preprocessing/reclustering pipeline
#' on each subsample, and collects the resulting cluster identities.
#'
#' @param object A fully processed Seurat object.
#' @param n Number of times you want to bootstrap.
#' @param rate A number between 0 and 1 for subsampling the cells.
#' @param ... Other parameters passed to \code{\link{PreprocessSubsetData}}
#'
#' @return A list of lists containing the ident from the subsetted reclustered
#' seurat objects.
#' @export
#'
#' @examples
#'
# # see https://github.com/satijalab/seurat/issues/457
# # parallelize Seurat functions. The authors decided to go with the future framework.
# scClusterBoot<- function(object, n = 4, workers = 4, rate = 0.8, ...){
#         multicoreParam <- BiocParallel::MulticoreParam(workers = workers)
#         BiocParallel::register(multicoreParam)
#         # the parameter n is not used inside the function
#         GetProcessedSubsetDataCluster<- function(n, ...){
#                 object<- RandomSubsetData(object, rate = rate)
#                 object<- PreprocessSubsetData(object, ...)
#                 return(list(ident = object@ident, pc.sig = object@meta.data$pc.sig))
#         }
#         boot_clusters<- BiocParallel::bplapply(1:n, GetProcessedSubsetDataCluster)
#         return(boot_clusters)
# }
# scClusterBoot<- function(object, n = 4, workers = 4, rate = 0.8, ...){
#         future::plan(multiprocess)
#         # the parameter n is not used inside the function
#         GetProcessedSubsetDataCluster<- function(n, ...){
#                 object<- RandomSubsetData(object, rate = rate)
#                 object<- PreprocessSubsetData(object, ...)
#                 return(list(ident = object@ident, pc.sig = object@meta.data$pc.sig))
#         }
#         boot_clusters<- future.apply::future_lapply(1:n, GetProcessedSubsetDataCluster)
#         return(boot_clusters)
# }
scClusterBoot<- function(object, n = 4, rate = 0.8, ...){
  # worker for one bootstrap iteration; `i` is the iteration index (unused)
  GetProcessedSubsetDataCluster<- function(i, ...){
    object<- RandomSubsetData(object, rate = rate)
    object<- PreprocessSubsetData(object, ...)
    return(list(ident = object@ident, pc.sig = object@meta.data$pc.sig))
  }
  # BUG FIX: the inner function's own `...` shadowed the outer one, so the
  # caller's extra arguments were silently dropped; forward them via lapply.
  # seq_len(n) is also safe for n = 0 (1:n would yield c(1, 0)).
  boot_clusters<- lapply(seq_len(n), GetProcessedSubsetDataCluster, ...)
  return(boot_clusters)
}
|
# Functions for summarizing and plotting performance measures
# Functions for calculating performance measures ---------------------------
library(RcppRoll)
# Coefficient of variation of a time series: sd / mean.
cv <- function(ts){
  sd(ts) / mean(ts)
}
# How many years with zero catch. x is a vector; NA entries are ignored
# (which() drops them in the original formulation, na.rm matches that).
nzeroes <- function(x){
  sum(x == 0, na.rm = TRUE)
}
# Number of years above a "good for predators" threshold: 80% of the long-term
# mean unfished biomass given recruitment variation (same as the number of
# "bonanza" years). x is the fished biomass series, F0.x the unfished one.
good4pred <- function(x, F0.x){
  thresh <- 0.8 * mean(F0.x)
  sum(x > thresh, na.rm = TRUE)
}
# Number of years below a "bad for predators" threshold: 20% of the long-term
# mean unfished biomass. (The original header comment said 10%, but the code
# uses 0.2; the code's behavior is preserved here.) x is a biomass time series.
bad4pred <- function(x, F0.x){
  thresh <- 0.2 * mean(F0.x)
  sum(x < thresh, na.rm = TRUE)
}
# Indices of "collapse" years (biomass below 20% of the mean unfished biomass);
# returns 0 when there are none. Use to ID collapse years and inspect fishing
# rates leading up to them.
collapse.index <- function(x, F0.x){
  thresh <- 0.2 * mean(F0.x)
  below <- x < thresh
  if (any(below)) which(below) else 0
}
# Indices of "bonanza" years (biomass above 80% of the mean unfished biomass);
# returns 0 when there are none. Use to ID bonanza years and the conditions
# leading up to them.
bonanza.index <- function(x, F0.x){
  thresh <- 0.8 * mean(F0.x)
  above <- x > thresh
  if (any(above)) which(above) else 0
}
# Min, max, and mean length (in years) of consecutive-collapse runs, based on
# the collapse years identified by collapse.index(). Returns NAs when there
# are no collapses.
duration.collapse <- function(x, F0.x){
  collapse_yrs <- collapse.index(x = x, F0.x = F0.x)
  in_collapse <- logical(length(F0.x))
  in_collapse[collapse_yrs] <- TRUE  # collapse_yrs == 0 indexes nothing, so "none" is safe
  runs <- rle(in_collapse)
  run_lengths <- runs$lengths[runs$values]
  if (length(run_lengths) == 0) {
    list("shortest.collapse" = NA,
         "longest.collapse" = NA,
         "avg.collapse.length" = NA)
  } else {
    list("shortest.collapse" = min(run_lengths, na.rm = TRUE),
         "longest.collapse" = max(run_lengths, na.rm = TRUE),
         "avg.collapse.length" = mean(run_lengths, na.rm = TRUE))
  }
}
# Min, max, and mean length (in years) of consecutive-bonanza runs, based on
# the bonanza years identified by bonanza.index(). Returns NAs when there are
# no bonanzas.
duration.bonanza <- function(x, F0.x){
  bonanza_yrs <- bonanza.index(x = x, F0.x = F0.x)
  in_bonanza <- logical(length(F0.x))
  in_bonanza[bonanza_yrs] <- TRUE  # bonanza_yrs == 0 indexes nothing, so "none" is safe
  runs <- rle(in_bonanza)
  run_lengths <- runs$lengths[runs$values]
  if (length(run_lengths) == 0) {
    list("shortest.bonanza" = NA,
         "longest.bonanza" = NA,
         "avg.bonanza.length" = NA)
  } else {
    list("shortest.bonanza" = min(run_lengths, na.rm = TRUE),
         "longest.bonanza" = max(run_lengths, na.rm = TRUE),
         "avg.bonanza.length" = mean(run_lengths, na.rm = TRUE))
  }
}
#mean.duration.bonanza(x=testie$biomass.total.true,F0.x = F0.Type)
# Count "bonafide" (extended) collapse periods in a time series.
#
# Args:
#   ts.length: length of the biomass time series (number of years).
#   coll.yrs:  indices of collapse years, as returned by collapse.index()
#              (0 when there are none; indexing with 0 marks nothing).
#
# Returns: integer count of collapse runs that qualify as "bonafide", i.e.
# runs of >= 2 collapsed years surrounded by sufficiently long non-collapsed
# stretches (exact rules in the inline conditions, preserved verbatim).
#
# NOTE(review): `c` and `d` shadow base::c inside this scope; it works because
# base::c is still found for calls, but renaming would be safer. The boundary
# rules (scalar `&`, `>= 4` vs `> 4` in the tail check) look deliberate but
# are undocumented upstream -- confirm before changing.
n.bonafide.collapses <- function(ts.length, coll.yrs){
  binary.coll.vec <- rep(0, times=ts.length)
  binary.coll.vec[coll.yrs] <- 1 # Turn into vector of 0s and 1s
  # run-length encode collapsed/non-collapsed stretches
  m <- rle(binary.coll.vec==1)
  c <- m$lengths
  d <- m$values
  counter <- 0
  if(any(d)){
    starts <- which(d) # indexes rle outputs to get locations where collapses start
    for(l in 1:length(starts)){
      ind.coll <- starts[l]
      # only runs of >= 2 collapsed years, not at the very start, and with at
      # least two more runs after them, are candidates
      if(c[ind.coll]>=2 & ind.coll != 1 & (ind.coll+2) < length(c)){
        #if the sequence of collapse is preceded and followed by at least 4 yrs non-collapse
        if(c[(ind.coll-1)]>=4 & c[ind.coll+1]>=4){counter <- counter+1} else{
          if(c[ind.coll+1]<=4 & c[ind.coll+2]>=2){counter <- counter +1} else{
            counter=counter}}}else{counter=counter}
      # if the 4 years leading up to the collapse are not-collapsed, and same with the four years after, it's a "bonafide collapse" (later called "extended collapse")
    }
    # Last case: if the final years are collapsed, need to count those!
    if(tail(d,1) & tail(c,1) > 4){counter=counter+1}else{counter=counter}
  }
  return(counter)
}
# Count 1-, 5- and 10-year fishery closures for each simulation.
#
# Args:
#   x: a matrix; rows are simulations, columns are years (either catch OR biomass).
#   threshold: closure threshold; if NA (default), 1% of the row's long-term mean.
#
# Returns a list of three vectors, one entry per simulation/row:
#   count1  - number of years at/below the threshold
#   count5  - number of (overlapping) 5-year windows entirely at/below it
#   count10 - number of (overlapping) 10-year windows entirely at/below it
n.multiyr.closures <- function(x, threshold = NA) {
  count1 <- count5 <- count10 <- vector(length = nrow(x))
  for (i in seq_len(nrow(x))) {
    ltm <- mean(x[i, ], na.rm = TRUE)
    if (is.na(threshold)) {
      thresh <- 0.01 * ltm  # default: 1% of the long-term mean C or B
    } else {
      thresh <- threshold
    }
    badTorF <- x[i, ] <= thresh
    # RcppRoll::roll_sum(v, k) == k  <=>  all k years in the window are "bad"
    count1[i]  <- sum(roll_sum(badTorF, 1) == 1, na.rm = TRUE)
    count5[i]  <- sum(roll_sum(badTorF, 5) == 5, na.rm = TRUE)
    count10[i] <- sum(roll_sum(badTorF, 10) == 10, na.rm = TRUE)
  }
  # BUG FIX: the original returned `count10 = tenyr`, i.e. the scalar from the
  # final loop iteration, instead of the per-simulation vector count10.
  return(list(count1 = count1, count5 = count5, count10 = count10))
}
# Rcpproll demo:
# set.seed(1); x <- sample(c(T, F), 100, replace = T); sum(RcppRoll::roll_sum(x, 3) == 3)
# Get summary metrics for Zeh plots or quantiles
# Summarise performance measures for one harvest-rule scenario, either as
# 5%/50%/95% quantiles across simulations (default) or as per-simulation
# vectors (individual.sim = TRUE) for Zeh plots.
#
# Args:
#   result.list: one result (= 1 harvest rule): matrices of biomass, catch,
#                fishing, rec, depl; rows = simulations, cols = years.
#   individual.sim: if TRUE return per-simulation values instead of quantiles.
#   calc.ind: index of which years to calculate over. Effectively required:
#             the default `calc.ind = calc.ind` is self-referential and errors
#             if the argument is missing.
#   ou.ind: optional simulation (row) subset, applied when length(ou.ind) > 1.
#
# NOTE(review): `performance.measures` must exist in the calling environment;
# sim.output entries 14-17 hold quantile triplets while 1-13 hold
# per-simulation vectors -- looks inconsistent, preserved as-is.
summ.tab <- function(result.list, individual.sim = FALSE, calc.ind = calc.ind, ou.ind = NA){
  # Trim results to the years (and optionally the simulations) we're using
  for(i in 1:length(result.list)){
    result.list[[i]] <- result.list[[i]][,calc.ind]
    if(length(ou.ind)>1){result.list[[i]] <- result.list[[i]][ou.ind,]}
  }
  LTmeans.list <- lapply(result.list,FUN = rowMeans,na.rm=TRUE)
  # median and quantiles of the long-term means of all PMs
  ltm <- lapply(LTmeans.list,FUN = quantile, probs = c(0.05,0.5,0.95),na.rm=TRUE)
  # mean nonzero catch (catches < 0.1 are treated as closed years)
  catch <- result.list$total.catch
  nonzero.catch <- ifelse(catch<0.1,NA,catch)
  ltm.nzc1 <- rowMeans(nonzero.catch,na.rm=TRUE)
  ltm.nzc2 <- quantile(ltm.nzc1,probs = c(0.05,0.5,0.95),na.rm = TRUE)
  # SD(catch)
  sd.catches <- apply(catch, MARGIN = 1,FUN = sd, na.rm = TRUE)
  sd.catch <- quantile(sd.catches,probs = c(0.05,0.5,0.95))
  # Number of years w zero catch
  nz1 <- apply(catch,MARGIN = 1,FUN = nzeroes)
  # 5- and 10-yr closures
  n.5yrclose <- n.multiyr.closures(catch,threshold = 0)$count5 / nz1 # P(5yr closure | closure)
  n.10yrclose <- n.multiyr.closures(catch,threshold = 0)$count10 / n.5yrclose # P(10yr closure | 5yr closure) - deprecated
  # SD(biomass)
  true.biomass <- result.list$biomass.total.true
  sd.Bs <- apply(true.biomass, MARGIN = 1,FUN = sd, na.rm = TRUE)
  sd.B <- quantile(sd.Bs,probs = c(0.05,0.5,0.95))
  # Years that are "good for predators"
  g4p.vec <- vector()
  for(sim in 1:nrow(true.biomass)){
    # BUG FIX: was `no.fishing.tb[sim]` (a single element of the matrix);
    # every other call in this function passes the whole row `[sim,]` as the
    # unfished biomass series.
    g4p.vec[sim] <- good4pred(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])
  }
  # Number of years that are below a predator threshold
  yrs.bad <- vector()
  for(sim in 1:nrow(true.biomass)){
    yrs.bad[sim] <- bad4pred(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])
  }
  # Per-simulation collapse/bonanza statistics
  max.duration.collapse = min.duration.collapse = avg.duration.collapse <- vector()
  max.duration.bonanza = min.duration.bonanza = avg.duration.bonanza <- vector()
  prob.collapse = collapse.severity = cv.vec = bonafide.collapse = vector()
  depl <- matrix(nrow=nrow(true.biomass),ncol=ncol(true.biomass))
  for(sim in 1:nrow(true.biomass)){ # Each one = 1 simulation
    max.duration.collapse[sim] <- duration.collapse(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])$longest.collapse
    min.duration.collapse[sim] <- duration.collapse(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])$shortest.collapse
    avg.duration.collapse[sim] <- duration.collapse(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])$avg.collapse.length
    max.duration.bonanza[sim] <- duration.bonanza(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])$longest.bonanza
    min.duration.bonanza[sim] <- duration.bonanza(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])$shortest.bonanza
    avg.duration.bonanza[sim] <- duration.bonanza(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])$avg.bonanza.length
    # Probability of collapse
    coll.yrs <- collapse.index(x = true.biomass[sim,],F0.x = result.list$no.fishing.tb[sim,])
    prob.collapse[sim] <- length(coll.yrs) / length(true.biomass[sim,])
    # Severity of collapse (as a percentage of mean unfished biomass)
    coll.thresh <- mean(result.list$no.fishing.tb[sim,]) * 0.2
    collapse.yrs <- true.biomass[sim,coll.yrs]
    collapse.severity[sim] <- 1 - (mean(collapse.yrs,na.rm=T) / coll.thresh )
    # median biomass at collapse, over the median unfished biomass for that simulation - a higher value of that is "better" (less severe) so substract it from 1 to get higher number == worse severity.
    # Depletion based on unfished B0
    depl[sim,] <- true.biomass[sim,]/result.list$no.fishing.tb[sim,]
    # CV of catches
    cv.vec[sim] <- cv(catch[sim,])
    # N "bonafide" collapse periods (this is different than dipping below 0.2Bbar, you have to have a streak of non-collapse years before and after)
    bonafide.collapse[sim] <- n.bonafide.collapses(ts.length = length(true.biomass[sim,]),coll.yrs = coll.yrs)
  } #end of sim loop
  mean.depl <- apply(depl,MARGIN = 1,FUN = mean)  # NOTE(review): computed but unused below
  # Performance metrics
  interval <- c(0.05,0.5,0.95)
  ltm.c <- ltm$total.catch # ltmcatch
  ltm.nzc <- ltm.nzc2 # ltmnonzerocatch
  SDcatch <- sd.catch # SD(Catch)
  n5yr <- quantile(n.5yrclose,probs = interval, na.rm=TRUE) # n.5yrclose
  n10yr <- quantile(n.10yrclose,probs = interval, na.rm=TRUE) # n.10yrclose
  nz <- quantile(nz1,probs = interval) # nyrs.0catch
  ltm.b <- ltm$biomass.total.true # LTMBiomass
  g4p <- quantile(g4p.vec,probs = interval) # Nyears "good for predator"
  sdB <- sd.B # SD(Biomass)
  b4p <- quantile(yrs.bad,probs = interval) # p(bad4preds)
  ltm.depl <- quantile(depl, probs = interval,na.rm=T) # Mean depletion
  prob.coll <- quantile(prob.collapse,probs = interval,na.rm=T)
  severity <- quantile(collapse.severity,probs = interval,na.rm=T)
  cv.catch <- quantile(cv.vec,probs = interval, na.rm=T)
  b.collapse <- quantile(bonafide.collapse,probs = interval, na.rm=T)
  # Awkward but necessary for when there are NAs in these vectors
  if(all(is.na(max.duration.collapse))){
    overall.max.coll.len <- rep(NA,times=3)
  }else{
    overall.max.coll.len <- c(NA, max(max.duration.collapse,na.rm=T), NA) #Fill quantiles w NAs because other metrics have quantiles and these don't!
  }
  if(all(is.na(max.duration.bonanza))){
    overall.max.bon.len <- rep(NA,times=3)
  }else{
    overall.max.bon.len <- c(NA, max(max.duration.bonanza,na.rm=T), NA)
  }
  if(all(is.na(avg.duration.bonanza))){
    bon.length <- rep(NA,times=3)
  } else{
    bon.length <- quantile(avg.duration.bonanza,probs = interval,na.rm = T)
  }
  if(all(is.na(avg.duration.collapse))){
    coll.length <- rep(NA,times=3)
  } else{
    coll.length <- quantile(avg.duration.collapse,probs = interval, na.rm = T)
  }
  # `performance.measures` is expected to be a global character vector naming
  # the 19 rows bound below, in the same order
  output <- data.frame(PM = performance.measures, loCI = NA, med = NA, hiCI = NA)
  output[,-1] <- rbind(ltm.c,ltm.nzc,SDcatch,n5yr,n10yr,nz,ltm.b,g4p,sdB,b4p,ltm.depl,overall.max.coll.len,overall.max.bon.len,bon.length,coll.length,prob.coll,severity,cv.catch,b.collapse)
  # Per-simulation outputs (for individual.sim = TRUE):
  # 1 LTmean catch, 2 LTmean nonzero catch, 3 SDcatch, 4 n5yr, 5 n10yr,
  # 6 yrs w zero catch, 7 LTmean biomass, 8 yrs good4preds, 9 SDbiomass,
  # 10 yrs bad4preds, 11 ltm depletion, 12 mean bonanza length,
  # 13 mean collapse length, 14 probability of collapse, 15 collapse severity,
  # 16 cv(catch), 17 sustained collapse.
  # (Max collapse/bonanza length are across ALL simulations - not included.)
  sim.output <- list()
  sim.output[[1]] <- LTmeans.list$total.catch
  sim.output[[2]] <- ltm.nzc1
  sim.output[[3]] <- sd.catches
  sim.output[[4]] <- n.5yrclose
  sim.output[[5]] <- n.10yrclose
  sim.output[[6]] <- nz1
  sim.output[[7]] <- LTmeans.list$biomass.total.true
  sim.output[[8]] <- g4p.vec
  sim.output[[9]] <- sd.Bs
  sim.output[[10]] <- yrs.bad
  sim.output[[11]] <- LTmeans.list$depl
  sim.output[[12]] <- avg.duration.bonanza
  sim.output[[13]] <- avg.duration.collapse
  sim.output[[14]] <- prob.coll
  sim.output[[15]] <- severity
  sim.output[[16]] <- cv.catch
  sim.output[[17]] <- b.collapse
  if(individual.sim==TRUE){return(sim.output)}
  else{return(output)}
}
# Function to plot medians and certainty intervals from simulations.
#
# Args:
#   result.mat: matrix of simulated values (e.g., biomass for results[[1]]);
#               rows = simulations, columns = years.
#   ylab: y-axis label.
#
# Side effect: draws the median trajectory plus 50% and 95% quantile ribbons
# on the current graphics device.
#
# NOTE(review): depends on `calc.ind` and `nyrs.to.use` existing in the
# calling environment (they are not parameters) -- confirm they are defined,
# and that length(calc.ind) == nyrs.to.use, before use.
plotintervals <- function(result.mat,ylab){ #result.mat is a matrix (e.g., biomass for results[[1]])
  # per-year median and quantiles across simulations
  median.vec <- apply(result.mat,MARGIN = 2,FUN = median)
  ints <- apply(result.mat,MARGIN = 2,FUN = quantile, probs = c(0.025,0.25,0.75,0.975))
  lo95 <- ints[1,calc.ind]
  hi95 <- ints[4,calc.ind]
  lo50 <- ints[2,calc.ind]
  hi50 <- ints[3,calc.ind]
  plot(1:nyrs.to.use,median.vec[calc.ind],
       type='l',lty=1,lwd=2,
       ylim=range(lo95,hi95),
       ylab = ylab,
       xlab = "Year")
  # polygon x-coordinates: forward, anchor point, then reversed
  zz <- c(1:nyrs.to.use,tail(nyrs.to.use,n=1),rev(1:nyrs.to.use)) # for polygons
  aa <- c(hi95,0,rev(lo95))
  bb <- c(hi50,0,rev(lo50))
  polygon(zz,aa,col=adjustcolor( "black", alpha.f = 0.2),border="NA")
  polygon(zz,bb,col=adjustcolor( "black", alpha.f = 0.2),border="NA")
}
## Add an alpha value to a colour (from Mages' blog, http://www.magesblog.com/2013/04/how-to-change-alpha-value-of-colours-in.html)
## Takes a vector of colour names/hex codes and returns the corresponding
## hex codes with the given alpha channel appended.
add.alpha <- function(col, alpha=1){
  if (missing(col)) {
    stop("Please provide a vector of colours.")
  }
  # convert each colour to fractional RGB, then rebuild it with the alpha channel
  channels <- sapply(col, col2rgb) / 255
  apply(channels, 2, function(ch) rgb(ch[1], ch[2], ch[3], alpha = alpha))
}
# Quickly plot time series for a given scenario and simulation #  ----------
#
# Args:
#   result.ind: index into the global `results` list (one scenario = one
#               combination of life-history traits and control rule).
#   nyrs.to.use: how many trailing years to plot (default 100).
#   sim: which simulation (row) to plot.
#
# Returns a ggplot object: one faceted line plot per result variable.
#
# NOTE(review): depends on a global `results` list, reshape2::melt and
# ggplot2 being attached -- confirm before use. The name also collides with
# the S3 convention `plot.<class>` (a plot method for class "scenario").
plot.scenario <- function(result.ind, nyrs.to.use = 100, sim = 1){
  # quickly plot basic stuff for one scenario (combo of LH traits and control rule)
  to.use <- results[[result.ind]]
  nsims <- nrow(to.use$biomass.oneplus.true) # just count nrows to know how many sims there are
  years.test <- ncol(to.use$biomass.oneplus.true) # just count cols to know how many years there are
  calc.ind <- tail(1:years.test, nyrs.to.use) # Which years to calculate median depletion over (length = nyrs.to.use)
  for(i in 1:length(to.use)){ # Trim results to yrs calculating metrics for
    to.use[[i]] <- to.use[[i]][,calc.ind]
  }
  mtu <- melt(to.use)
  colnames(mtu) <- c("Sim","Year","value","variable")
  toplot <- subset(mtu, Sim==sim)
  ggplot(toplot,aes(x=Year,y=value)) + geom_line() + facet_wrap(~variable,scales="free_y")
}
| /Plots/SummaryFxns.R | no_license | mcsiple/ff-mse2 | R | false | false | 15,260 | r | # Functions for summarizing and plotting performance measures
# Functions for calculating performance measures ---------------------------
library(RcppRoll)
# Coefficient of variation of a time series: sd / mean.
cv <- function(ts){
  sd(ts) / mean(ts)
}
# How many years with zero catch. x is a vector; NA entries are ignored
# (which() drops them in the original formulation, na.rm matches that).
nzeroes <- function(x){
  sum(x == 0, na.rm = TRUE)
}
# Number of years above a "good for predators" threshold: 80% of the long-term
# mean unfished biomass given recruitment variation (same as the number of
# "bonanza" years). x is the fished biomass series, F0.x the unfished one.
good4pred <- function(x, F0.x){
  thresh <- 0.8 * mean(F0.x)
  sum(x > thresh, na.rm = TRUE)
}
# Number of years below a "bad for predators" threshold: 20% of the long-term
# mean unfished biomass. (The original header comment said 10%, but the code
# uses 0.2; the code's behavior is preserved here.) x is a biomass time series.
bad4pred <- function(x, F0.x){
  thresh <- 0.2 * mean(F0.x)
  sum(x < thresh, na.rm = TRUE)
}
# Indices of "collapse" years (biomass below 20% of the mean unfished biomass);
# returns 0 when there are none. Use to ID collapse years and inspect fishing
# rates leading up to them.
collapse.index <- function(x, F0.x){
  thresh <- 0.2 * mean(F0.x)
  below <- x < thresh
  if (any(below)) which(below) else 0
}
# Indices of "bonanza" years (biomass above 80% of the mean unfished biomass);
# returns 0 when there are none. Use to ID bonanza years and the conditions
# leading up to them.
bonanza.index <- function(x, F0.x){
  thresh <- 0.8 * mean(F0.x)
  above <- x > thresh
  if (any(above)) which(above) else 0
}
# Min, max, and mean length (in years) of consecutive-collapse runs, based on
# the collapse years identified by collapse.index(). Returns NAs when there
# are no collapses.
duration.collapse <- function(x, F0.x){
  collapse_yrs <- collapse.index(x = x, F0.x = F0.x)
  in_collapse <- logical(length(F0.x))
  in_collapse[collapse_yrs] <- TRUE  # collapse_yrs == 0 indexes nothing, so "none" is safe
  runs <- rle(in_collapse)
  run_lengths <- runs$lengths[runs$values]
  if (length(run_lengths) == 0) {
    list("shortest.collapse" = NA,
         "longest.collapse" = NA,
         "avg.collapse.length" = NA)
  } else {
    list("shortest.collapse" = min(run_lengths, na.rm = TRUE),
         "longest.collapse" = max(run_lengths, na.rm = TRUE),
         "avg.collapse.length" = mean(run_lengths, na.rm = TRUE))
  }
}
# Min, max, and mean length (in years) of consecutive-bonanza runs, based on
# the bonanza years identified by bonanza.index(). Returns NAs when there are
# no bonanzas.
duration.bonanza <- function(x, F0.x){
  bonanza_yrs <- bonanza.index(x = x, F0.x = F0.x)
  in_bonanza <- logical(length(F0.x))
  in_bonanza[bonanza_yrs] <- TRUE  # bonanza_yrs == 0 indexes nothing, so "none" is safe
  runs <- rle(in_bonanza)
  run_lengths <- runs$lengths[runs$values]
  if (length(run_lengths) == 0) {
    list("shortest.bonanza" = NA,
         "longest.bonanza" = NA,
         "avg.bonanza.length" = NA)
  } else {
    list("shortest.bonanza" = min(run_lengths, na.rm = TRUE),
         "longest.bonanza" = max(run_lengths, na.rm = TRUE),
         "avg.bonanza.length" = mean(run_lengths, na.rm = TRUE))
  }
}
#mean.duration.bonanza(x=testie$biomass.total.true,F0.x = F0.Type)
# Count "bonafide" (extended) collapse periods in a time series.
#
# Args:
#   ts.length: length of the biomass time series (number of years).
#   coll.yrs:  indices of collapse years, as returned by collapse.index()
#              (0 when there are none; indexing with 0 marks nothing).
#
# Returns: integer count of collapse runs that qualify as "bonafide", i.e.
# runs of >= 2 collapsed years surrounded by sufficiently long non-collapsed
# stretches (exact rules in the inline conditions, preserved verbatim).
#
# NOTE(review): `c` and `d` shadow base::c inside this scope; it works because
# base::c is still found for calls, but renaming would be safer. The boundary
# rules (scalar `&`, `>= 4` vs `> 4` in the tail check) look deliberate but
# are undocumented upstream -- confirm before changing.
n.bonafide.collapses <- function(ts.length, coll.yrs){
  binary.coll.vec <- rep(0, times=ts.length)
  binary.coll.vec[coll.yrs] <- 1 # Turn into vector of 0s and 1s
  # run-length encode collapsed/non-collapsed stretches
  m <- rle(binary.coll.vec==1)
  c <- m$lengths
  d <- m$values
  counter <- 0
  if(any(d)){
    starts <- which(d) # indexes rle outputs to get locations where collapses start
    for(l in 1:length(starts)){
      ind.coll <- starts[l]
      # only runs of >= 2 collapsed years, not at the very start, and with at
      # least two more runs after them, are candidates
      if(c[ind.coll]>=2 & ind.coll != 1 & (ind.coll+2) < length(c)){
        #if the sequence of collapse is preceded and followed by at least 4 yrs non-collapse
        if(c[(ind.coll-1)]>=4 & c[ind.coll+1]>=4){counter <- counter+1} else{
          if(c[ind.coll+1]<=4 & c[ind.coll+2]>=2){counter <- counter +1} else{
            counter=counter}}}else{counter=counter}
      # if the 4 years leading up to the collapse are not-collapsed, and same with the four years after, it's a "bonafide collapse" (later called "extended collapse")
    }
    # Last case: if the final years are collapsed, need to count those!
    if(tail(d,1) & tail(c,1) > 4){counter=counter+1}else{counter=counter}
  }
  return(counter)
}
# Count 1-, 5- and 10-year fishery closures per simulation.
# x:         matrix, rows = simulations, cols = years (catch OR biomass).
# threshold: values <= threshold count as "closed"; if NA, defaults to 1% of
#            the row's long-term mean.
# Returns a list of per-simulation counts (count1, count5, count10). Note that
# RcppRoll::roll_sum counts overlapping windows, so e.g. a 6-yr closure
# contributes two 5-yr windows.
n.multiyr.closures <- function(x, threshold = NA) {
  count1 <- count5 <- count10 <- vector(length = nrow(x))
  for (i in seq_len(nrow(x))) {
    ltm <- mean(x[i, ], na.rm = TRUE)
    # Threshold: explicit value if supplied, otherwise 1% of the long-term mean
    thresh <- if (is.na(threshold)) 0.01 * ltm else threshold
    badTorF <- x[i, ] <= thresh
    count1[i] <- sum(roll_sum(badTorF, 1) == 1, na.rm = TRUE)    # number of closed years
    count5[i] <- sum(roll_sum(badTorF, 5) == 5, na.rm = TRUE)    # 5-yr windows fully closed
    count10[i] <- sum(roll_sum(badTorF, 10) == 10, na.rm = TRUE) # 10-yr windows fully closed
  }
  # BUG FIX: count10 was previously returned as `tenyr`, the scalar from the
  # LAST loop iteration only, silently discarding all other simulations.
  return(list(count1 = count1, count5 = count5, count10 = count10))
}
# Rcpproll demo:
# set.seed(1); x <- sample(c(T, F), 100, replace = T); sum(RcppRoll::roll_sum(x, 3) == 3)
# Get summary metrics for Zeh plots or quantiles
# Summarize performance metrics for one harvest rule.
# result.list: results for 1 harvest rule -- a list of nsims x nyears matrices
#   (total.catch, biomass.total.true, no.fishing.tb, depl, ...).
# individual.sim: if TRUE, return the per-simulation values (a list); otherwise
#   return a data frame of 5/50/95% quantiles per performance measure.
# calc.ind: indices of the years to compute metrics over.
#   NOTE(review): the default `calc.ind = calc.ind` is self-referential and
#   errors when the argument is omitted -- confirm callers always pass it.
# ou.ind: optional simulation (row) indices to keep; ignored when length 1 (NA).
# Relies on helpers defined elsewhere in this file (nzeroes, good4pred,
# bad4pred, collapse.index, cv) and the global `performance.measures`.
summ.tab <- function(result.list, individual.sim = FALSE,calc.ind = calc.ind, ou.ind = NA){
  # Trim all matrices to the analysis years (and optionally a subset of sims)
  for(i in 1:length(result.list)){
    result.list[[i]] <- result.list[[i]][,calc.ind]
    if(length(ou.ind)>1){result.list[[i]] <- result.list[[i]][ou.ind,]}
  }
  # Long-term mean (per simulation) of every metric, then 5/50/95% quantiles
  LTmeans.list <- lapply(result.list, FUN = rowMeans, na.rm = TRUE)
  ltm <- lapply(LTmeans.list, FUN = quantile, probs = c(0.05,0.5,0.95), na.rm = TRUE)
  # Mean nonzero catch: catches < 0.1 are treated as closures (NA) before averaging
  catch <- result.list$total.catch
  nonzero.catch <- ifelse(catch < 0.1, NA, catch)
  ltm.nzc1 <- rowMeans(nonzero.catch, na.rm = TRUE)
  ltm.nzc2 <- quantile(ltm.nzc1, probs = c(0.05,0.5,0.95), na.rm = TRUE)
  # SD of catch per simulation, summarized across simulations
  sd.catches <- apply(catch, MARGIN = 1, FUN = sd, na.rm = TRUE)
  sd.catch <- quantile(sd.catches, probs = c(0.05,0.5,0.95))
  # Number of zero-catch years per simulation (nzeroes() defined elsewhere)
  nz1 <- apply(catch, MARGIN = 1, FUN = nzeroes)
  # 5- and 10-yr closures
  n.5yrclose <- n.multiyr.closures(catch, threshold = 0)$count5 / nz1 # P(5yr closure | closure)
  # NOTE(review): dividing by n.5yrclose yields Inf/NaN when it is 0; this
  # metric was flagged as deprecated by the original author.
  n.10yrclose <- n.multiyr.closures(catch, threshold = 0)$count10 / n.5yrclose # P(10yr closure | 5yr closure) - deprecated
  # SD of true biomass per simulation
  true.biomass <- result.list$biomass.total.true
  sd.Bs <- apply(true.biomass, MARGIN = 1, FUN = sd, na.rm = TRUE)
  sd.B <- quantile(sd.Bs, probs = c(0.05,0.5,0.95))
  # Years that are "good for predators", per simulation
  g4p.vec <- vector()
  for(sim in 1:nrow(true.biomass)){
    # BUG FIX: was no.fishing.tb[sim] (a single matrix element, not a row);
    # now passes the whole unfished series, as every other call here does.
    g4p.vec[sim] <- good4pred(x = true.biomass[sim,], F0.x = result.list$no.fishing.tb[sim,])
  }
  # Number of years below a predator threshold, per simulation
  yrs.bad <- vector()
  for(sim in 1:nrow(true.biomass)){
    yrs.bad[sim] <- bad4pred(x = true.biomass[sim,], F0.x = result.list$no.fishing.tb[sim,])
  }
  # Collapse/bonanza durations, collapse probability & severity, depletion, CV(catch)
  max.duration.collapse = min.duration.collapse = avg.duration.collapse <- vector()
  max.duration.bonanza = min.duration.bonanza = avg.duration.bonanza <- vector()
  prob.collapse = collapse.severity = cv.vec = bonafide.collapse = vector()
  depl <- matrix(nrow = nrow(true.biomass), ncol = ncol(true.biomass))
  for(sim in 1:nrow(true.biomass)){ # Each iteration = 1 simulation
    tb <- true.biomass[sim,]
    F0 <- result.list$no.fishing.tb[sim,]
    # Compute each duration summary once per sim (the original recomputed
    # duration.collapse()/duration.bonanza() three times each)
    coll.dur <- duration.collapse(x = tb, F0.x = F0)
    bon.dur <- duration.bonanza(x = tb, F0.x = F0)
    max.duration.collapse[sim] <- coll.dur$longest.collapse
    min.duration.collapse[sim] <- coll.dur$shortest.collapse
    avg.duration.collapse[sim] <- coll.dur$avg.collapse.length
    max.duration.bonanza[sim] <- bon.dur$longest.bonanza
    min.duration.bonanza[sim] <- bon.dur$shortest.bonanza
    avg.duration.bonanza[sim] <- bon.dur$avg.bonanza.length
    # Probability of collapse = fraction of years classified as collapsed
    coll.yrs <- collapse.index(x = tb, F0.x = F0)
    prob.collapse[sim] <- length(coll.yrs) / length(tb)
    # Severity of collapse (as a percentage of mean unfished biomass): mean
    # biomass during collapse over the collapse threshold; subtracted from 1
    # so that a higher number == worse severity.
    coll.thresh <- mean(F0) * 0.2
    collapse.yrs <- tb[coll.yrs]
    collapse.severity[sim] <- 1 - (mean(collapse.yrs, na.rm = TRUE) / coll.thresh )
    # Depletion relative to unfished biomass
    depl[sim,] <- tb / F0
    # CV of catches (cv() defined elsewhere)
    cv.vec[sim] <- cv(catch[sim,])
    # N "bonafide" collapse periods (requires a streak of non-collapse years
    # before and after -- different from merely dipping below 0.2*Bbar)
    bonafide.collapse[sim] <- n.bonafide.collapses(ts.length = length(tb), coll.yrs = coll.yrs)
  } #end of sim loop
  # (removed: mean.depl <- apply(depl, 1, mean) -- it was never used)
  # Performance metrics: 5/50/95% quantiles across simulations
  interval <- c(0.05,0.5,0.95)
  ltm.c <- ltm$total.catch # LTM catch
  ltm.nzc <- ltm.nzc2 # LTM nonzero catch
  SDcatch <- sd.catch # SD(Catch)
  n5yr <- quantile(n.5yrclose, probs = interval, na.rm = TRUE) # n.5yrclose
  n10yr <- quantile(n.10yrclose, probs = interval, na.rm = TRUE) # n.10yrclose
  nz <- quantile(nz1, probs = interval) # nyrs.0catch
  ltm.b <- ltm$biomass.total.true # LTM biomass
  g4p <- quantile(g4p.vec, probs = interval) # N years "good for predator"
  sdB <- sd.B # SD(Biomass)
  b4p <- quantile(yrs.bad, probs = interval) # p(bad4preds)
  ltm.depl <- quantile(depl, probs = interval, na.rm = TRUE) # Mean depletion (quantiles over all sims x years)
  prob.coll <- quantile(prob.collapse, probs = interval, na.rm = TRUE)
  severity <- quantile(collapse.severity, probs = interval, na.rm = TRUE)
  cv.catch <- quantile(cv.vec, probs = interval, na.rm = TRUE)
  b.collapse <- quantile(bonafide.collapse, probs = interval, na.rm = TRUE)
  # Overall extremes get NA bounds so they line up with the 3-value quantile
  # rows of the other metrics (guard against all-NA inputs to max())
  if(all(is.na(max.duration.collapse))){
    overall.max.coll.len <- rep(NA, times = 3)
  }else{
    overall.max.coll.len <- c(NA, max(max.duration.collapse, na.rm = TRUE), NA)
  }
  if(all(is.na(max.duration.bonanza))){
    overall.max.bon.len <- rep(NA, times = 3)
  }else{
    overall.max.bon.len <- c(NA, max(max.duration.bonanza, na.rm = TRUE), NA)
  }
  if(all(is.na(avg.duration.bonanza))){
    bon.length <- rep(NA, times = 3)
  } else{
    bon.length <- quantile(avg.duration.bonanza, probs = interval, na.rm = TRUE)
  }
  if(all(is.na(avg.duration.collapse))){
    coll.length <- rep(NA, times = 3)
  } else{
    coll.length <- quantile(avg.duration.collapse, probs = interval, na.rm = TRUE)
  }
  # Quantile table: one row per entry of the global `performance.measures`
  output <- data.frame(PM = performance.measures, loCI = NA, med = NA, hiCI = NA)
  output[,-1] <- rbind(ltm.c,ltm.nzc,SDcatch,n5yr,n10yr,nz,ltm.b,g4p,sdB,b4p,ltm.depl,overall.max.coll.len,overall.max.bon.len,bon.length,coll.length,prob.coll,severity,cv.catch,b.collapse)
  # Per-simulation values, in this order:
  # 1 LTmean catch / 2 LTmean nonzero catch / 3 SDcatch / 4 n5yr / 5 n10yr /
  # 6 yrs w zero catch / 7 LTmean biomass / 8 yrs good4preds / 9 SDbiomass /
  # 10 yrs bad4preds / 11 ltm depletion / 12 mean bonanza length /
  # 13 mean collapse length / 14 probability of a collapse /
  # 15 collapse severity / 16 cv(catch) / 17 sustained collapse
  sim.output <- list()
  sim.output[[1]] <- LTmeans.list$total.catch
  sim.output[[2]] <- ltm.nzc1
  sim.output[[3]] <- sd.catches
  sim.output[[4]] <- n.5yrclose
  sim.output[[5]] <- n.10yrclose
  sim.output[[6]] <- nz1
  sim.output[[7]] <- LTmeans.list$biomass.total.true
  sim.output[[8]] <- g4p.vec
  sim.output[[9]] <- sd.Bs
  sim.output[[10]] <- yrs.bad
  sim.output[[11]] <- LTmeans.list$depl
  sim.output[[12]] <- avg.duration.bonanza
  sim.output[[13]] <- avg.duration.collapse
  # NOTE(review): entries 14-17 hold quantile triples, not per-simulation
  # vectors like entries 1-13 -- confirm whether prob.collapse,
  # collapse.severity, cv.vec and bonafide.collapse were intended here.
  sim.output[[14]] <- prob.coll
  sim.output[[15]] <- severity
  sim.output[[16]] <- cv.catch
  sim.output[[17]] <- b.collapse
  if(individual.sim==TRUE){return(sim.output)}
  else{return(output)}
}
# Function to plot medians and certainty intervals from simulations:
# Plot the per-year median time series with 50% and 95% simulation intervals
# as shaded polygons.
# result.mat: matrix, rows = simulations, cols = years (e.g. biomass).
# ylab: y-axis label.
# NOTE(review): relies on the globals calc.ind (year indices to plot) and
# nyrs.to.use; these must satisfy length(calc.ind) == nyrs.to.use -- confirm
# at call sites.
plotintervals <- function(result.mat,ylab){ #result.mat is a matrix (e.g., biomass for results[[1]])
median.vec <- apply(result.mat,MARGIN = 2,FUN = median)  # per-year median across simulations
ints <- apply(result.mat,MARGIN = 2,FUN = quantile, probs = c(0.025,0.25,0.75,0.975))  # 4 x nyears quantile matrix
lo95 <- ints[1,calc.ind]
hi95 <- ints[4,calc.ind]
lo50 <- ints[2,calc.ind]
hi50 <- ints[3,calc.ind]
plot(1:nyrs.to.use,median.vec[calc.ind],
type='l',lty=1,lwd=2,
ylim=range(lo95,hi95),
ylab = ylab,
xlab = "Year")
# Polygon path: forward along the years on the upper bound, then back in
# reverse on the lower bound, with one extra vertex at y = 0.
# NOTE(review): tail() of the scalar nyrs.to.use is just nyrs.to.use --
# presumably a leftover from a vector-valued version; confirm the intended
# closing vertex of the polygon.
zz <- c(1:nyrs.to.use,tail(nyrs.to.use,n=1),rev(1:nyrs.to.use)) # for polygons
aa <- c(hi95,0,rev(lo95))
bb <- c(hi50,0,rev(lo50))
polygon(zz,aa,col=adjustcolor( "black", alpha.f = 0.2),border="NA") # 95% band
polygon(zz,bb,col=adjustcolor( "black", alpha.f = 0.2),border="NA") # 50% band (darker where the two overlap)
}
## Add an alpha value to a colour (from Mages' blog, http://www.magesblog.com/2013/04/how-to-change-alpha-value-of-colours-in.html)
## Add an alpha (transparency) value to one or more colours (after Mages' blog,
## http://www.magesblog.com/2013/04/how-to-change-alpha-value-of-colours-in.html)
## col: vector of colour names/hex codes; alpha: opacity in [0, 1].
## Returns a named vector of #RRGGBBAA hex strings (names = input colours).
add.alpha <- function(col, alpha=1){
  if (missing(col))
    stop("Please provide a vector of colours.")
  scaled <- sapply(col, col2rgb) / 255     # 3 x n matrix of RGB channels in [0, 1]
  out <- character(ncol(scaled))
  names(out) <- colnames(scaled)
  for (j in seq_len(ncol(scaled))) {
    out[j] <- rgb(scaled[1, j], scaled[2, j], scaled[3, j], alpha = alpha)
  }
  out
}
# Quickly plot time series for a given scenario and simulation # ----------
# Quickly plot all time-series variables of one scenario/simulation as a
# faceted line chart (one facet per variable, free y scales).
# result.ind: index into the global `results` list (one element per scenario);
# nyrs.to.use: number of final years to keep; sim: which simulation to show.
# NOTE(review): depends on the global `results` and on melt() (reshape2) and
# ggplot2 being loaded -- confirm both are available when this is called.
plot.scenario <- function(result.ind, nyrs.to.use = 100, sim = 1){
# quickly plot basic stuff for one scenario (combo of LH traits and control rule)
to.use <- results[[result.ind]]
nsims <- nrow(to.use$biomass.oneplus.true) # just count nrows to know how many sims there are; NOTE(review): unused below
years.test <- ncol(to.use$biomass.oneplus.true) # just count cols to know how many years there are
calc.ind <- tail(1:years.test, nyrs.to.use) # Which years to calculate median depletion over (length = nyrs.to.use)
for(i in 1:length(to.use)){ # Trim results to yrs calculating metrics for
to.use[[i]] <- to.use[[i]][,calc.ind]
}
# Melt the list of matrices to long format: one row per (sim, year, variable)
mtu <- melt(to.use)
colnames(mtu) <- c("Sim","Year","value","variable")
toplot <- subset(mtu, Sim==sim)
ggplot(toplot,aes(x=Year,y=value)) + geom_line() + facet_wrap(~variable,scales="free_y")
}
|
# Build the output .Rdata filename from the scale vector and truncation options.
# scaleVset: vector whose elements are concatenated into the first name part.
# option: list with logical flags ye_trunc/yf_trunc/yef_trunc, a `truncate`
#         string (first 4 chars used) and a numeric `truncate_crt` (scaled by
#         10000, first 4 chars used -- i.e. expressed in basis points).
# NOTE(review): the defaults `scaleVset=scaleVset, option=option` are
# self-referential and error if the arguments are omitted -- confirm callers
# always supply both.
fcn_filename_maker<- function(scaleVset=scaleVset, option=option ) {
cc0 = ""
for (ii in c(1:length(scaleVset))) cc0 = paste(cc0,scaleVset[ii],sep="")
cc1 = ""
if (option$ye_trunc) cc1 = paste(cc1,"ye",sep="")
if (option$yf_trunc) cc1 = paste(cc1,"yf",sep="")
if (option$yef_trunc) cc1 = paste(cc1,"yef",sep="")
cc1 = paste(cc1,substr(option$truncate,1,4),sep="_")
cc1 = paste(cc1,substr(as.character(option$truncate_crt*10000),1,4),sep="_")
filename = sprintf("TS_firm_data_%s_%sbp.Rdata",cc0, cc1)
return(filename)
} | /code/functions/fcn_filename_maker.R | no_license | jamesyae/LTY-JFE-2016 | R | false | false | 537 | r | fcn_filename_maker<- function(scaleVset=scaleVset, option=option ) {
cc0 = ""
for (ii in c(1:length(scaleVset))) cc0 = paste(cc0,scaleVset[ii],sep="")
cc1 = ""
if (option$ye_trunc) cc1 = paste(cc1,"ye",sep="")
if (option$yf_trunc) cc1 = paste(cc1,"yf",sep="")
if (option$yef_trunc) cc1 = paste(cc1,"yef",sep="")
cc1 = paste(cc1,substr(option$truncate,1,4),sep="_")
cc1 = paste(cc1,substr(as.character(option$truncate_crt*10000),1,4),sep="_")
filename = sprintf("TS_firm_data_%s_%sbp.Rdata",cc0, cc1)
return(filename)
} |
## Exploratory Data Analysis Course Project
## Plot 5
if (!exists("NEI") || !exists("SCC") ) {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
destfile = "data.zip")
unzip("data.zip", overwrite = TRUE)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
}
## Get list of SCC for motor vehicle sources
SCC_motor <- SCC[grepl("Vehicles", SCC$SCC.Level.Two),]
scc_list <- SCC_motor$SCC
## Filter NEI list on SCC and reduce to Baltimore City
NEI_motor <- subset(NEI, NEI$SCC %in% scc_list)
NEI_motor <- subset(NEI_motor, NEI_motor$fips == "24510")
## Plot by year and total sum
if (!exists("NEI_motor_sum")) {
NEI_motor_sum <- aggregate(Emissions ~ year, NEI_motor, sum)
}
png(filename = "plot5.png")
plot(NEI_motor_sum$year,
NEI_motor_sum$Emissions, pch = 20,
main = "Total Motor Vehicle emissions by year in Baltimore",
xlab = "Year",
ylab = "Plot 5: Total Motor Vehicle Emissions")
dev.off() | /plot5.R | no_license | FathersNelsons/ExploratoryDataAnalysisCourseProject | R | false | false | 1,108 | r | ## Exploratory Data Analysis Course Project
## Plot 5
if (!exists("NEI") || !exists("SCC") ) {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
destfile = "data.zip")
unzip("data.zip", overwrite = TRUE)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
}
## Get list of SCC for motor vehicle sources
SCC_motor <- SCC[grepl("Vehicles", SCC$SCC.Level.Two),]
scc_list <- SCC_motor$SCC
## Filter NEI list on SCC and reduce to Baltimore City
NEI_motor <- subset(NEI, NEI$SCC %in% scc_list)
NEI_motor <- subset(NEI_motor, NEI_motor$fips == "24510")
## Plot by year and total sum
if (!exists("NEI_motor_sum")) {
NEI_motor_sum <- aggregate(Emissions ~ year, NEI_motor, sum)
}
png(filename = "plot5.png")
plot(NEI_motor_sum$year,
NEI_motor_sum$Emissions, pch = 20,
main = "Total Motor Vehicle emissions by year in Baltimore",
xlab = "Year",
ylab = "Plot 5: Total Motor Vehicle Emissions")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotNpde-plotFunctions.R
\name{npde.plot.vpc}
\alias{npde.plot.vpc}
\title{Visual Predictive Check (VPC)}
\usage{
npde.plot.vpc(npdeObject, npc=FALSE, ...)
}
\arguments{
\item{npdeObject}{an object returned by a call to \code{\link{npde}} or \code{\link{autonpde}}}
\item{npc}{a boolean, indicating whether to compute Numerical Predictive Checks (not yet implemented)}
\item{\dots}{additional arguments to be passed on to the function, to control which metric (npde, pd, npd) is used or to override graphical parameters (see the PDF document for details, as well as \code{\link{set.plotoptions}})}
}
\description{
Produces a VPC plot for the data using the simulated data provided. Note that non-stratified VPC are not suited to unbalanced designs
when features such as dose or covariates enter the model. We suggest using reference profiles instead to retain a VPC-like profile
while ensuring meaningful prediction intervals (Comets et al. 2013).
}
\references{
K. Brendel, E. Comets, C. Laffont, C. Laveille, and F. Mentre. Metrics for external model evaluation with an application to the
population pharmacokinetics of gliclazide. \emph{Pharmaceutical Research}, 23:2036--49, 2006.
E. Comets, THT Nguyen, F. Mentré. Additional features and graphs in the new npde library for R.
\emph{22nd PAGE meeting, Glasgow, UK}, 2013.
}
\seealso{
\code{\link{npde}}, \code{\link{autonpde}}, \code{\link{set.plotoptions}}
}
\author{
Emmanuelle Comets <emmanuelle.comets@bichat.inserm.fr>
}
\keyword{plot}
| /man/npde.plot.vpc.Rd | no_license | ecomets/npde20 | R | false | true | 1,579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotNpde-plotFunctions.R
\name{npde.plot.vpc}
\alias{npde.plot.vpc}
\title{Visual Predictive Check (VPC)}
\usage{
npde.plot.vpc(npdeObject, npc=FALSE, ...)
}
\arguments{
\item{npdeObject}{an object returned by a call to \code{\link{npde}} or \code{\link{autonpde}}}
\item{npc}{a boolean, indicating whether to compute Numerical Predictive Checks (not yet implemented)}
\item{\dots}{additional arguments to be passed on to the function, to control which metric (npde, pd, npd) is used or to override graphical parameters (see the PDF document for details, as well as \code{\link{set.plotoptions}})}
}
\description{
Produces a VPC plot for the data using the simulated data provided. Note that non-stratified VPC are not suited to unbalanced designs
when features such as dose or covariates enter the model. We suggest using reference profiles instead to retain a VPC-like profile
while ensuring meaningful prediction intervals (Comets et al. 2013).
}
\references{
K. Brendel, E. Comets, C. Laffont, C. Laveille, and F. Mentre. Metrics for external model evaluation with an application to the
population pharmacokinetics of gliclazide. \emph{Pharmaceutical Research}, 23:2036--49, 2006.
E. Comets, THT Nguyen, F. Mentré. Additional features and graphs in the new npde library for R.
\emph{22nd PAGE meeting, Glasgow, UK}, 2013.
}
\seealso{
\code{\link{npde}}, \code{\link{autonpde}}, \code{\link{set.plotoptions}}
}
\author{
Emmanuelle Comets <emmanuelle.comets@bichat.inserm.fr>
}
\keyword{plot}
|
library(riem)
#riem_networks()
riem_stations("NF__ASOS") | /src/Kiwi_Weather.func.R | no_license | donaldan/World-Weather_c994373987f3 | R | false | false | 57 | r | library(riem)
#riem_networks()
riem_stations("NF__ASOS") |
################ EXPERIMENT 2 ##########################
#
# This script analyzes the results for experiment 2
#
########################################################
#------------------ LIBRARIES --------------------------#
## Load dependencies, installing only the ones that are missing.
## (The original unconditionally called install.packages() on every run,
## hitting the network and reinstalling each time the script was sourced.)
for (pkg in c("ggplot2", "boot", "gridExtra")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
#-------------------------------------------------------#
#------------------ NAMES AND LABELS -------------------#
### short names for columns
crowd <- 'diff_crowding'
incrowd <- 'diff_innercrowding'
deform <- 'diff_deformation'
norm_error <- 'normalized_error'
pLabeled <- 'correct_targets_percent'
pClicked <- 'common_targets_percent'
labeled <- 'correct_targets'
clicked <- 'common_targets'
mislabeled <- 'targets_switch'
### text labels for charts
lDeform <- 'Deformation'
lIncrowd <- 'Inner crowding'
lCrowd <- 'Crowding'
lNorm_error <- 'Normalized error'
lPLabeled <- 'Accuracy (correct labeled / total targets)'
lPClicked <- 'Accuracy click (correct clicked / total targets)'
lPMislabeled <- 'Mislabeling (mislabeled / total targets)'
lPMislabeledPerClicks <- 'Mislabeling (mislabeled / correct clicked)'
lTaskA <- 'No ID'
lTaskB <- 'ID'
#-------------------------------------------------------#
#------------ LOAD AND CHECK DATA ----------------------#
## Input log file and experimental design constants (used for sanity checks)
logs <- "staggeredAnimation-xp2-logs.csv"
NB_PARTICIPANTS <- 4 ## For sanity check
NB_REPS <- 5
NB_BLOCKS <- 16
NB_TARGETS <- 3
## Load the trial logs (tab-separated) and drop practice trials
data <- read.csv(logs, sep = "\t", header = TRUE)
data <- data[data$practice != 'true', ] ## Filter out practice trials
### SANITY CHECK: expect one trial per participant x repetition x block.
### (Was cat() to stdout with no newline; warning() is visible and loggable.)
if (nrow(data) != NB_PARTICIPANTS * NB_REPS * NB_BLOCKS) {
  warning("wrong number of trials", call. = FALSE)
}
#-------------------------------------------------------#
#--------------- USEFUL FUNCTIONS ----------------------#
##### get subset corresponding to a difficulty level 'level' of a measure 'measure'
##### Return the subset of trials in x where the difficulty column `measure`
##### equals `level` (e.g. getSubsetByDifficulty(condA, innercrowd, 'E')).
##### NOTE(review): the sanity check references NB_CONDITIONS, which is not
##### defined in this script (only NB_PARTICIPANTS / NB_REPS / NB_BLOCKS are)
##### -- confirm it is defined elsewhere before calling.
getSubsetByDifficulty <- function(x, measure, level) {
result <- x[x[,measure]==level,]
if (length(result[,1]) != NB_PARTICIPANTS*NB_REPS*NB_CONDITIONS/2) ### sanity check
cat("Warning: wrong number of trials in getSubsetByDifficulty")
return (result)
}
## Exemple: call getSubsetByDifficulty(condA, innercrowd, 'E')
# Aggregate `measure` per participant, using the mean when f == "mean" and
# the median for any other value of f.
aggregateParticipant <- function(x,measure,f) {
  agg.fun <- if (f == "mean") aggregateMean else aggregateMedian
  agg.fun(x, measure)
}
###### get mean per participant
###### Per-participant mean of column `measure`.
###### Returns a 2-column data frame: participant, mean value.
aggregateMean <- function(x, measure) {
  per.participant <- x[, measure] ~ participant
  aggregate(per.participant, data = x, FUN = mean)
}
###### get median per participant
###### Per-participant median of column `measure`.
###### Returns a 2-column data frame: participant, median value.
aggregateMedian <- function(x, measure) {
  per.participant <- x[, measure] ~ participant
  aggregate(per.participant, data = x, FUN = median)
}
##### get global percent of success
##### Global percent of success: per-participant sum of `measure`, divided by
##### the total number of trials.
##### n.trials: total trial count per participant; defaults to 48, the value
##### hard-coded in the original (now a parameter so other designs can reuse it).
getPercent <- function(x, measure, n.trials = 48) {
  aggdata <- aggregate(x[,measure] ~ participant, data = x, sum)
  aggdata[,2] <- aggdata[,2] / n.trials  # convert count to proportion
  return (aggdata)
}
### get mean of a vector by Measure
### Mean of column `measure` of data frame x
getMean <- function(x,measure) {
  mean(x[[measure]])
}
### get median of a vector by Measure
### Median of column `measure` of data frame x
getMedian <- function(x,measure) {
  median(x[[measure]])
}
### get confidence interval
### 95% bootstrap confidence interval of x; f selects the statistic
### ("mean" -> mean, anything else -> median).
getCI <- function(x,f) {
  if (f == "mean") {
    getCIMean(x)
  } else {
    getCIMedian(x)
  }
}
### get Confidence interval (mean)
### 95% bootstrap confidence interval of the mean (percentile method,
### 5000 resamples via boot::boot). Returns the 2.5% and 97.5% quantiles of
### the bootstrap replicates.
### NOTE(review): stochastic -- set a seed beforehand for reproducible CIs.
getCIMean <- function(x) {
  number.samples <- 5000
  sample.mean <- function(x, index) {
    return(mean(x[index]))
  }
  boot.object <- boot(x, sample.mean, R = number.samples)
  # Percentile interval from the bootstrap replicates (boot.object$t).
  # (Removed an unused `number.measures` variable and a dead `boot.object$t0`
  # expression whose value was discarded.)
  confidence.intervals <- quantile(boot.object$t, c(0.025, 0.975))
  return (confidence.intervals)
}
### get Confidence interval (median)
### 95% bootstrap confidence interval of the median (percentile method,
### 5000 resamples via boot::boot). Returns the 2.5% and 97.5% quantiles of
### the bootstrap replicates.
### NOTE(review): stochastic -- set a seed beforehand for reproducible CIs.
getCIMedian <- function(x) {
  number.samples <- 5000
  sample.median <- function(x, index) {
    return(median(x[index]))
  }
  boot.object <- boot(x, sample.median, R = number.samples)
  # Percentile interval from the bootstrap replicates (boot.object$t).
  # (Removed an unused `number.measures` variable and a dead `boot.object$t0`
  # expression whose value was discarded.)
  confidence.intervals <- quantile(boot.object$t, c(0.025, 0.975))
  return (confidence.intervals)
}
## get Lower value of a CI,
# Param: column=<normalized_error, accuracy>, f=<"mean", "median">
## Lower bound (2.5% quantile) of the bootstrap CI.
## f selects the statistic: "mean" or "median" (see getCI).
getLowerCI <- function(x, f) {
  bounds <- as.data.frame(getCI(x, f))
  bounds[1, 1]
}
## Upper bound (97.5% quantile) of the bootstrap CI.
## f selects the statistic: "mean" or "median" (see getCI).
getUpperCI <- function(x, f) {
  bounds <- as.data.frame(getCI(x, f))
  bounds[2, 1]
}
#-------------------------------------------------------#
#--------------- FUNCTIONS FOR PLOTS ----------------------#
## Split the aggregate log into one subset per block (off1..off8, on1..on8),
## replacing 16 copy-pasted subset lines with a loop over block numbers.
for (b in 1:8) {
  assign(paste0("off", b), data[data$bloc_name == paste0("off", b), ])
  assign(paste0("on", b), data[data$bloc_name == paste0("on", b), ])
}
## Per-block difference in normalized error (off - on), as in the original.
## NOTE(review): the sign convention here is off - on, while the
## per-participant diffs further below use on - off -- confirm which
## direction is intended before comparing the two.
for (b in 1:8) {
  assign(paste0("diff", b),
         get(paste0("off", b))$normalized_error - get(paste0("on", b))$normalized_error)
}
#-------------------------------------------------------#
### LOGS PARTICIPANTS
### LOGS PARTICIPANTS
### Per-participant result files logsP01..logsP20, built from one path
### pattern instead of 20 copy-pasted absolute-path assignments.
### NOTE(review): these are machine-specific absolute paths -- consider
### making the base directory configurable for portability.
for (p in 1:20) {
  pid <- sprintf("p%02d", p)
  assign(paste0("logs", toupper(pid)),
         sprintf("/Users/fanny/Documents/workspace/staggeredAnim-Pierre/data/xp2-main/%s/results/results_%s.csv",
                 pid, pid))
}
## Per-participant processing, P01-P07: read each participant's log, drop
## practice trials, split into per-block subsets (off1..off8 / on1..on8) and
## compute the per-block ON - OFF normalized-error differences diff<b>_P<xx>.
## Replaces seven identical copy-pasted blocks with one loop; all global
## variable names are preserved, and off*/on*/data keep their original final
## values (those of the last participant processed, P07).
for (p in 1:7) {
  pid <- sprintf("P%02d", p)
  data <- read.csv(get(paste0("logs", pid)), sep = "\t", header = TRUE)
  data <- data[data$practice != 'true', ]  ## Filter out practice trials
  for (b in 1:8) {
    assign(paste0("off", b), data[data$bloc_name == paste0("off", b), ])
    assign(paste0("on", b), data[data$bloc_name == paste0("on", b), ])
  }
  for (b in 1:8) {
    assign(paste0("diff", b, "_", pid),
           get(paste0("on", b))$normalized_error - get(paste0("off", b))$normalized_error)
  }
}
## P08
data = read.csv(logsP08, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P08 <- on1$normalized_error - off1$normalized_error
diff2_P08 <- on2$normalized_error - off2$normalized_error
diff3_P08 <- on3$normalized_error - off3$normalized_error
diff4_P08 <- on4$normalized_error - off4$normalized_error
diff5_P08 <- on5$normalized_error - off5$normalized_error
diff6_P08 <- on6$normalized_error - off6$normalized_error
diff7_P08 <- on7$normalized_error - off7$normalized_error
diff8_P08 <- on8$normalized_error - off8$normalized_error
## P09
data = read.csv(logsP09, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P09 <- on1$normalized_error - off1$normalized_error
diff2_P09 <- on2$normalized_error - off2$normalized_error
diff3_P09 <- on3$normalized_error - off3$normalized_error
diff4_P09 <- on4$normalized_error - off4$normalized_error
diff5_P09 <- on5$normalized_error - off5$normalized_error
diff6_P09 <- on6$normalized_error - off6$normalized_error
diff7_P09 <- on7$normalized_error - off7$normalized_error
diff8_P09 <- on8$normalized_error - off8$normalized_error
## P10
data = read.csv(logsP10, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P10 <- on1$normalized_error - off1$normalized_error
diff2_P10 <- on2$normalized_error - off2$normalized_error
diff3_P10 <- on3$normalized_error - off3$normalized_error
diff4_P10 <- on4$normalized_error - off4$normalized_error
diff5_P10 <- on5$normalized_error - off5$normalized_error
diff6_P10 <- on6$normalized_error - off6$normalized_error
diff7_P10 <- on7$normalized_error - off7$normalized_error
diff8_P10 <- on8$normalized_error - off8$normalized_error
## P11
data = read.csv(logsP11, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P11 <- on1$normalized_error - off1$normalized_error
diff2_P11 <- on2$normalized_error - off2$normalized_error
diff3_P11 <- on3$normalized_error - off3$normalized_error
diff4_P11 <- on4$normalized_error - off4$normalized_error
diff5_P11 <- on5$normalized_error - off5$normalized_error
diff6_P11 <- on6$normalized_error - off6$normalized_error
diff7_P11 <- on7$normalized_error - off7$normalized_error
diff8_P11 <- on8$normalized_error - off8$normalized_error
## P12
data = read.csv(logsP12, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P12 <- on1$normalized_error - off1$normalized_error
diff2_P12 <- on2$normalized_error - off2$normalized_error
diff3_P12 <- on3$normalized_error - off3$normalized_error
diff4_P12 <- on4$normalized_error - off4$normalized_error
diff5_P12 <- on5$normalized_error - off5$normalized_error
diff6_P12 <- on6$normalized_error - off6$normalized_error
diff7_P12 <- on7$normalized_error - off7$normalized_error
diff8_P12 <- on8$normalized_error - off8$normalized_error
## P13
data = read.csv(logsP13, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P13 <- on1$normalized_error - off1$normalized_error
diff2_P13 <- on2$normalized_error - off2$normalized_error
diff3_P13 <- on3$normalized_error - off3$normalized_error
diff4_P13 <- on4$normalized_error - off4$normalized_error
diff5_P13 <- on5$normalized_error - off5$normalized_error
diff6_P13 <- on6$normalized_error - off6$normalized_error
diff7_P13 <- on7$normalized_error - off7$normalized_error
diff8_P13 <- on8$normalized_error - off8$normalized_error
## P14
data = read.csv(logsP14, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P14 <- on1$normalized_error - off1$normalized_error
diff2_P14 <- on2$normalized_error - off2$normalized_error
diff3_P14 <- on3$normalized_error - off3$normalized_error
diff4_P14 <- on4$normalized_error - off4$normalized_error
diff5_P14 <- on5$normalized_error - off5$normalized_error
diff6_P14 <- on6$normalized_error - off6$normalized_error
diff7_P14 <- on7$normalized_error - off7$normalized_error
diff8_P14 <- on8$normalized_error - off8$normalized_error
## P15
data = read.csv(logsP15, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P15 <- on1$normalized_error - off1$normalized_error
diff2_P15 <- on2$normalized_error - off2$normalized_error
diff3_P15 <- on3$normalized_error - off3$normalized_error
diff4_P15 <- on4$normalized_error - off4$normalized_error
diff5_P15 <- on5$normalized_error - off5$normalized_error
diff6_P15 <- on6$normalized_error - off6$normalized_error
diff7_P15 <- on7$normalized_error - off7$normalized_error
diff8_P15 <- on8$normalized_error - off8$normalized_error
## P16
data = read.csv(logsP16, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P16 <- on1$normalized_error - off1$normalized_error
diff2_P16 <- on2$normalized_error - off2$normalized_error
diff3_P16 <- on3$normalized_error - off3$normalized_error
diff4_P16 <- on4$normalized_error - off4$normalized_error
diff5_P16 <- on5$normalized_error - off5$normalized_error
diff6_P16 <- on6$normalized_error - off6$normalized_error
diff7_P16 <- on7$normalized_error - off7$normalized_error
diff8_P16 <- on8$normalized_error - off8$normalized_error
## P17
data = read.csv(logsP17, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P17 <- on1$normalized_error - off1$normalized_error
diff2_P17 <- on2$normalized_error - off2$normalized_error
diff3_P17 <- on3$normalized_error - off3$normalized_error
diff4_P17 <- on4$normalized_error - off4$normalized_error
diff5_P17 <- on5$normalized_error - off5$normalized_error
diff6_P17 <- on6$normalized_error - off6$normalized_error
diff7_P17 <- on7$normalized_error - off7$normalized_error
diff8_P17 <- on8$normalized_error - off8$normalized_error
## P18
data = read.csv(logsP18, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P18 <- on1$normalized_error - off1$normalized_error
diff2_P18 <- on2$normalized_error - off2$normalized_error
diff3_P18 <- on3$normalized_error - off3$normalized_error
diff4_P18 <- on4$normalized_error - off4$normalized_error
diff5_P18 <- on5$normalized_error - off5$normalized_error
diff6_P18 <- on6$normalized_error - off6$normalized_error
diff7_P18 <- on7$normalized_error - off7$normalized_error
diff8_P18 <- on8$normalized_error - off8$normalized_error
## P19
data = read.csv(logsP19, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P19 <- on1$normalized_error - off1$normalized_error
diff2_P19 <- on2$normalized_error - off2$normalized_error
diff3_P19 <- on3$normalized_error - off3$normalized_error
diff4_P19 <- on4$normalized_error - off4$normalized_error
diff5_P19 <- on5$normalized_error - off5$normalized_error
diff6_P19 <- on6$normalized_error - off6$normalized_error
diff7_P19 <- on7$normalized_error - off7$normalized_error
diff8_P19 <- on8$normalized_error - off8$normalized_error
## P20
data = read.csv(logsP20, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P20 <- on1$normalized_error - off1$normalized_error
diff2_P20 <- on2$normalized_error - off2$normalized_error
diff3_P20 <- on3$normalized_error - off3$normalized_error
diff4_P20 <- on4$normalized_error - off4$normalized_error
diff5_P20 <- on5$normalized_error - off5$normalized_error
diff6_P20 <- on6$normalized_error - off6$normalized_error
diff7_P20 <- on7$normalized_error - off7$normalized_error
diff8_P20 <- on8$normalized_error - off8$normalized_error
## Pool the per-participant difference vectors into four samples, one per
## condition (labels taken from the plot titles used below in this script):
##   d1: blocs 1 & 5 ("Y order Slow")
##   d2: blocs 2 & 6 ("Y order Fast")
##   d3: blocs 3 & 7 ("Smart Slow")
##   d4: blocs 4 & 8 ("Smart Fast")
## The original spelled out 40 variable names per vector; the helper fetches
## the same diff<a>_Pxx / diff<b>_Pxx globals for P01..P20 in the identical
## order (per participant: bloc a first, then bloc b).
pool_diffs <- function(a, b) {
  ## Concatenate diff<a>_Pxx and diff<b>_Pxx across all 20 participants.
  unlist(lapply(sprintf("P%02d", 1:20), function(p) {
    c(get(paste0("diff", a, "_", p)),
      get(paste0("diff", b, "_", p)))
  }), use.names = FALSE)
}
d1 <- pool_diffs(1, 5)
d2 <- pool_diffs(2, 6)
d3 <- pool_diffs(3, 7)
d4 <- pool_diffs(4, 8)
## 2x2 page of boxplots overlaid with beeswarms, one panel per condition.
## NOTE(review): output path is hard-coded to a user-specific location.
pdf(file = "/Users/fanny/Desktop/xp2-swarms-ErrorIdentification.pdf",
    width = 9, height = 9)
par(mfrow = c(2, 2))
invisible(Map(function(sample, title, swarm_col) {
  boxplot(sample, ylim = c(-1, 1),
          outline = FALSE,  ## avoid double-plotting outliers, if any
          main = title)
  beeswarm(sample, col = swarm_col, add = TRUE)
}, list(d1, d2, d3, d4),
   c("Y order Slow", "Y order Fast", "Smart Slow", "Smart Fast"),
   c("darkgoldenrod1", "orange3", "darkgoldenrod1", "orange3")))
dev.off()
#### Normalized error as if A
## Per-participant on-vs-off differences in normalized_error_asifA for
## P01..P10 (the P11 chunk continues below).  Same structure as the
## normalized_error section above, with two fixes over the copy-pasted
## original:
##   * `header = TRUE` spelled out (was the partially matched `head=TRUE`),
##   * `<-` instead of `=` for assignment.
## The sixteen bloc subsets off1..off8 / on1..on8 are kept as globals,
## exactly as the original did.
for (pid in sprintf("P%02d", 1:10)) {
  data <- read.csv(get(paste0("logs", pid)), sep = "\t", header = TRUE)
  data <- data[data$practice != "true", ]  ## Filter out practice trials
  for (i in 1:8) {
    assign(paste0("off", i), data[data$bloc_name == paste0("off", i), ])
    assign(paste0("on", i),  data[data$bloc_name == paste0("on", i), ])
  }
  for (i in 1:8) {
    assign(paste0("diff", i, "_", pid),
           get(paste0("on", i))$normalized_error_asifA -
             get(paste0("off", i))$normalized_error_asifA)
  }
}
## P11
data = read.csv(logsP11, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P11 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P11 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P11 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P11 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P11 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P11 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P11 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P11 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P12
data = read.csv(logsP12, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P12 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P12 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P12 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P12 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P12 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P12 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P12 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P12 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P13
data = read.csv(logsP13, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P13 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P13 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P13 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P13 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P13 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P13 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P13 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P13 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P14
data = read.csv(logsP14, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P14 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P14 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P14 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P14 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P14 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P14 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P14 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P14 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P15
data = read.csv(logsP15, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P15 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P15 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P15 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P15 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P15 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P15 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P15 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P15 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P06
data = read.csv(logsP16, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P16 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P16 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P16 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P16 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P16 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P16 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P16 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P16 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P17
data = read.csv(logsP17, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P17 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P17 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P17 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P17 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P17 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P17 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P17 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P17 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P18
data = read.csv(logsP18, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P18 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P18 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P18 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P18 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P18 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P18 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P18 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P18 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P19
data = read.csv(logsP19, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P19 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P19 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P19 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P19 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P19 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P19 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P19 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P19 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P20
data = read.csv(logsP20, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P20 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P20 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P20 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P20 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P20 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P20 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P20 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P20 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## Pool the per-participant ON-OFF differences into four vectors, one
## per condition panel:
##   d1: blocks 1 & 5,  d2: blocks 2 & 6,
##   d3: blocks 3 & 7,  d4: blocks 4 & 8.
## Concatenation order matches the original unrolled c(...) calls:
## P01 block a, P01 block b, P02 block a, P02 block b, ..., P20.
.participants <- sprintf("P%02d", 1:20)

## Collect diff<a>_<p> then diff<b>_<p> for every participant p,
## preserving the interleaved per-participant order.
.pool_diffs <- function(a, b) {
  unlist(lapply(.participants, function(p) {
    c(get(paste0("diff", a, "_", p)), get(paste0("diff", b, "_", p)))
  }), use.names = FALSE)
}

d1 <- .pool_diffs(1, 5)
d2 <- .pool_diffs(2, 6)
d3 <- .pool_diffs(3, 7)
d4 <- .pool_diffs(4, 8)
## Figure: 2x2 grid of boxplots with beeswarm overlays of the pooled
## ON-OFF differences (d1..d4), written to a PDF.
pdf(file = "/Users/fanny/Desktop/xp2-swarms-ErrorSelection.pdf", width=9, height=9)
par(mfrow=c(2,2))
panels <- list(
  list(values = d1, title = 'Y order Slow', colour = "darkgoldenrod1"),
  list(values = d2, title = 'Y order Fast', colour = "orange3"),
  list(values = d3, title = 'Smart Slow',   colour = "darkgoldenrod1"),
  list(values = d4, title = 'Smart Fast',   colour = "orange3")
)
for (panel in panels) {
  ## outline = FALSE: outliers are drawn once by the beeswarm overlay,
  ## not double-plotted by the boxplot.
  boxplot(panel$values, ylim = c(-1, 1),
          outline = FALSE,
          main = panel$title)
  beeswarm(panel$values,
           col = panel$colour, add = TRUE)
}
dev.off()
##### ACCURACY
## Per-participant ON vs OFF differences in selection accuracy
## (common_targets_percent), participants P01 through P13; the sections
## for P14 onward follow below with the same pattern.
##
## For each participant p and each block index b in 1..8 this creates a
## global vector diff<b>_<p> holding
##   on<b>$common_targets_percent - off<b>$common_targets_percent.
## NOTE(review): as above, the subtraction pairs trials by row position
## within each on/off block pair — assumes equal trial counts in
## matching order; verify against the logging format.
##
## As in the original unrolled code, the per-block subsets are also kept
## in globals off1..off8 / on1..on8 (and `data` keeps the last
## participant's table) for any later code that relies on them.
for (p in sprintf("P%02d", 1:13)) {
  ## logsP01..logsP13 are tab-separated log file paths defined earlier
  ## in the script.
  data <- read.csv(get(paste0("logs", p)), sep = "\t", header = TRUE)
  data <- data[data$practice != "true", ]  # filter out practice trials
  for (b in 1:8) {
    off_b <- data[data$bloc_name == paste0("off", b), ]
    on_b  <- data[data$bloc_name == paste0("on",  b), ]
    assign(paste0("off", b), off_b)
    assign(paste0("on",  b), on_b)
    assign(paste0("diff", b, "_", p),
           on_b$common_targets_percent - off_b$common_targets_percent)
  }
}
## P14
data = read.csv(logsP14, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P14 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P14 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P14 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P14 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P14 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P14 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P14 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P14 <- on8$common_targets_percent - off8$common_targets_percent
## P15
data = read.csv(logsP15, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P15 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P15 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P15 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P15 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P15 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P15 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P15 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P15 <- on8$common_targets_percent - off8$common_targets_percent
## P16
data = read.csv(logsP16, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P16 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P16 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P16 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P16 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P16 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P16 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P16 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P16 <- on8$common_targets_percent - off8$common_targets_percent
## P17
data = read.csv(logsP17, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P17 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P17 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P17 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P17 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P17 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P17 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P17 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P17 <- on8$common_targets_percent - off8$common_targets_percent
## P18
data = read.csv(logsP18, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P18 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P18 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P18 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P18 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P18 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P18 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P18 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P18 <- on8$common_targets_percent - off8$common_targets_percent
## P19
data = read.csv(logsP19, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P19 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P19 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P19 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P19 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P19 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P19 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P19 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P19 <- on8$common_targets_percent - off8$common_targets_percent
## P20
data = read.csv(logsP20, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P20 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P20 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P20 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P20 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P20 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P20 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P20 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P20 <- on8$common_targets_percent - off8$common_targets_percent
## Pool the per-participant difference vectors across all 20 participants,
## pairing the two blocks of each condition:
##   d1 = blocks 1 & 5 ("Y order Slow"), d2 = blocks 2 & 6 ("Y order Fast"),
##   d3 = blocks 3 & 7 ("Smart Slow"),  d4 = blocks 4 & 8 ("Smart Fast").
## Concatenation order is P01..P20, block a then block b for each
## participant — identical to the original explicit c(...) calls.
participants <- sprintf("P%02d", 1:20)
pool_diffs <- function(a, b) {
  ## Look up diff<a>_Pxx and diff<b>_Pxx for every participant and
  ## flatten into one numeric vector.
  unlist(lapply(participants, function(p) {
    c(get(paste0("diff", a, "_", p)), get(paste0("diff", b, "_", p)))
  }))
}
d1 <- pool_diffs(1, 5)
d2 <- pool_diffs(2, 6)
d3 <- pool_diffs(3, 7)
d4 <- pool_diffs(4, 8)
## Render a one-file PDF summary: a title page followed by a 2x2 grid of
## boxplots overlaid with beeswarm point clouds, one panel per condition
## (d1..d4 pooled above).  Requires the beeswarm package to be loaded.
pdf(file = "/Users/fanny/Desktop/xp2-swarms-AccuracyIdentification.pdf", width=12, height=12)
plot(0:1, type = "n", xaxt="n", yaxt="n", bty="n", xlab = "", ylab = "")
## Bug fix: the title used to be drawn at (4, 1), outside the plot region
## set up by plot(0:1, ...) (x range is 1..2), so with default clipping
## (xpd = FALSE) it was clipped and never visible in the PDF.  Draw it at
## the centre of the region instead.
text(1.5, 0.5, "Accuracy Identification")
par(mfrow=c(2,2))
boxplot(d1, ylim = c(-1, 1),
        outline = FALSE, ## avoid double-plotting outliers, if any
        main = 'Y order Slow')
beeswarm(d1,
         col = "seagreen3", add = TRUE)
boxplot(d2, ylim = c(-1, 1),
        outline = FALSE, ## avoid double-plotting outliers, if any
        main = 'Y order Fast')
beeswarm(d2,
         col = "palegreen", add = TRUE)
boxplot(d3, ylim = c(-1, 1),
        outline = FALSE, ## avoid double-plotting outliers, if any
        main = 'Smart Slow')
beeswarm(d3,
         col = "seagreen3", add = TRUE)
boxplot(d4, ylim = c(-1, 1),
        outline = FALSE, ## avoid double-plotting outliers, if any
        main = 'Smart Fast')
beeswarm(d4,
         col = "palegreen", add = TRUE)
dev.off()
##### ACCURACY
## P01-P15: per-block accuracy difference (on minus off) of
## correct_targets_percent — same pipeline as the identification section,
## but on the accuracy column.  For each participant Pxx this
## - reads the tab-separated log file named by the variable logsPxx,
## - drops practice trials,
## - subsets the eight 'off' and eight 'on' blocks into off1..off8 /
##   on1..on8 (overwriting the identification-stage subsets, as before),
## - overwrites diff<i>_Pxx with the accuracy difference per block pair.
for (pid in sprintf("P%02d", 1:15)) {
  data <- read.csv(get(paste0("logs", pid)), sep = "\t", header = TRUE)
  data <- data[data$practice != 'true', ]  ## Filter out practice trials
  for (i in 1:8) {
    assign(paste0("off", i), data[data$bloc_name == paste0("off", i), ])
    assign(paste0("on", i),  data[data$bloc_name == paste0("on", i), ])
    ## diff<i>_Pxx = on-block minus off-block accuracy percentage
    assign(paste0("diff", i, "_", pid),
           get(paste0("on", i))$correct_targets_percent -
             get(paste0("off", i))$correct_targets_percent)
  }
}
## P16
data = read.csv(logsP16, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P16 <- on1$correct_targets_percent - off1$correct_targets_percent
diff2_P16 <- on2$correct_targets_percent - off2$correct_targets_percent
diff3_P16 <- on3$correct_targets_percent - off3$correct_targets_percent
diff4_P16 <- on4$correct_targets_percent - off4$correct_targets_percent
diff5_P16 <- on5$correct_targets_percent - off5$correct_targets_percent
diff6_P16 <- on6$correct_targets_percent - off6$correct_targets_percent
diff7_P16 <- on7$correct_targets_percent - off7$correct_targets_percent
diff8_P16 <- on8$correct_targets_percent - off8$correct_targets_percent
## P17
data = read.csv(logsP17, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P17 <- on1$correct_targets_percent - off1$correct_targets_percent
diff2_P17 <- on2$correct_targets_percent - off2$correct_targets_percent
diff3_P17 <- on3$correct_targets_percent - off3$correct_targets_percent
diff4_P17 <- on4$correct_targets_percent - off4$correct_targets_percent
diff5_P17 <- on5$correct_targets_percent - off5$correct_targets_percent
diff6_P17 <- on6$correct_targets_percent - off6$correct_targets_percent
diff7_P17 <- on7$correct_targets_percent - off7$correct_targets_percent
diff8_P17 <- on8$correct_targets_percent - off8$correct_targets_percent
## P18
data = read.csv(logsP18, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P18 <- on1$correct_targets_percent - off1$correct_targets_percent
diff2_P18 <- on2$correct_targets_percent - off2$correct_targets_percent
diff3_P18 <- on3$correct_targets_percent - off3$correct_targets_percent
diff4_P18 <- on4$correct_targets_percent - off4$correct_targets_percent
diff5_P18 <- on5$correct_targets_percent - off5$correct_targets_percent
diff6_P18 <- on6$correct_targets_percent - off6$correct_targets_percent
diff7_P18 <- on7$correct_targets_percent - off7$correct_targets_percent
diff8_P18 <- on8$correct_targets_percent - off8$correct_targets_percent
## P19
data = read.csv(logsP19, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P19 <- on1$correct_targets_percent - off1$correct_targets_percent
diff2_P19 <- on2$correct_targets_percent - off2$correct_targets_percent
diff3_P19 <- on3$correct_targets_percent - off3$correct_targets_percent
diff4_P19 <- on4$correct_targets_percent - off4$correct_targets_percent
diff5_P19 <- on5$correct_targets_percent - off5$correct_targets_percent
diff6_P19 <- on6$correct_targets_percent - off6$correct_targets_percent
diff7_P19 <- on7$correct_targets_percent - off7$correct_targets_percent
diff8_P19 <- on8$correct_targets_percent - off8$correct_targets_percent
## P20
data = read.csv(logsP20, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P20 <- on1$correct_targets_percent - off1$correct_targets_percent
diff2_P20 <- on2$correct_targets_percent - off2$correct_targets_percent
diff3_P20 <- on3$correct_targets_percent - off3$correct_targets_percent
diff4_P20 <- on4$correct_targets_percent - off4$correct_targets_percent
diff5_P20 <- on5$correct_targets_percent - off5$correct_targets_percent
diff6_P20 <- on6$correct_targets_percent - off6$correct_targets_percent
diff7_P20 <- on7$correct_targets_percent - off7$correct_targets_percent
diff8_P20 <- on8$correct_targets_percent - off8$correct_targets_percent
## Pool the per-participant difference vectors across all 20 participants,
## replacing four near-identical 20-line c(...) calls.  As in the original
## code, d<k> concatenates diff<k>_Pxx and diff<k+4>_Pxx for every
## participant P01..P20, in that order (the two blocks presumably belong
## to the same condition — confirm against the experiment design).
poolCondition <- function(b1, b2) {
  unlist(lapply(sprintf("P%02d", 1:20), function(p) {
    c(get(paste0("diff", b1, "_", p)), get(paste0("diff", b2, "_", p)))
  }))
}
d1 <- poolCondition(1, 5) ## Y order Slow
d2 <- poolCondition(2, 6) ## Y order Fast
d3 <- poolCondition(3, 7) ## Smart Slow
d4 <- poolCondition(4, 8) ## Smart Fast
## Figure: 2x2 grid, one boxplot with a beeswarm overlay per condition.
pdf(file = "/Users/fanny/Desktop/xp2-swarms-AccuracySelection.pdf", width=9, height=9)
par(mfrow=c(2,2))
## One entry per panel: data vector, panel title, beeswarm colour.
panels <- list(list(d1, 'Y order Slow', "seagreen3"),
               list(d2, 'Y order Fast', "palegreen"),
               list(d3, 'Smart Slow', "seagreen3"),
               list(d4, 'Smart Fast', "palegreen"))
for (p in panels) {
  boxplot(p[[1]], ylim = c(-1, 1),
          outline = FALSE, ## avoid double-plotting outliers, if any
          main = p[[2]])
  beeswarm(p[[1]],
           col = p[[3]], add = TRUE)
}
dev.off()
## dfr <- data.frame (
## d1 = d1,
## d2 = d2,
## d3 = d3,
## d4 = d4,
## y = c("p01-b1","p01-b5", "p02-b1", "p02-b1", "p03-b1","p03-b5", "p04-b1","p04-b5", "p05-b1","p05-b5",
## "p06-b1","p06-b5", "p07-b1","p07-b5", "p08-b1","p08-b5", "p09-b1","p09-b5", "p10-b1","p10-b5",
## "p11-b1","p11-b5", "p12-b1", "p12-b1", "p13-b1","p13-b5", "p14-b1","p14-b5", "p15-b1","p15-b5",
## "p16-b1","p16-b5", "p17-b1","p17-b5", "p18-b1","p18-b5", "p19-b1","p19-b5", "p20-b1","p20-b5")
## )
## boxplot(x ~ y, data = dfr,
## outline = FALSE, ## avoid double-plotting outliers, if any
## main = 'Y order Slow')
## beeswarm(x ~ y, data = dfr,
## col = rainbow(8), add = TRUE)
| /experimental-materials/scriptR-expe2-beeswarms.R | no_license | fannychevalier/staggered-animation | R | false | false | 126,045 | r | ################ EXPERIMENT 2 ##########################
#
# This script analyzes the results for experiment 2
#
########################################################
#------------------ LIBRARIES --------------------------#
## Install each required package only when it is missing, then load it.
## (The original unconditionally re-installed all three on every run.)
for (pkg in c("ggplot2", "boot", "gridExtra")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
#-------------------------------------------------------#
#------------------ NAMES AND LABELS -------------------#
### short names for columns (as they appear in the log file)
crowd <- "diff_crowding"
incrowd <- "diff_innercrowding"
deform <- "diff_deformation"
norm_error <- "normalized_error"
pLabeled <- "correct_targets_percent"
pClicked <- "common_targets_percent"
labeled <- "correct_targets"
clicked <- "common_targets"
mislabeled <- "targets_switch"
### text labels for charts (human-readable axis / legend text)
lDeform <- "Deformation"
lIncrowd <- "Inner crowding"
lCrowd <- "Crowding"
lNorm_error <- "Normalized error"
lPLabeled <- "Accuracy (correct labeled / total targets)"
lPClicked <- "Accuracy click (correct clicked / total targets)"
lPMislabeled <- "Mislabeling (mislabeled / total targets)"
lPMislabeledPerClicks <- "Mislabeling (mislabeled / correct clicked)"
lTaskA <- "No ID"
lTaskB <- "ID"
#-------------------------------------------------------#
#------------ LOAD AND CHECK DATA ----------------------#
logs <- "staggeredAnimation-xp2-logs.csv"
NB_PARTICIPANTS = 4 ## For sanity check
NB_REPS = 5
NB_BLOCKS = 16
NB_TARGETS = 3
data = read.csv(logs, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
### SANITY CHECK ###
if (length(data[,1]) != NB_PARTICIPANTS*NB_REPS*NB_BLOCKS)
cat("Warning: wrong number of trials")
#-------------------------------------------------------#
#--------------- USEFUL FUNCTIONS ----------------------#
##### get subset corresponding to a difficulty level 'level' of a measure 'measure'
## x: data frame of trials; measure: column name (string); level: value
## to keep.  Returns the matching rows, warning when the row count does
## not match the expected design size.
getSubsetByDifficulty <- function(x, measure, level) {
  result <- x[x[, measure] == level, ]
  ## NOTE(review): NB_CONDITIONS is not defined anywhere in this script,
  ## so calling this function errors unless it is set elsewhere — confirm.
  if (nrow(result) != NB_PARTICIPANTS * NB_REPS * NB_CONDITIONS / 2) { ### sanity check
    warning("wrong number of trials in getSubsetByDifficulty", call. = FALSE)
  }
  return(result)
}
## Exemple: call getSubsetByDifficulty(condA, innercrowd, 'E')
## Aggregate `measure` per participant with either the mean or the median.
## f: "mean" selects aggregateMean(); any other value falls back to
## aggregateMedian(), matching the original dispatch.
aggregateParticipant <- function(x, measure, f) {
  if (f == "mean") {
    aggregateMean(x, measure)
  } else {
    aggregateMedian(x, measure)
  }
}
###### get mean per participant
## Returns a data frame with one row per participant and the mean of
## column `measure` in the second column.
aggregateMean <- function(x, measure) {
  aggregate(x[, measure] ~ participant, data = x, mean)
}
###### get median per participant
## Returns a data frame with one row per participant and the median of
## column `measure` in the second column.
aggregateMedian <- function(x, measure) {
  aggregate(x[, measure] ~ participant, data = x, median)
}
##### get global percent of success
## Sums column `measure` per participant and divides by `total`, the
## maximum attainable count.  `total` defaults to 48, the value that was
## hard-coded in the original (presumably 16 blocks x 3 targets — confirm
## against the design constants above).
getPercent <- function(x, measure, total = 48) {
  aggdata <- aggregate(x[, measure] ~ participant, data = x, sum)
  aggdata[, 2] <- aggdata[, 2] / total
  return(aggdata)
}
### get mean of a vector by Measure
## Mean of column `measure` of data frame `x` (NAs propagate, as before).
getMean <- function(x, measure) {
  mean(x[, measure])
}
### get median of a vector by Measure
## Median of column `measure` of data frame `x`.
getMedian <- function(x, measure) {
  median(x[, measure])
}
### get confidence interval
## Dispatch on f: "mean" gives the bootstrap CI of the mean, anything
## else the bootstrap CI of the median.
getCI <- function(x, f) {
  if (f == "mean") getCIMean(x) else getCIMedian(x)
}
### get Confidence interval (mean)
## 95% bootstrap percentile CI of the mean of x (5000 resamples via the
## boot package).  Returns the 2.5% and 97.5% quantiles of the bootstrap
## distribution.  Not seeded, so repeated calls vary slightly.
## (Removed an unused `number.measures` local and a dead `boot.object$t0`
## expression whose value was discarded.)
getCIMean <- function(x) {
  number.samples <- 5000
  sample.mean <- function(x, index) {
    mean(x[index])
  }
  boot.object <- boot(x, sample.mean, R = number.samples)
  ## Percentile method: quantiles of the replicates in boot.object$t.
  quantile(boot.object$t, c(0.025, 0.975))
}
### get Confidence interval (median)
## 95% bootstrap percentile CI of the median of x (5000 resamples via
## the boot package).  Returns the 2.5% and 97.5% quantiles of the
## bootstrap distribution.  Not seeded, so repeated calls vary slightly.
## (Removed an unused `number.measures` local and a dead `boot.object$t0`
## expression whose value was discarded.)
getCIMedian <- function(x) {
  number.samples <- 5000
  sample.median <- function(x, index) {
    median(x[index])
  }
  boot.object <- boot(x, sample.median, R = number.samples)
  ## Percentile method: quantiles of the replicates in boot.object$t.
  quantile(boot.object$t, c(0.025, 0.975))
}
## get Lower value of a CI,
# Param: column=<normalized_error, accuracy>, f=<"mean", "median">
## Lower (2.5%) bound of the bootstrap CI returned by getCI().
getLowerCI <- function(x, f) {
  as.data.frame(getCI(x, f))[1, 1]
}
## Upper (97.5%) bound of the bootstrap CI returned by getCI().
getUpperCI <- function(x, f) {
  as.data.frame(getCI(x, f))[2, 1]
}
#-------------------------------------------------------#
#--------------- FUNCTIONS FOR PLOTS ----------------------#
## Split the previously loaded `data` into the 8 OFF and 8 ON block
## subsets (kept as globals off1..off8 / on1..on8, as before) and compute
## the per-block normalized-error difference diff1..diff8.
## NOTE(review): the sign here is OFF minus ON, while the per-participant
## sections below use ON minus OFF — confirm which is intended.
for (b in 1:8) {
  off_b <- data[data$bloc_name == paste0("off", b), ]
  on_b <- data[data$bloc_name == paste0("on", b), ]
  assign(paste0("off", b), off_b)
  assign(paste0("on", b), on_b)
  assign(paste0("diff", b), off_b$normalized_error - on_b$normalized_error)
}
#-------------------------------------------------------#
### LOGS PARTICIPANTS
## One result file per participant: logsP01 .. logsP20, generated from
## the common directory layout instead of twenty literal assignments.
for (i in 1:20) {
  id <- sprintf("p%02d", i)
  assign(paste0("logs", toupper(id)),
         paste0("/Users/fanny/Documents/workspace/staggeredAnim-Pierre/data/xp2-main/",
                id, "/results/results_", id, ".csv"))
}
## P01-P12: per-participant analysis, collapsed from twelve copy-pasted
## 27-line sections into one loop.  For every participant: read the log
## file, drop practice trials, and for each of the 8 blocks store the
## normalized-error difference (animation ON minus animation OFF) in a
## global variable diff<block>_P<participant> (e.g. diff4_P07).  Like the
## original code, `data` and the on<b>/off<b> subsets of the last
## participant processed remain in the global environment afterwards.
for (p in sprintf("%02d", 1:12)) {
  data <- read.csv(get(paste0("logsP", p)), sep = "\t", head = TRUE)
  data <- data[data$practice != 'true', ] ## Filter out practice trials
  for (b in 1:8) {
    off_b <- data[data$bloc_name == paste0("off", b), ]
    on_b <- data[data$bloc_name == paste0("on", b), ]
    assign(paste0("off", b), off_b)
    assign(paste0("on", b), on_b)
    ## Normalized-error difference: animation ON minus animation OFF.
    assign(paste0("diff", b, "_P", p),
           on_b$normalized_error - off_b$normalized_error)
  }
}
## P13
data = read.csv(logsP13, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P13 <- on1$normalized_error - off1$normalized_error
diff2_P13 <- on2$normalized_error - off2$normalized_error
diff3_P13 <- on3$normalized_error - off3$normalized_error
diff4_P13 <- on4$normalized_error - off4$normalized_error
diff5_P13 <- on5$normalized_error - off5$normalized_error
diff6_P13 <- on6$normalized_error - off6$normalized_error
diff7_P13 <- on7$normalized_error - off7$normalized_error
diff8_P13 <- on8$normalized_error - off8$normalized_error
## P14
data = read.csv(logsP14, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P14 <- on1$normalized_error - off1$normalized_error
diff2_P14 <- on2$normalized_error - off2$normalized_error
diff3_P14 <- on3$normalized_error - off3$normalized_error
diff4_P14 <- on4$normalized_error - off4$normalized_error
diff5_P14 <- on5$normalized_error - off5$normalized_error
diff6_P14 <- on6$normalized_error - off6$normalized_error
diff7_P14 <- on7$normalized_error - off7$normalized_error
diff8_P14 <- on8$normalized_error - off8$normalized_error
## P15
data = read.csv(logsP15, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P15 <- on1$normalized_error - off1$normalized_error
diff2_P15 <- on2$normalized_error - off2$normalized_error
diff3_P15 <- on3$normalized_error - off3$normalized_error
diff4_P15 <- on4$normalized_error - off4$normalized_error
diff5_P15 <- on5$normalized_error - off5$normalized_error
diff6_P15 <- on6$normalized_error - off6$normalized_error
diff7_P15 <- on7$normalized_error - off7$normalized_error
diff8_P15 <- on8$normalized_error - off8$normalized_error
## P16
data = read.csv(logsP16, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P16 <- on1$normalized_error - off1$normalized_error
diff2_P16 <- on2$normalized_error - off2$normalized_error
diff3_P16 <- on3$normalized_error - off3$normalized_error
diff4_P16 <- on4$normalized_error - off4$normalized_error
diff5_P16 <- on5$normalized_error - off5$normalized_error
diff6_P16 <- on6$normalized_error - off6$normalized_error
diff7_P16 <- on7$normalized_error - off7$normalized_error
diff8_P16 <- on8$normalized_error - off8$normalized_error
## P17
data = read.csv(logsP17, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P17 <- on1$normalized_error - off1$normalized_error
diff2_P17 <- on2$normalized_error - off2$normalized_error
diff3_P17 <- on3$normalized_error - off3$normalized_error
diff4_P17 <- on4$normalized_error - off4$normalized_error
diff5_P17 <- on5$normalized_error - off5$normalized_error
diff6_P17 <- on6$normalized_error - off6$normalized_error
diff7_P17 <- on7$normalized_error - off7$normalized_error
diff8_P17 <- on8$normalized_error - off8$normalized_error
## P18
data = read.csv(logsP18, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P18 <- on1$normalized_error - off1$normalized_error
diff2_P18 <- on2$normalized_error - off2$normalized_error
diff3_P18 <- on3$normalized_error - off3$normalized_error
diff4_P18 <- on4$normalized_error - off4$normalized_error
diff5_P18 <- on5$normalized_error - off5$normalized_error
diff6_P18 <- on6$normalized_error - off6$normalized_error
diff7_P18 <- on7$normalized_error - off7$normalized_error
diff8_P18 <- on8$normalized_error - off8$normalized_error
## P19
data = read.csv(logsP19, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P19 <- on1$normalized_error - off1$normalized_error
diff2_P19 <- on2$normalized_error - off2$normalized_error
diff3_P19 <- on3$normalized_error - off3$normalized_error
diff4_P19 <- on4$normalized_error - off4$normalized_error
diff5_P19 <- on5$normalized_error - off5$normalized_error
diff6_P19 <- on6$normalized_error - off6$normalized_error
diff7_P19 <- on7$normalized_error - off7$normalized_error
diff8_P19 <- on8$normalized_error - off8$normalized_error
## P20
data = read.csv(logsP20, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P20 <- on1$normalized_error - off1$normalized_error
diff2_P20 <- on2$normalized_error - off2$normalized_error
diff3_P20 <- on3$normalized_error - off3$normalized_error
diff4_P20 <- on4$normalized_error - off4$normalized_error
diff5_P20 <- on5$normalized_error - off5$normalized_error
diff6_P20 <- on6$normalized_error - off6$normalized_error
diff7_P20 <- on7$normalized_error - off7$normalized_error
diff8_P20 <- on8$normalized_error - off8$normalized_error
## Pool per-participant differences into the four condition vectors d1..d4.
## Condition k combines bloc k (slow) and bloc k+4 (fast) of the same
## condition, concatenated across participants P01..P20 in participant
## order -- exactly the ordering the original four explicit c(...) calls
## produced (diffK_P01, diff(K+4)_P01, diffK_P02, ...).
for (.k in 1:4) {
  .parts <- lapply(sprintf("P%02d", 1:20), function(p) {
    c(get(paste0("diff", .k, "_", p)),
      get(paste0("diff", .k + 4, "_", p)))
  })
  ## diff vectors are unnamed numerics, so use.names = FALSE is a no-op
  ## on content and avoids any accidental name propagation.
  assign(paste0("d", .k), unlist(.parts, use.names = FALSE))
}
rm(.k, .parts)  ## drop loop temporaries
## Export the four condition distributions as a 2x2 grid of boxplots with
## beeswarm overlays. Slow panels use darkgoldenrod1, fast panels orange3.
pdf(file = "/Users/fanny/Desktop/xp2-swarms-ErrorIdentification.pdf",
    width = 9, height = 9)
par(mfrow = c(2, 2))
.panel_data   <- list(d1, d2, d3, d4)
.panel_titles <- c("Y order Slow", "Y order Fast", "Smart Slow", "Smart Fast")
.panel_cols   <- c("darkgoldenrod1", "orange3", "darkgoldenrod1", "orange3")
for (.i in 1:4) {
  boxplot(.panel_data[[.i]], ylim = c(-1, 1),
          outline = FALSE,  ## avoid double-plotting outliers, if any
          main = .panel_titles[.i])
  beeswarm(.panel_data[[.i]],
           col = .panel_cols[.i], add = TRUE)
}
rm(.panel_data, .panel_titles, .panel_cols, .i)
dev.off()
#### Normalized error as if A
## P01-P15: per-bloc on-minus-off differences in normalized_error_asifA.
## Replaces fifteen copy-pasted per-participant sections with one loop.
## The same globals as before (data, off1..off8, on1..on8, diffB_Pxx)
## are still created, overwriting the values from the previous section
## exactly as the unrolled code did.
## The logsPxx variables (log-file paths) are defined earlier in the script.
for (.p in 1:15) {
  .pid <- sprintf("P%02d", .p)
  ## header= spelled out (the original relied on partial matching via head=).
  data <- read.csv(get(paste0("logs", .pid)), sep = "\t", header = TRUE)
  data <- data[data$practice != "true", ]  ## Filter out practice trials
  for (.b in 1:8) {
    .off <- data[data$bloc_name == paste0("off", .b), ]
    .on  <- data[data$bloc_name == paste0("on", .b), ]
    ## Recreate the offN / onN globals exactly as the unrolled code did.
    assign(paste0("off", .b), .off)
    assign(paste0("on", .b), .on)
    ## diffB_Pxx: element-wise on-off "as if A" error difference per bloc.
    assign(paste0("diff", .b, "_", .pid),
           .on$normalized_error_asifA - .off$normalized_error_asifA)
  }
}
rm(.p, .pid, .b, .off, .on)  ## drop loop temporaries; keep the global env tidy
## P06
data = read.csv(logsP16, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P16 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P16 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P16 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P16 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P16 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P16 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P16 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P16 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P17
data = read.csv(logsP17, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P17 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P17 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P17 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P17 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P17 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P17 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P17 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P17 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P18
data = read.csv(logsP18, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P18 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P18 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P18 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P18 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P18 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P18 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P18 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P18 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P19
data = read.csv(logsP19, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P19 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P19 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P19 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P19 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P19 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P19 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P19 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P19 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## P20
data = read.csv(logsP20, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P20 <- on1$normalized_error_asifA - off1$normalized_error_asifA
diff2_P20 <- on2$normalized_error_asifA - off2$normalized_error_asifA
diff3_P20 <- on3$normalized_error_asifA - off3$normalized_error_asifA
diff4_P20 <- on4$normalized_error_asifA - off4$normalized_error_asifA
diff5_P20 <- on5$normalized_error_asifA - off5$normalized_error_asifA
diff6_P20 <- on6$normalized_error_asifA - off6$normalized_error_asifA
diff7_P20 <- on7$normalized_error_asifA - off7$normalized_error_asifA
diff8_P20 <- on8$normalized_error_asifA - off8$normalized_error_asifA
## Pool each condition's ON-OFF differences across all 20 participants.
## dK combines diffK and diff(K+4) (the two halves of the same condition),
## interleaved per participant in the original order:
##   diffK_P01, diff(K+4)_P01, diffK_P02, diff(K+4)_P02, ..., diff(K+4)_P20
## Replaces four 20-line literal c(...) calls with a single helper.
pids <- sprintf("P%02d", 1:20)
pair_vec <- function(a, b, env = parent.frame()) {
  ## Build the variable names in per-participant interleaved order
  ## (rbind + as.vector alternates the two name vectors), then fetch the
  ## vectors from the caller's environment and concatenate them.
  nms <- as.vector(rbind(paste0("diff", a, "_", pids),
                         paste0("diff", b, "_", pids)))
  unlist(mget(nms, envir = env, inherits = TRUE), use.names = FALSE)
}
d1 <- pair_vec(1, 5)
d2 <- pair_vec(2, 6)
d3 <- pair_vec(3, 7)
d4 <- pair_vec(4, 8)
## Boxplots with beeswarm overlays of the pooled ON-OFF error differences,
## one panel per condition, written to a PDF in a 2x2 grid.
## NOTE(review): the output path is hard-coded to one user's desktop --
## consider parameterising it (e.g. via file.path(out_dir, ...)).
pdf(file = "/Users/fanny/Desktop/xp2-swarms-ErrorSelection.pdf",
    width = 9, height = 9)
par(mfrow = c(2, 2))
plot_panel <- function(d, title, col) {
  ## One panel: boxplot without its own outlier points (the beeswarm
  ## overlay draws every observation, so plotting outliers twice would
  ## double them), then the raw data on top.
  boxplot(d, ylim = c(-1, 1),
          outline = FALSE, ## avoid double-plotting outliers, if any
          main = title)
  beeswarm(d, col = col, add = TRUE)
}
plot_panel(d1, 'Y order Slow', "darkgoldenrod1")
plot_panel(d2, 'Y order Fast', "orange3")
plot_panel(d3, 'Smart Slow', "darkgoldenrod1")
plot_panel(d4, 'Smart Fast', "orange3")
dev.off()
##### ACCURACY
## P01-P18: per-participant ON-OFF differences in selection accuracy
## (common_targets_percent). For each participant Pxx: read their
## tab-separated log, drop practice trials, split trials by bloc_name into
## off1..off8 / on1..on8, and store
##   diffN_Pxx <- onN$common_targets_percent - offN$common_targets_percent
## for blocks N = 1..8.
## Replaces eighteen copy-pasted 27-line sections; also uses the full
## argument name header= instead of the partially matched head=.
## The loop recreates the same global variables the original code did
## (data, off1..off8, on1..on8 end up holding P18's subsets; the P19/P20
## sections that follow overwrite them as before).
accuracy_logs <- list(P01 = logsP01, P02 = logsP02, P03 = logsP03,
                      P04 = logsP04, P05 = logsP05, P06 = logsP06,
                      P07 = logsP07, P08 = logsP08, P09 = logsP09,
                      P10 = logsP10, P11 = logsP11, P12 = logsP12,
                      P13 = logsP13, P14 = logsP14, P15 = logsP15,
                      P16 = logsP16, P17 = logsP17, P18 = logsP18)
for (pid in names(accuracy_logs)) {
  data <- read.csv(accuracy_logs[[pid]], sep = "\t", header = TRUE)
  data <- data[data$practice != 'true', ]  ## Filter out practice trials
  for (b in 1:8) {
    off_b <- data[data$bloc_name == paste0("off", b), ]
    on_b  <- data[data$bloc_name == paste0("on", b), ]
    ## Keep the same global names the original script created.
    assign(paste0("off", b), off_b)
    assign(paste0("on", b), on_b)
    assign(paste0("diff", b, "_", pid),
           on_b$common_targets_percent - off_b$common_targets_percent)
  }
}
## P19
data = read.csv(logsP19, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P19 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P19 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P19 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P19 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P19 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P19 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P19 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P19 <- on8$common_targets_percent - off8$common_targets_percent
## P20
data = read.csv(logsP20, sep="\t", head=TRUE)
data <- data[data$practice != 'true',] ## Filter out practice trials
off1 <- data[data$bloc_name == 'off1',]
off2 <- data[data$bloc_name == 'off2',]
off3 <- data[data$bloc_name == 'off3',]
off4 <- data[data$bloc_name == 'off4',]
off5 <- data[data$bloc_name == 'off5',]
off6 <- data[data$bloc_name == 'off6',]
off7 <- data[data$bloc_name == 'off7',]
off8 <- data[data$bloc_name == 'off8',]
on1 <- data[data$bloc_name == 'on1',]
on2 <- data[data$bloc_name == 'on2',]
on3 <- data[data$bloc_name == 'on3',]
on4 <- data[data$bloc_name == 'on4',]
on5 <- data[data$bloc_name == 'on5',]
on6 <- data[data$bloc_name == 'on6',]
on7 <- data[data$bloc_name == 'on7',]
on8 <- data[data$bloc_name == 'on8',]
diff1_P20 <- on1$common_targets_percent - off1$common_targets_percent
diff2_P20 <- on2$common_targets_percent - off2$common_targets_percent
diff3_P20 <- on3$common_targets_percent - off3$common_targets_percent
diff4_P20 <- on4$common_targets_percent - off4$common_targets_percent
diff5_P20 <- on5$common_targets_percent - off5$common_targets_percent
diff6_P20 <- on6$common_targets_percent - off6$common_targets_percent
diff7_P20 <- on7$common_targets_percent - off7$common_targets_percent
diff8_P20 <- on8$common_targets_percent - off8$common_targets_percent
## Pool the per-participant ON-OFF identification differences across all 20
## participants (P01..P20). Each dN concatenates two bloc conditions:
##   d1: blocs 1 & 5 ("Y order Slow"), d2: blocs 2 & 6 ("Y order Fast"),
##   d3: blocs 3 & 7 ("Smart Slow"),   d4: blocs 4 & 8 ("Smart Fast").
## The diffA_PXX vectors were built by the per-participant sections above.
pids <- sprintf("P%02d", 1:20)
d1 <- unlist(lapply(pids, function(pid) {
  c(get(paste0("diff1_", pid)), get(paste0("diff5_", pid)))
}))
d2 <- unlist(lapply(pids, function(pid) {
  c(get(paste0("diff2_", pid)), get(paste0("diff6_", pid)))
}))
d3 <- unlist(lapply(pids, function(pid) {
  c(get(paste0("diff3_", pid)), get(paste0("diff7_", pid)))
}))
d4 <- unlist(lapply(pids, function(pid) {
  c(get(paste0("diff4_", pid)), get(paste0("diff8_", pid)))
}))
## Write the pooled differences to a 4-panel PDF: per-condition boxplots of
## the ON-OFF values with individual points overlaid as beeswarms.
## beeswarm() comes from the third-party 'beeswarm' package, attached earlier
## in the script; the output path is machine-specific.
pdf(file = "/Users/fanny/Desktop/xp2-swarms-AccuracyIdentification.pdf", width=12, height=12)
## Blank page intended as a title page for the document.
plot(0:1, type = "n", xaxt="n", yaxt="n", bty="n", xlab = "", ylab = "")
## NOTE(review): x = 4 lies outside the plot region of plot(0:1) (x spans
## roughly 1..2); with default clipping (xpd = FALSE) this label may not be
## drawn -- confirm the title actually appears in the generated PDF.
text(4,1 , "Accuracy Identification")
## 2x2 grid: one panel per swarm condition.
par(mfrow=c(2,2))
boxplot(d1, ylim = c(-1, 1),
outline = FALSE, ## avoid double-plotting outliers, if any
main = 'Y order Slow')
beeswarm(d1,
col = "seagreen3", add = TRUE)
boxplot(d2, ylim = c(-1, 1),
outline = FALSE, ## avoid double-plotting outliers, if any
main = 'Y order Fast')
beeswarm(d2,
col = "palegreen", add = TRUE)
boxplot(d3, ylim = c(-1, 1),
outline = FALSE, ## avoid double-plotting outliers, if any
main = 'Smart Slow')
beeswarm(d3,
col = "seagreen3", add = TRUE)
boxplot(d4, ylim = c(-1, 1),
outline = FALSE, ## avoid double-plotting outliers, if any
main = 'Smart Fast')
beeswarm(d4,
col = "palegreen", add = TRUE)
## Close the PDF device.
dev.off()
##### ACCURACY
## P01-P20: accuracy analysis (correct_targets_percent). Same pipeline as the
## identification analysis above, collapsed from 20 copy-pasted sections into
## a loop over participants: read each tab-separated log (logsP01..logsP20
## are path variables defined earlier in the script), drop practice trials,
## and compute the element-wise ON - OFF difference for each of the 8 bloc
## pairs. The loop reproduces every global the unrolled version created:
## diff1_P01 ... diff8_P20, plus off1..off8 / on1..on8 and `data`, which are
## left holding the last participant's (P20) values.
for (p in 1:20) {
  pid <- sprintf("P%02d", p)
  data <- read.csv(get(paste0("logs", pid)), sep = "\t", head = TRUE)
  data <- data[data$practice != 'true', ]  ## Filter out practice trials
  for (b in 1:8) {
    off_b <- data[data$bloc_name == paste0("off", b), ]
    on_b <- data[data$bloc_name == paste0("on", b), ]
    ## Keep the offN/onN globals for parity with the hand-unrolled version.
    assign(paste0("off", b), off_b)
    assign(paste0("on", b), on_b)
    assign(paste0("diff", b, "_", pid),
           on_b$correct_targets_percent - off_b$correct_targets_percent)
  }
}
## Assemble the four condition vectors d1..d4.
## For condition k (1..4), the values of block k and block k+4 are
## interleaved per participant, in participant order P01..P20 — exactly the
## same element order the original explicit c(diff.._P01, ..., diff.._P20)
## calls produced.
participants <- sprintf("P%02d", 1:20)
collect_diffs <- function(first, second) {
  ## fetch diff<first>_Pxx and diff<second>_Pxx for each participant and
  ## concatenate them, dropping any names
  unlist(lapply(participants, function(p) {
    c(get(paste0("diff", first, "_", p)),
      get(paste0("diff", second, "_", p)))
  }), use.names = FALSE)
}
d1 <- collect_diffs(1, 5)
d2 <- collect_diffs(2, 6)
d3 <- collect_diffs(3, 7)
d4 <- collect_diffs(4, 8)
# Write the four condition panels to a single PDF.
# NOTE(review): hard-coded absolute path — only works on the author's machine.
pdf(file = "/Users/fanny/Desktop/xp2-swarms-AccuracySelection.pdf", width=9, height=9)
# 2 x 2 grid, one panel per condition (ordering strategy x speed).
par(mfrow=c(2,2))
# Each panel: boxplot of the ON-OFF accuracy differences, with the raw
# per-observation values overlaid as a beeswarm (requires the beeswarm pkg).
boxplot(d1, ylim = c(-1, 1),
        outline = FALSE, ## avoid double-plotting outliers, if any
        main = 'Y order Slow')
beeswarm(d1,
         col = "seagreen3", add = TRUE)
boxplot(d2, ylim = c(-1, 1),
        outline = FALSE, ## avoid double-plotting outliers, if any
        main = 'Y order Fast')
beeswarm(d2,
         col = "palegreen", add = TRUE)
boxplot(d3, ylim = c(-1, 1),
        outline = FALSE, ## avoid double-plotting outliers, if any
        main = 'Smart Slow')
beeswarm(d3,
         col = "seagreen3", add = TRUE)
boxplot(d4, ylim = c(-1, 1),
        outline = FALSE, ## avoid double-plotting outliers, if any
        main = 'Smart Fast')
beeswarm(d4,
         col = "palegreen", add = TRUE)
# Close the PDF device so the file is flushed to disk.
dev.off()
## dfr <- data.frame (
## d1 = d1,
## d2 = d2,
## d3 = d3,
## d4 = d4,
## y = c("p01-b1","p01-b5", "p02-b1", "p02-b1", "p03-b1","p03-b5", "p04-b1","p04-b5", "p05-b1","p05-b5",
## "p06-b1","p06-b5", "p07-b1","p07-b5", "p08-b1","p08-b5", "p09-b1","p09-b5", "p10-b1","p10-b5",
## "p11-b1","p11-b5", "p12-b1", "p12-b1", "p13-b1","p13-b5", "p14-b1","p14-b5", "p15-b1","p15-b5",
## "p16-b1","p16-b5", "p17-b1","p17-b5", "p18-b1","p18-b5", "p19-b1","p19-b5", "p20-b1","p20-b5")
## )
## boxplot(x ~ y, data = dfr,
## outline = FALSE, ## avoid double-plotting outliers, if any
## main = 'Y order Slow')
## beeswarm(x ~ y, data = dfr,
## col = rainbow(8), add = TRUE)
|
\name{bearingRhumb}
\Rdversion{1.1}
\alias{bearingRhumb}
\title{
Rhumbline direction
}
\description{
Bearing (direction of travel; true course) along a rhumb line (loxodrome) between two points.
}
\usage{
bearingRhumb(p1, p2)
}
\arguments{
\item{p1}{longitude/latitude of point(s). Can be a vector of two numbers, a matrix of 2 columns (first one is longitude, second is latitude) or a SpatialPoints* object}
\item{p2}{as above}
}
\value{
A direction (bearing) in degrees
}
\references{
\url{https://www.edwilliams.org/avform147.htm#Rhumb}
\url{https://en.wikipedia.org/wiki/Rhumb_line}
}
\author{
Chris Veness and Robert Hijmans, based on formulae by Ed Williams
}
\note{
Unlike most great circles, a rhumb line is a line of constant bearing (direction), i.e. tracks of constant true course.
The meridians and the equator are both rhumb lines and great circles. Rhumb lines approaching a pole become a tightly wound spiral.
}
\seealso{
\code{ \link[geosphere]{bearing}, \link[geosphere]{distRhumb} }
}
\examples{
bearingRhumb(c(10,10),c(20,20))
}
\keyword{ spatial }
| /man/bearingRhumb.Rd | no_license | cran/geosphere | R | false | false | 1,144 | rd | \name{bearingRhumb}
\Rdversion{1.1}
\alias{bearingRhumb}
\title{
Rhumbline direction
}
\description{
Bearing (direction of travel; true course) along a rhumb line (loxodrome) between two points.
}
\usage{
bearingRhumb(p1, p2)
}
\arguments{
\item{p1}{longitude/latitude of point(s). Can be a vector of two numbers, a matrix of 2 columns (first one is longitude, second is latitude) or a SpatialPoints* object}
\item{p2}{as above}
}
\value{
A direction (bearing) in degrees
}
\references{
\url{https://www.edwilliams.org/avform147.htm#Rhumb}
\url{https://en.wikipedia.org/wiki/Rhumb_line}
}
\author{
Chris Veness and Robert Hijmans, based on formulae by Ed Williams
}
\note{
Unlike most great circles, a rhumb line is a line of constant bearing (direction), i.e. tracks of constant true course.
The meridians and the equator are both rhumb lines and great circles. Rhumb lines approaching a pole become a tightly wound spiral.
}
\seealso{
\code{ \link[geosphere]{bearing}, \link[geosphere]{distRhumb} }
}
\examples{
bearingRhumb(c(10,10),c(20,20))
}
\keyword{ spatial }
|
# Supervised learning: classification with the Naive Bayes algorithm.
# Input file: Credito.csv (credit data, one row per client, outcome `class`).
credito <- read.csv(file.choose(), sep = ',', header = TRUE)
View(credito)
# Split the data into training and test sets via a simple random sample:
# each row goes to group 1 (train) or 2 (test) with probability 70%/30%.
# Fixed: the original hard-coded the sample size as 1000; nrow() makes the
# split correct even when the file does not have exactly 1000 rows.
amostra <- sample(2, nrow(credito), replace = TRUE, prob = c(0.7, 0.3))
amostra
treino <- credito[amostra == 1,]
teste <- credito[amostra == 2,]
dim(credito)
dim(treino)
dim(teste)
# Install e1071 only when it is missing (the original unconditionally
# re-downloaded the package on every run of the script).
if (!requireNamespace("e1071", quietly = TRUE)) {
  install.packages("e1071", dependencies = TRUE)
}
library(e1071)
# Predictive model: Naive Bayes with `class` as the outcome and all other
# columns as predictors.
modelo <- naiveBayes(class ~ ., treino)
modelo
# Predict on the held-out test set.
previsao <- predict(modelo, teste)
previsao
# Spot-check the first two predictions against the observed classes.
teste$class[1]
previsao[1]
teste$class[2]
previsao[2]
# Validation: confusion matrix (observed x predicted).
confusao <- table(teste$class, previsao)
confusao
# Accuracy = share of correct predictions (diagonal of the matrix).
# Equivalent to (confusao[1] + confusao[4]) / sum(confusao) for two classes,
# but also correct if `class` ever has more than two levels.
perc <- sum(diag(confusao)) / sum(confusao)
perc
# Score a new file (NovoCredit.csv) with the fitted model.
novaInstancia <- read.csv(file.choose(), sep = ',', header = TRUE)
dim(novaInstancia)
# Predicted class for the new clients.
novaInstancia$class <- predict(modelo, novaInstancia)
novaInstancia$class
| /Aula 3/ML_01.R | no_license | Marcelo391/Curso-R-Coti | R | false | false | 1,083 | r | #Treinamento Supervisionado
# Supervised learning: classification with the Naive Bayes algorithm.
# Input file: Credito.csv (credit data, one row per client, outcome `class`).
credito <- read.csv(file.choose(), sep = ',', header = TRUE)
View(credito)
# Split the data into training and test sets via a simple random sample:
# each row goes to group 1 (train) or 2 (test) with probability 70%/30%.
# Fixed: the original hard-coded the sample size as 1000; nrow() makes the
# split correct even when the file does not have exactly 1000 rows.
amostra <- sample(2, nrow(credito), replace = TRUE, prob = c(0.7, 0.3))
amostra
treino <- credito[amostra == 1,]
teste <- credito[amostra == 2,]
dim(credito)
dim(treino)
dim(teste)
# Install e1071 only when it is missing (the original unconditionally
# re-downloaded the package on every run of the script).
if (!requireNamespace("e1071", quietly = TRUE)) {
  install.packages("e1071", dependencies = TRUE)
}
library(e1071)
# Predictive model: Naive Bayes with `class` as the outcome and all other
# columns as predictors.
modelo <- naiveBayes(class ~ ., treino)
modelo
# Predict on the held-out test set.
previsao <- predict(modelo, teste)
previsao
# Spot-check the first two predictions against the observed classes.
teste$class[1]
previsao[1]
teste$class[2]
previsao[2]
# Validation: confusion matrix (observed x predicted).
confusao <- table(teste$class, previsao)
confusao
# Accuracy = share of correct predictions (diagonal of the matrix).
# Equivalent to (confusao[1] + confusao[4]) / sum(confusao) for two classes,
# but also correct if `class` ever has more than two levels.
perc <- sum(diag(confusao)) / sum(confusao)
perc
# Score a new file (NovoCredit.csv) with the fitted model.
novaInstancia <- read.csv(file.choose(), sep = ',', header = TRUE)
dim(novaInstancia)
# Predicted class for the new clients.
novaInstancia$class <- predict(modelo, novaInstancia)
novaInstancia$class
|
## doPermimp is the workhorse of the permimp methods;
## it is called by all the permimp methods and computes the (conditional)
## permutation variable importance of the forest in `object`, returning a
## VarImp object (built via as.VarImp at the end of this function).
doPermimp <- function(object, input, inp, y, OOB, threshold, conditional,
                      whichxnames, ntree, nperm, scaled,
                      progressBar, thresholdDiagnostics,
                      w, AUC, pre1.0_0, mincriterion, asParty)
{
  # Check if conditional permutation importance is possible:
  # it requires complete predictor data and a threshold low enough that
  # at least one conditioning variable can be selected.
  if (conditional) {
    if(!all(complete.cases(input)))
      stop("cannot compute variable importance measure with missing values")
    if (conditional && threshold == 1) {
      warning(sQuote("permimp"),
              paste0(": Unable to permute conditionally. \n",
                     "The chosen threshold is too high, no variables to condition on were selected. \n",
                     "Instead the unconditional permimp values are computed. "),
              call. = FALSE, immediate. = TRUE)
      # fall back: re-evaluate this very call with conditional = FALSE
      doPermimpCall <- match.call()
      doPermimpCall$conditional <- FALSE
      return(eval(doPermimpCall))
    }
  }
  # select the predictors for which to compute the permutation importance
  # (whichxnames == NULL means: all predictors, in their original order)
  xnames <- colnames(input)
  if(is.null(whichxnames)) {
    whichxnames <- xnames
    whichVarIDs <- seq_along(xnames)
  }
  else {
    whichVarIDs <- match(whichxnames, table = xnames)
    if(length(whichVarIDs) < 1){
      stop("Error: whichxnames is not a subset of the predictor variable names in the forest.")
    }
    whichVarIDs <- whichVarIDs[order(whichVarIDs)]
  }
  # Check the outcome type and select the matching error-, null-error- and
  # prediction functions (package helpers defined elsewhere)
  type <- getOutcomeType(object)
  error <- selectError(type, AUC)
  nullError <- selectNullError(type)
  pred <- selectPred(object, type, w, inp, y)
  # when asParty == TRUE, collect the conditioning list up front
  # (one list for the whole forest, as the party package does)
  if (conditional && asParty) {
    cond_list <- create_cond_list(binnedVars = NULL, threshold,
                                  input, seq_along(xnames), asParty = TRUE)
  }
  # ## list for several permutations
  # ## this array is initialized with values 0 so that a tree that does not
  # ## contain the current variable adds importance 0 to its average importance
  # perror <- array(0, dim = c(ntree, length(xnames), nperm),
  #                 dimnames = list(NULL, xnames, NULL))
  # ## this matrix will be used to give suggestions to de/increase the used threshold
  # ## it is initialized with values NA.
  # changeThres <- array(NA, dim = c(ntree, length(xnames), nperm),
  #                 dimnames = list(NULL, xnames, NULL))
  #
  # start progressbar
  # NOTE(review): with a PSOCK cluster backend the bar is only updated from
  # inside the workers (see below), so it will not advance on the master.
  if(progressBar) pBar <- txtProgressBar(min = 0, max = ntree,
                                         style = 3, char = "|")
  # combine function for foreach: stack each tree's 1 x nvar x nperm result
  # slice along the first (tree) dimension
  acomb <- function(...){abind::abind(..., along = 1)}
  # set up parallel backend and register it with foreach package
parseSlurmNodeList <- function(arg1) {
#parses SLURM_NODELIST environment variable into a list of nodes (i.e. to be passed to makePSOCKcluster() or h2o.init())
#only works for a list from a single partition
#assumes node indices are 4 digits long (i.e. cn-0004, not cn-004 or cn-00004)
#if only 1 node, no parsing needed
if (!grepl("(\\[)", arg1)) {
return(arg1)
}
##get node prefix
node.prefex <- sub("\\[.*", "", arg1)
#remove the opening square bracket and everything before it
nodes <- sub(".*\\[", "", arg1)
#remove trailing square bracket
nodes <- sub("]", "", nodes)
#split list
nodes <- unlist(strsplit(nodes, ","))
##expand ranges denoted by "-"
which.collapsed <- grep("-", nodes)
for (i in which.collapsed) {
collapsed <- nodes[i]
range.bounds <- as.numeric(unlist(strsplit(collapsed, "-")))
expanded <- paste0(range.bounds[1]:range.bounds[2])
for (j in 1:length(expanded)){
while (nchar(expanded[j]) < 4){
expanded[j] <- paste0("0", expanded[j])
}
}
expanded <- paste(expanded, collapse = " ")
nodes[i] <- expanded
}
#expanded ranges were entered as space-delimited single elements in the list of nodes
#fix nodes so that each element contains a single node address
nodes <- unlist(strsplit(nodes, "\\s"))
#concatenate to required form: xx-####
nodes <- paste0(node.prefex, nodes)
return(nodes)
}
nodes <- parseSlurmNodeList(Sys.getenv("SLURM_NODELIST"))
cpus.per.node <- as.numeric(Sys.getenv("SLURM_CPUS_PER_TASK"))
where.to.make.cluster <- unlist(lapply(nodes, rep, times=cpus.per.node))
cluster <- parallel::makePSOCKcluster(where.to.make.cluster)
doParallel::registerDoParallel(cluster)
# for all trees (treeNr) in the forest
`%dorng%` <- doRNG::`%dorng%`
perror <- foreach::foreach(treeNr=seq_len(ntree), .combine = "acomb", .multicombine = TRUE, .options.rng = 456) %dorng% {
#initialize slice of perror
perror <- array(0, dim = c(1, length(xnames), nperm),
dimnames = list(NULL, xnames, NULL))
tree <- getTree(object, treeNr)
## if OOB == TRUE use only oob observations, otherwise use all observations in learning sample
if(OOB){oob <- getOOB(object, treeNr)} else {oob <- rep(TRUE, length(y))}
## prediction & error before permutation
p <- pred(tree, inp, mincriterion, -1L, input)
eoob <- error(p, oob, y)
## select variables that are used for splitting in the current tree
varsInTree <- intersect(unique(varIDs(tree)), whichVarIDs)
## Only make the binned variables based on splitting points when conditional == TRUE
if(conditional) {
## make list of variables, categorized/binned using the used splitting points
binnedVars <- makeBinnedVars(varsInTree, tree, oob, input)
if (!asParty) {
cond_list <- create_cond_list(binnedVars, threshold, input, varsInTree, asParty = FALSE)
}
}
## for all variables (j) in the tree (j = number of variables)
for(j in varsInTree){
## for every permutation
for (per in 1:nperm){
if (!conditional && !pre1.0_0){
## splitwise permutation only possible for RandomForest (party) object.
p <- pred(tree, inp, mincriterion, as.integer(j))
}
else {
if(conditional){
# changeThres[treeNr, j, per] <- 0 # if variable is in tree, NA -> 0
## only condition on variables that are in tree,
## and that are associated with the current variable
if(asParty)
varsToCondOn <- intersect(cond_list[[as.character(j)]], varsInTree)
else varsToCondOn <- cond_list[[as.character(j)]]
if(length(varsToCondOn) < 1){
## If there are no variables to condition on, conditionally permution is impossible.
## -1 corresponds to a suggestion to decrease the used threshold
# changeThres[treeNr, j, per] <- -1
perm <- sample(which(oob))
} else {
perm <- conditional_perm(varID = j, varsToCondOn,
binnedVars, oob, asParty)
}
}
else{
perm <- sample(which(oob))
}
if(is.null(perm)) {
## if conditionally permution cannot result in different outcomes:
## (a) +1 correstponds to a suggestion to increase the threshold; (treeNr) jump to next varInTree
# changeThres[treeNr, j, per] <- 1
break}
## replace premuted observations for predictor j
tmp <- replacePermVar(input, inp, permVarNr = j, oob, perm)
p <- pred(tree, tmp, mincriterion, -1L, input = tmp)
}
## run through all rows of perror
perror[1, j, per] <- (error(p, oob, y) - eoob)
} ## end of for (per in 1:nperm)
} ## end of for(j in varsInTree)
if(scaled){
perror[1, , per] <- perror[1, , per]/nullError(y, oob)
}
if(progressBar) setTxtProgressBar(pBar , treeNr)
return(perror)
} ## end of for (treeNr in 1:ntree)
# stop parallel backend
stopCluster(cluster)
perror <- apply(perror[ , whichVarIDs, , drop = FALSE], c(1, 2), mean)
perror <- as.data.frame(perror)
# if(thresholdDiagnostics){
# changeThres <- apply(changeThres[ , whichVarIDs, , drop = FALSE], 2, mean, na.rm = TRUE)
# increaseThres <- changeThres > .5
# decreaseThres <- changeThres < -.5
# if(any(increaseThres[!is.na(increaseThres)])){
# warning(sQuote("permimp"),
# paste0(" Unable to permute conditionally for ",
# sum(increaseThres),
# " variable(s) in 50 percent of the cases.\n",
# "Increasing the threshold may help. \n",
# "The variables for which conditionally permuting (often) was impossible are: ",
# ifelse(sum(increaseThres) > 6,
# paste0("(showing only six) \n - ",
# paste0(whichxnames[increaseThres][1:6], collapse = "\n - ")),
# paste0("\n - ",
# paste0(whichxnames[increaseThres], collapse = "\n - ")))),
# call. = FALSE, immediate. = TRUE)
# }
# if(any(decreaseThres[!is.na(decreaseThres)])){
# warning(sQuote("permimp"),
# paste0(" Conditionally permuting the predictor values of ",
# sum(decreaseThres),
# " variable(s) had no impact in 50 percent of the cases.\n",
# "Decreasing the threshold may help. \n",
# "The variables for which conditionally permuting (often) had no impact are: ",
# ifelse(sum(decreaseThres) > 6,
# paste0("(showing only six) \n - ",
# paste0(whichxnames[decreaseThres][1:6], collapse = "\n - ")),
# paste0("\n - ",
# paste0(whichxnames[decreaseThres], collapse = "\n - ")))),
# call. = FALSE, immediate. = TRUE)
# }
# }
info <- list()
if(conditional){
info$threshold = threshold
if (asParty) info$conditioning = "as party"
else info$conditioning = "permimp implementation"
}
info$outcomeType = type
if (info$outcomeType == "nominal2") info$outcomeType <- "binary"
if (type == "survival") info$errorType <- "Brier score"
else if (type == "regression") info$errorType <- "MSE"
else info$errorType <- "accuracy"
if(AUC && type == "nominal2") info$errorType <- "AUC"
# if(scaled) return(ScaledMeanDecreaseAccuracy = colMeans(perror)/apply(perror, 2, sd))
out <- as.VarImp(perror,
FUN = mean,
type = 'if'(conditional, "Conditional Permutation", "Permutation"),
info = info)
if(progressBar) close(pBar)
return(out)
} | /permimp/R/doPermimp.R | no_license | NolanLabNYU/PEDF-a-pleiotropic | R | false | false | 11,224 | r | ## doPermimp is the workinghorse of the permimp methods.
## it is called by all the permimp methods and computes the (conditional)
## permutation variable importance of the forest in `object`, returning a
## VarImp object (built via as.VarImp at the end of this function).
doPermimp <- function(object, input, inp, y, OOB, threshold, conditional,
                      whichxnames, ntree, nperm, scaled,
                      progressBar, thresholdDiagnostics,
                      w, AUC, pre1.0_0, mincriterion, asParty)
{
  # Check if conditional permutation importance is possible:
  # it requires complete predictor data and a threshold low enough that
  # at least one conditioning variable can be selected.
  if (conditional) {
    if(!all(complete.cases(input)))
      stop("cannot compute variable importance measure with missing values")
    if (conditional && threshold == 1) {
      warning(sQuote("permimp"),
              paste0(": Unable to permute conditionally. \n",
                     "The chosen threshold is too high, no variables to condition on were selected. \n",
                     "Instead the unconditional permimp values are computed. "),
              call. = FALSE, immediate. = TRUE)
      # fall back: re-evaluate this very call with conditional = FALSE
      doPermimpCall <- match.call()
      doPermimpCall$conditional <- FALSE
      return(eval(doPermimpCall))
    }
  }
  # select the predictors for which to compute the permutation importance
  # (whichxnames == NULL means: all predictors, in their original order)
  xnames <- colnames(input)
  if(is.null(whichxnames)) {
    whichxnames <- xnames
    whichVarIDs <- seq_along(xnames)
  }
  else {
    whichVarIDs <- match(whichxnames, table = xnames)
    if(length(whichVarIDs) < 1){
      stop("Error: whichxnames is not a subset of the predictor variable names in the forest.")
    }
    whichVarIDs <- whichVarIDs[order(whichVarIDs)]
  }
  # Check the outcome type and select the matching error-, null-error- and
  # prediction functions (package helpers defined elsewhere)
  type <- getOutcomeType(object)
  error <- selectError(type, AUC)
  nullError <- selectNullError(type)
  pred <- selectPred(object, type, w, inp, y)
  # when asParty == TRUE, collect the conditioning list up front
  # (one list for the whole forest, as the party package does)
  if (conditional && asParty) {
    cond_list <- create_cond_list(binnedVars = NULL, threshold,
                                  input, seq_along(xnames), asParty = TRUE)
  }
  # ## list for several permutations
  # ## this array is initialized with values 0 so that a tree that does not
  # ## contain the current variable adds importance 0 to its average importance
  # perror <- array(0, dim = c(ntree, length(xnames), nperm),
  #                 dimnames = list(NULL, xnames, NULL))
  # ## this matrix will be used to give suggestions to de/increase the used threshold
  # ## it is initialized with values NA.
  # changeThres <- array(NA, dim = c(ntree, length(xnames), nperm),
  #                 dimnames = list(NULL, xnames, NULL))
  #
  # start progressbar
  # NOTE(review): with a PSOCK cluster backend the bar is only updated from
  # inside the workers (see below), so it will not advance on the master.
  if(progressBar) pBar <- txtProgressBar(min = 0, max = ntree,
                                         style = 3, char = "|")
  # combine function for foreach: stack each tree's 1 x nvar x nperm result
  # slice along the first (tree) dimension
  acomb <- function(...){abind::abind(..., along = 1)}
  # set up parallel backend and register it with foreach package
parseSlurmNodeList <- function(arg1) {
#parses SLURM_NODELIST environment variable into a list of nodes (i.e. to be passed to makePSOCKcluster() or h2o.init())
#only works for a list from a single partition
#assumes node indices are 4 digits long (i.e. cn-0004, not cn-004 or cn-00004)
#if only 1 node, no parsing needed
if (!grepl("(\\[)", arg1)) {
return(arg1)
}
##get node prefix
node.prefex <- sub("\\[.*", "", arg1)
#remove the opening square bracket and everything before it
nodes <- sub(".*\\[", "", arg1)
#remove trailing square bracket
nodes <- sub("]", "", nodes)
#split list
nodes <- unlist(strsplit(nodes, ","))
##expand ranges denoted by "-"
which.collapsed <- grep("-", nodes)
for (i in which.collapsed) {
collapsed <- nodes[i]
range.bounds <- as.numeric(unlist(strsplit(collapsed, "-")))
expanded <- paste0(range.bounds[1]:range.bounds[2])
for (j in 1:length(expanded)){
while (nchar(expanded[j]) < 4){
expanded[j] <- paste0("0", expanded[j])
}
}
expanded <- paste(expanded, collapse = " ")
nodes[i] <- expanded
}
#expanded ranges were entered as space-delimited single elements in the list of nodes
#fix nodes so that each element contains a single node address
nodes <- unlist(strsplit(nodes, "\\s"))
#concatenate to required form: xx-####
nodes <- paste0(node.prefex, nodes)
return(nodes)
}
nodes <- parseSlurmNodeList(Sys.getenv("SLURM_NODELIST"))
cpus.per.node <- as.numeric(Sys.getenv("SLURM_CPUS_PER_TASK"))
where.to.make.cluster <- unlist(lapply(nodes, rep, times=cpus.per.node))
cluster <- parallel::makePSOCKcluster(where.to.make.cluster)
doParallel::registerDoParallel(cluster)
# for all trees (treeNr) in the forest
`%dorng%` <- doRNG::`%dorng%`
perror <- foreach::foreach(treeNr=seq_len(ntree), .combine = "acomb", .multicombine = TRUE, .options.rng = 456) %dorng% {
#initialize slice of perror
perror <- array(0, dim = c(1, length(xnames), nperm),
dimnames = list(NULL, xnames, NULL))
tree <- getTree(object, treeNr)
## if OOB == TRUE use only oob observations, otherwise use all observations in learning sample
if(OOB){oob <- getOOB(object, treeNr)} else {oob <- rep(TRUE, length(y))}
## prediction & error before permutation
p <- pred(tree, inp, mincriterion, -1L, input)
eoob <- error(p, oob, y)
## select variables that are used for splitting in the current tree
varsInTree <- intersect(unique(varIDs(tree)), whichVarIDs)
## Only make the binned variables based on splitting points when conditional == TRUE
if(conditional) {
## make list of variables, categorized/binned using the used splitting points
binnedVars <- makeBinnedVars(varsInTree, tree, oob, input)
if (!asParty) {
cond_list <- create_cond_list(binnedVars, threshold, input, varsInTree, asParty = FALSE)
}
}
## for all variables (j) in the tree (j = number of variables)
for(j in varsInTree){
## for every permutation
for (per in 1:nperm){
if (!conditional && !pre1.0_0){
## splitwise permutation only possible for RandomForest (party) object.
p <- pred(tree, inp, mincriterion, as.integer(j))
}
else {
if(conditional){
# changeThres[treeNr, j, per] <- 0 # if variable is in tree, NA -> 0
## only condition on variables that are in tree,
## and that are associated with the current variable
if(asParty)
varsToCondOn <- intersect(cond_list[[as.character(j)]], varsInTree)
else varsToCondOn <- cond_list[[as.character(j)]]
if(length(varsToCondOn) < 1){
## If there are no variables to condition on, conditionally permution is impossible.
## -1 corresponds to a suggestion to decrease the used threshold
# changeThres[treeNr, j, per] <- -1
perm <- sample(which(oob))
} else {
perm <- conditional_perm(varID = j, varsToCondOn,
binnedVars, oob, asParty)
}
}
else{
perm <- sample(which(oob))
}
if(is.null(perm)) {
## if conditionally permution cannot result in different outcomes:
## (a) +1 correstponds to a suggestion to increase the threshold; (treeNr) jump to next varInTree
# changeThres[treeNr, j, per] <- 1
break}
## replace premuted observations for predictor j
tmp <- replacePermVar(input, inp, permVarNr = j, oob, perm)
p <- pred(tree, tmp, mincriterion, -1L, input = tmp)
}
## run through all rows of perror
perror[1, j, per] <- (error(p, oob, y) - eoob)
} ## end of for (per in 1:nperm)
} ## end of for(j in varsInTree)
if(scaled){
perror[1, , per] <- perror[1, , per]/nullError(y, oob)
}
if(progressBar) setTxtProgressBar(pBar , treeNr)
return(perror)
} ## end of for (treeNr in 1:ntree)
# stop parallel backend
stopCluster(cluster)
perror <- apply(perror[ , whichVarIDs, , drop = FALSE], c(1, 2), mean)
perror <- as.data.frame(perror)
# if(thresholdDiagnostics){
# changeThres <- apply(changeThres[ , whichVarIDs, , drop = FALSE], 2, mean, na.rm = TRUE)
# increaseThres <- changeThres > .5
# decreaseThres <- changeThres < -.5
# if(any(increaseThres[!is.na(increaseThres)])){
# warning(sQuote("permimp"),
# paste0(" Unable to permute conditionally for ",
# sum(increaseThres),
# " variable(s) in 50 percent of the cases.\n",
# "Increasing the threshold may help. \n",
# "The variables for which conditionally permuting (often) was impossible are: ",
# ifelse(sum(increaseThres) > 6,
# paste0("(showing only six) \n - ",
# paste0(whichxnames[increaseThres][1:6], collapse = "\n - ")),
# paste0("\n - ",
# paste0(whichxnames[increaseThres], collapse = "\n - ")))),
# call. = FALSE, immediate. = TRUE)
# }
# if(any(decreaseThres[!is.na(decreaseThres)])){
# warning(sQuote("permimp"),
# paste0(" Conditionally permuting the predictor values of ",
# sum(decreaseThres),
# " variable(s) had no impact in 50 percent of the cases.\n",
# "Decreasing the threshold may help. \n",
# "The variables for which conditionally permuting (often) had no impact are: ",
# ifelse(sum(decreaseThres) > 6,
# paste0("(showing only six) \n - ",
# paste0(whichxnames[decreaseThres][1:6], collapse = "\n - ")),
# paste0("\n - ",
# paste0(whichxnames[decreaseThres], collapse = "\n - ")))),
# call. = FALSE, immediate. = TRUE)
# }
# }
info <- list()
if(conditional){
info$threshold = threshold
if (asParty) info$conditioning = "as party"
else info$conditioning = "permimp implementation"
}
info$outcomeType = type
if (info$outcomeType == "nominal2") info$outcomeType <- "binary"
if (type == "survival") info$errorType <- "Brier score"
else if (type == "regression") info$errorType <- "MSE"
else info$errorType <- "accuracy"
if(AUC && type == "nominal2") info$errorType <- "AUC"
# if(scaled) return(ScaledMeanDecreaseAccuracy = colMeans(perror)/apply(perror, 2, sd))
out <- as.VarImp(perror,
FUN = mean,
type = 'if'(conditional, "Conditional Permutation", "Permutation"),
info = info)
if(progressBar) close(pBar)
return(out)
} |
# COVID-19 classwork: compare cumulative case counts for Idaho and Utah
# using the NYT state-level dataset.
pacman::p_load(tidyverse)

# NYT cumulative COVID-19 cases/deaths by state, long format (one row per
# state per date).
covid_state_url <- "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv"
covid_state_data <- covid_state_url %>%
  read_csv() %>%
  glimpse()

# Restrict to the two states of interest once, then reuse for both plots.
id_ut_cases <- covid_state_data %>%
  filter(state %in% c("Idaho", "Utah"))

# Plot 1: cumulative cases over time, one panel per state.
id_ut_cases %>%
  ggplot(aes(x = date, y = cases)) +
  geom_line() +
  facet_grid(. ~ state)

# Plot 2: same series overlaid on one panel, distinguished by color.
id_ut_cases %>%
  ggplot(aes(x = date, y = cases, color = state)) +
  geom_line()
| /week_03/COVID_classwork.R | no_license | nicwayne/M335 | R | false | false | 466 | r | pacman::p_load(tidyverse)
covid_state_url <- "https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv"
covid_state_data <-
read_csv(covid_state_url) %>%
glimpse()
covid_state_data %>%
filter(state %in% c("Idaho","Utah")) %>%
ggplot(aes(x = date, y = cases)) +
geom_line() +
facet_grid(. ~ state)
covid_state_data %>%
filter(state %in% c("Idaho","Utah")) %>%
ggplot(aes(x = date, y = cases, color=state)) +
geom_line()
|
"bank" <-
structure(list(Status = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
Length = c(214.8, 214.6, 214.8, 214.8, 215, 215.7, 215.5,
214.5, 214.9, 215.2, 215.3, 215.1, 215.2, 214.7, 215.1, 214.5,
214.6, 215, 215.2, 214.7, 215, 215.6, 215.3, 215.7, 215.1,
215.3, 215.5, 215.1, 215.1, 214.8, 215.2, 214.8, 215, 215.6,
215.9, 214.6, 215.5, 215.3, 215.3, 213.9, 214.4, 214.8, 214.9,
214.9, 214.8, 214.3, 214.8, 214.8, 214.6, 214.5, 214.6, 215.3,
214.5, 215.4, 214.5, 215.2, 215.7, 215, 215.1, 215.1, 215.1,
215.3, 215.4, 214.5, 215, 215.2, 214.6, 214.8, 215.1, 214.9,
213.8, 215.2, 215, 214.4, 215.2, 214.1, 214.9, 214.6, 215.2,
214.6, 215.1, 214.9, 215.2, 215.2, 215.4, 215.1, 215.2, 215,
214.9, 215, 214.7, 215.4, 214.9, 214.5, 214.7, 215.6, 215,
214.4, 215.1, 214.7, 214.4, 214.9, 214.9, 215, 214.7, 215,
215.3, 214.8, 215, 215.2, 215.2, 215.1, 215.4, 214.9, 215.1,
215.5, 214.7, 214.7, 214.8, 214.4, 214.8, 215.1, 215.3, 215.1,
214.7, 214.9, 215, 215.5, 215.1, 214.5, 214.3, 214.5, 214.9,
214.6, 214.2, 214.8, 214.6, 214.9, 214.6, 214.5, 214.8, 214.7,
214.6, 215, 214.5, 214.9, 215, 215.3, 214.7, 214.9, 214.9,
214.6, 214.6, 214.5, 214.5, 215.1, 214.2, 214.4, 214.8, 214.6,
215.6, 214.9, 214.6, 214.7, 214.3, 215.1, 216.3, 215.6, 214.8,
214.9, 213.9, 214.2, 214.8, 214.8, 214.8, 214.9, 214.3, 214.5,
214.8, 214.5, 215, 214.8, 215, 214.6, 214.7, 214.7, 214.5,
214.8, 214.8, 214.6, 215.1, 215.4, 214.7, 215, 214.9, 215,
215.1, 214.8, 214.7, 214.3), Left = c(131, 129.7, 129.7,
129.7, 129.6, 130.8, 129.5, 129.6, 129.4, 130.4, 130.4, 129.5,
130.8, 129.7, 129.9, 129.8, 129.9, 129.9, 129.6, 130.2, 129.9,
130.5, 130.6, 130.2, 129.7, 130.4, 130.2, 130.3, 130, 129.7,
130.1, 129.7, 130, 130.4, 130.4, 130.2, 130.3, 129.9, 130.3,
130.3, 129.8, 130.1, 129.6, 130.4, 129.4, 129.5, 129.9, 129.9,
129.7, 129, 129.8, 130.6, 130.1, 130.2, 129.4, 129.7, 130,
129.6, 130.1, 130, 129.6, 129.7, 129.8, 130, 130, 130.6,
129.5, 129.7, 129.6, 130.2, 129.8, 129.9, 129.6, 129.9, 129.9,
129.6, 129.9, 129.8, 130.5, 129.9, 129.7, 129.8, 129.7, 130.1,
130.7, 129.9, 129.9, 129.6, 130.3, 129.9, 129.7, 130, 129.4,
129.5, 129.6, 129.9, 130.4, 129.7, 130, 130, 130.1, 130.5,
130.3, 130.4, 130.2, 130.2, 130.3, 130.1, 130.2, 130.6, 130.4,
130.5, 130.7, 130.4, 130.3, 130.4, 130.6, 130.4, 130.5, 130.2,
130.3, 130.6, 130.8, 130.7, 130.5, 130, 130.4, 130.7, 130.2,
130.2, 130.2, 130.2, 130.5, 130.2, 130, 130.1, 129.8, 130.7,
130.4, 130.5, 130.2, 130, 130.2, 130.5, 129.8, 130.6, 130.5,
130.6, 130.2, 129.9, 130.3, 129.9, 129.7, 130.1, 130.3, 130,
129.7, 130.1, 130.4, 130.6, 130.1, 130.5, 130.1, 130.1, 130.3,
130.3, 130.7, 130.4, 129.9, 130, 130.7, 130.6, 130.5, 129.6,
130.1, 130.4, 130.1, 130.4, 130.5, 130.2, 130.4, 130.6, 130.5,
130.5, 130.2, 130.4, 130.4, 130, 129.9, 130.3, 130.2, 130.5,
130.3, 130.5, 130.3, 130.4, 130.3, 130.3, 130.7, 129.9),
Right = c(131.1, 129.7, 129.7, 129.6, 129.7, 130.5, 129.7,
129.2, 129.7, 130.3, 130.3, 129.6, 129.6, 129.7, 129.7, 129.8,
130.1, 129.7, 129.6, 129.9, 129.3, 130, 130, 130, 129.9,
130.4, 130.1, 130.3, 130, 129.3, 129.8, 129.7, 129.6, 130.1,
130, 130.2, 130, 129.4, 130.1, 129, 129.2, 129.6, 129.4,
129.7, 129.1, 129.4, 129.7, 129.7, 129.8, 129.6, 129.4, 130,
130, 130.2, 129.5, 129.4, 129.4, 129.4, 129.9, 129.8, 129.3,
129.4, 129.4, 129.5, 129.8, 130, 129.2, 129.3, 129.8, 130.2,
129.5, 129.5, 130.2, 129.6, 129.7, 129.3, 130.1, 129.4, 129.8,
129.4, 129.7, 129.6, 129.1, 129.9, 130.2, 129.6, 129.7, 129.2,
129.9, 129.7, 129.3, 129.9, 129.5, 129.3, 129.5, 129.9, 130.3,
129.5, 129.8, 129.4, 130.3, 130.2, 130.1, 130.6, 130.3, 130.2,
130.1, 130.4, 129.9, 130.8, 130.3, 130.3, 131.1, 129.9, 130,
130, 130.1, 130.1, 130.2, 129.9, 130.4, 130.3, 131.1, 130.4,
130.5, 130.3, 130.4, 130.3, 130.2, 130.6, 130, 129.8, 130.2,
130.4, 130.2, 130.1, 130.2, 130.3, 130.4, 130.2, 130.3, 129.4,
130.4, 130.4, 129.8, 130.4, 130.4, 130.3, 130.1, 130, 129.9,
129.7, 129.3, 130.1, 130, 130.3, 129.6, 130, 130.6, 130.1,
129.7, 130.1, 130, 130.2, 130, 130.6, 130.4, 130.1, 129.8,
129.9, 130.5, 130.4, 130.3, 130, 130, 130.2, 130.1, 130,
130.3, 130.4, 130.1, 130.6, 130.1, 130.4, 130.1, 130, 130,
129.7, 130.2, 130.2, 129.8, 130.6, 130.2, 130.3, 130.5, 130.3,
129.9, 130.4, 130.8, 129.9), Bottom = c(9, 8.1, 8.7, 7.5,
10.4, 9, 7.9, 7.2, 8.2, 9.2, 7.9, 7.7, 7.9, 7.7, 7.7, 9.3,
8.2, 9, 7.4, 8.6, 8.4, 8.1, 8.4, 8.7, 7.4, 8, 8.9, 9.8, 7.4,
8.3, 7.9, 8.6, 7.7, 8.4, 8.9, 9.4, 8.4, 7.9, 8.5, 8.1, 8.9,
8.8, 9.3, 9, 8.2, 8.3, 8.3, 7.3, 7.9, 7.8, 7.2, 9.5, 7.8,
7.6, 7.9, 9.2, 9.2, 8.8, 7.9, 8.2, 8.3, 7.5, 8, 8, 8.6, 8.8,
7.7, 9.1, 8.6, 8, 8.4, 8.2, 8.7, 7.5, 7.2, 7.6, 8.8, 7.4,
7.9, 7.9, 8.6, 7.5, 9, 7.9, 9, 8.9, 8.7, 8.4, 7.4, 8, 8.6,
8.5, 8.2, 7.4, 8.3, 9, 9.1, 8, 9.1, 7.8, 9.7, 11, 8.7, 9.9,
11.8, 10.6, 9.3, 9.8, 10, 10.4, 8, 10.6, 9.7, 11.4, 10.6,
8.2, 11.8, 12.1, 11, 10.1, 10.1, 12.3, 11.6, 10.5, 9.9, 10.2,
9.4, 10.2, 10.1, 9.8, 10.7, 12.3, 10.6, 10.5, 11, 11.9, 10.7,
9.3, 11.3, 11.8, 10, 10.2, 11.2, 10.6, 11.4, 11.9, 11.4,
9.3, 10.7, 9.9, 11.9, 11.9, 10.4, 12.1, 11, 11.6, 10.3, 11.3,
12.5, 8.1, 7.4, 9.9, 11.5, 11.6, 11.4, 10.3, 10, 9.6, 9.6,
11.4, 8.7, 12, 11.8, 10.4, 11.4, 11.9, 11.6, 9.9, 10.2, 8.2,
11.4, 8, 11, 10.1, 10.7, 11.5, 8, 11.4, 9.6, 12.7, 10.2,
8.8, 10.8, 9.6, 11.6, 9.9, 10.3, 10.6, 11.2, 10.2), Top = c(9.7,
9.5, 9.6, 10.4, 7.7, 10.1, 9.6, 10.7, 11, 10, 11.7, 10.5,
10.8, 10.9, 10.8, 8.5, 9.8, 9, 11.5, 10, 10, 10.3, 10.8,
10, 10.8, 11, 9.8, 9.5, 10.5, 9, 10.7, 9.1, 10.5, 10.3, 10.6,
9.7, 9.7, 10, 9.3, 9.7, 9.4, 9.9, 9, 9.8, 10.2, 10.2, 10.2,
10.9, 10.3, 9.8, 10, 9.7, 10.9, 10.9, 10, 9.4, 10.4, 9, 11,
10.3, 9.9, 10.5, 10.6, 10.8, 10.6, 10.6, 10.3, 9.5, 9.8,
11.2, 11.1, 10.3, 10, 10.5, 10.6, 10.7, 10, 10.6, 10.9, 10,
10.3, 10.3, 9.7, 10.8, 11.1, 10.2, 9.5, 10.2, 11.2, 10.5,
9.6, 9.7, 9.9, 10.7, 10, 9.5, 10.2, 10.3, 10.2, 10, 11.7,
11.5, 11.7, 10.9, 10.9, 10.7, 12.1, 11.5, 11.9, 11.2, 11.5,
11.5, 11.8, 11, 10.8, 11.2, 10.5, 10.4, 11, 12, 12.1, 10.2,
10.6, 11.2, 10.3, 11.4, 11.6, 11.8, 11.3, 12.1, 10.5, 11.2,
11.5, 11.8, 11.2, 11.1, 11.1, 11.2, 10.8, 10.2, 11.9, 11,
10.7, 11.1, 10, 10.5, 10.7, 11.3, 11, 12.3, 10.6, 10.1, 11,
10.3, 11.5, 10.5, 11.4, 10.7, 10, 12.1, 12.2, 10.2, 10.6,
10.9, 10.5, 12, 10.1, 11.2, 12, 10.9, 11.5, 10.2, 10.5, 11.6,
10.5, 10.7, 10.5, 12, 12.1, 11.8, 10.7, 11.4, 11.4, 11.4,
11.1, 10.7, 12.2, 10.6, 11.9, 9.1, 12, 11, 11.1, 11, 10.6,
12.1, 11.5, 11.1, 11.2, 11.5), Diagonal = c(141, 141.7, 142.2,
142, 141.8, 141.4, 141.6, 141.7, 141.9, 140.7, 141.8, 142.2,
141.4, 141.7, 141.8, 141.6, 141.7, 141.9, 141.5, 141.9, 141.4,
141.6, 141.5, 141.6, 141.1, 142.3, 142.4, 141.9, 141.8, 142,
141.8, 142.3, 140.7, 141, 141.4, 141.8, 141.8, 142, 142.1,
141.3, 142.3, 140.9, 141.7, 140.9, 141, 141.8, 141.5, 142,
141.1, 142, 141.3, 141.1, 140.9, 141.6, 141.4, 142, 141.2,
141.1, 141.3, 141.4, 141.6, 141.5, 141.5, 141.4, 141.5, 140.8,
141.3, 141.5, 141.8, 139.6, 140.9, 141.4, 141.2, 141.8, 142.1,
141.7, 141.2, 141, 140.9, 141.8, 140.6, 141, 141.9, 141.3,
141.2, 141.5, 141.6, 142.1, 141.5, 142, 141.6, 141.4, 141.5,
141.5, 142, 141.7, 141.1, 141.2, 141.5, 141.2, 139.8, 139.5,
140.2, 140.3, 139.7, 139.9, 140.2, 139.9, 139.4, 140.3, 139.2,
140.1, 140.6, 139.9, 139.7, 139.2, 139.8, 139.9, 140, 139.2,
139.6, 139.6, 140.2, 139.7, 140.1, 139.6, 140.2, 140, 140.3,
139.9, 139.8, 139.2, 139.9, 139.7, 139.5, 139.5, 139.4, 138.3,
139.8, 139.6, 139.3, 139.2, 139.9, 139.9, 139.3, 139.8, 139.9,
138.1, 139.4, 139.4, 139.8, 139, 139.3, 139.4, 139.5, 139.7,
139.5, 139.2, 139.3, 137.9, 138.4, 138.1, 139.5, 139.1, 139.8,
139.7, 138.8, 138.6, 139.6, 139.7, 137.8, 139.6, 139.4, 139.2,
139.6, 139, 139.7, 139.6, 139.1, 137.8, 139.1, 138.7, 139.3,
139.3, 139.5, 139.4, 138.5, 139.2, 139.4, 139.2, 139.4, 138.6,
139.2, 138.5, 139.8, 139.6, 139.7, 140, 139.4, 139.6)), .Names = c("Status",
"Length", "Left", "Right", "Bottom", "Top", "Diagonal"), class = "data.frame", row.names = c("1",
"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
"14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24",
"25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35",
"36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46",
"47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57",
"58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68",
"69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
"80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90",
"91", "92", "93", "94", "95", "96", "97", "98", "99", "100",
"101", "102", "103", "104", "105", "106", "107", "108", "109",
"110", "111", "112", "113", "114", "115", "116", "117", "118",
"119", "120", "121", "122", "123", "124", "125", "126", "127",
"128", "129", "130", "131", "132", "133", "134", "135", "136",
"137", "138", "139", "140", "141", "142", "143", "144", "145",
"146", "147", "148", "149", "150", "151", "152", "153", "154",
"155", "156", "157", "158", "159", "160", "161", "162", "163",
"164", "165", "166", "167", "168", "169", "170", "171", "172",
"173", "174", "175", "176", "177", "178", "179", "180", "181",
"182", "183", "184", "185", "186", "187", "188", "189", "190",
"191", "192", "193", "194", "195", "196", "197", "198", "199",
"200"))
| /bank.R | no_license | StaThin/data | R | false | false | 10,341 | r | "bank" <-
structure(list(Status = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
Length = c(214.8, 214.6, 214.8, 214.8, 215, 215.7, 215.5,
214.5, 214.9, 215.2, 215.3, 215.1, 215.2, 214.7, 215.1, 214.5,
214.6, 215, 215.2, 214.7, 215, 215.6, 215.3, 215.7, 215.1,
215.3, 215.5, 215.1, 215.1, 214.8, 215.2, 214.8, 215, 215.6,
215.9, 214.6, 215.5, 215.3, 215.3, 213.9, 214.4, 214.8, 214.9,
214.9, 214.8, 214.3, 214.8, 214.8, 214.6, 214.5, 214.6, 215.3,
214.5, 215.4, 214.5, 215.2, 215.7, 215, 215.1, 215.1, 215.1,
215.3, 215.4, 214.5, 215, 215.2, 214.6, 214.8, 215.1, 214.9,
213.8, 215.2, 215, 214.4, 215.2, 214.1, 214.9, 214.6, 215.2,
214.6, 215.1, 214.9, 215.2, 215.2, 215.4, 215.1, 215.2, 215,
214.9, 215, 214.7, 215.4, 214.9, 214.5, 214.7, 215.6, 215,
214.4, 215.1, 214.7, 214.4, 214.9, 214.9, 215, 214.7, 215,
215.3, 214.8, 215, 215.2, 215.2, 215.1, 215.4, 214.9, 215.1,
215.5, 214.7, 214.7, 214.8, 214.4, 214.8, 215.1, 215.3, 215.1,
214.7, 214.9, 215, 215.5, 215.1, 214.5, 214.3, 214.5, 214.9,
214.6, 214.2, 214.8, 214.6, 214.9, 214.6, 214.5, 214.8, 214.7,
214.6, 215, 214.5, 214.9, 215, 215.3, 214.7, 214.9, 214.9,
214.6, 214.6, 214.5, 214.5, 215.1, 214.2, 214.4, 214.8, 214.6,
215.6, 214.9, 214.6, 214.7, 214.3, 215.1, 216.3, 215.6, 214.8,
214.9, 213.9, 214.2, 214.8, 214.8, 214.8, 214.9, 214.3, 214.5,
214.8, 214.5, 215, 214.8, 215, 214.6, 214.7, 214.7, 214.5,
214.8, 214.8, 214.6, 215.1, 215.4, 214.7, 215, 214.9, 215,
215.1, 214.8, 214.7, 214.3), Left = c(131, 129.7, 129.7,
129.7, 129.6, 130.8, 129.5, 129.6, 129.4, 130.4, 130.4, 129.5,
130.8, 129.7, 129.9, 129.8, 129.9, 129.9, 129.6, 130.2, 129.9,
130.5, 130.6, 130.2, 129.7, 130.4, 130.2, 130.3, 130, 129.7,
130.1, 129.7, 130, 130.4, 130.4, 130.2, 130.3, 129.9, 130.3,
130.3, 129.8, 130.1, 129.6, 130.4, 129.4, 129.5, 129.9, 129.9,
129.7, 129, 129.8, 130.6, 130.1, 130.2, 129.4, 129.7, 130,
129.6, 130.1, 130, 129.6, 129.7, 129.8, 130, 130, 130.6,
129.5, 129.7, 129.6, 130.2, 129.8, 129.9, 129.6, 129.9, 129.9,
129.6, 129.9, 129.8, 130.5, 129.9, 129.7, 129.8, 129.7, 130.1,
130.7, 129.9, 129.9, 129.6, 130.3, 129.9, 129.7, 130, 129.4,
129.5, 129.6, 129.9, 130.4, 129.7, 130, 130, 130.1, 130.5,
130.3, 130.4, 130.2, 130.2, 130.3, 130.1, 130.2, 130.6, 130.4,
130.5, 130.7, 130.4, 130.3, 130.4, 130.6, 130.4, 130.5, 130.2,
130.3, 130.6, 130.8, 130.7, 130.5, 130, 130.4, 130.7, 130.2,
130.2, 130.2, 130.2, 130.5, 130.2, 130, 130.1, 129.8, 130.7,
130.4, 130.5, 130.2, 130, 130.2, 130.5, 129.8, 130.6, 130.5,
130.6, 130.2, 129.9, 130.3, 129.9, 129.7, 130.1, 130.3, 130,
129.7, 130.1, 130.4, 130.6, 130.1, 130.5, 130.1, 130.1, 130.3,
130.3, 130.7, 130.4, 129.9, 130, 130.7, 130.6, 130.5, 129.6,
130.1, 130.4, 130.1, 130.4, 130.5, 130.2, 130.4, 130.6, 130.5,
130.5, 130.2, 130.4, 130.4, 130, 129.9, 130.3, 130.2, 130.5,
130.3, 130.5, 130.3, 130.4, 130.3, 130.3, 130.7, 129.9),
Right = c(131.1, 129.7, 129.7, 129.6, 129.7, 130.5, 129.7,
129.2, 129.7, 130.3, 130.3, 129.6, 129.6, 129.7, 129.7, 129.8,
130.1, 129.7, 129.6, 129.9, 129.3, 130, 130, 130, 129.9,
130.4, 130.1, 130.3, 130, 129.3, 129.8, 129.7, 129.6, 130.1,
130, 130.2, 130, 129.4, 130.1, 129, 129.2, 129.6, 129.4,
129.7, 129.1, 129.4, 129.7, 129.7, 129.8, 129.6, 129.4, 130,
130, 130.2, 129.5, 129.4, 129.4, 129.4, 129.9, 129.8, 129.3,
129.4, 129.4, 129.5, 129.8, 130, 129.2, 129.3, 129.8, 130.2,
129.5, 129.5, 130.2, 129.6, 129.7, 129.3, 130.1, 129.4, 129.8,
129.4, 129.7, 129.6, 129.1, 129.9, 130.2, 129.6, 129.7, 129.2,
129.9, 129.7, 129.3, 129.9, 129.5, 129.3, 129.5, 129.9, 130.3,
129.5, 129.8, 129.4, 130.3, 130.2, 130.1, 130.6, 130.3, 130.2,
130.1, 130.4, 129.9, 130.8, 130.3, 130.3, 131.1, 129.9, 130,
130, 130.1, 130.1, 130.2, 129.9, 130.4, 130.3, 131.1, 130.4,
130.5, 130.3, 130.4, 130.3, 130.2, 130.6, 130, 129.8, 130.2,
130.4, 130.2, 130.1, 130.2, 130.3, 130.4, 130.2, 130.3, 129.4,
130.4, 130.4, 129.8, 130.4, 130.4, 130.3, 130.1, 130, 129.9,
129.7, 129.3, 130.1, 130, 130.3, 129.6, 130, 130.6, 130.1,
129.7, 130.1, 130, 130.2, 130, 130.6, 130.4, 130.1, 129.8,
129.9, 130.5, 130.4, 130.3, 130, 130, 130.2, 130.1, 130,
130.3, 130.4, 130.1, 130.6, 130.1, 130.4, 130.1, 130, 130,
129.7, 130.2, 130.2, 129.8, 130.6, 130.2, 130.3, 130.5, 130.3,
129.9, 130.4, 130.8, 129.9), Bottom = c(9, 8.1, 8.7, 7.5,
10.4, 9, 7.9, 7.2, 8.2, 9.2, 7.9, 7.7, 7.9, 7.7, 7.7, 9.3,
8.2, 9, 7.4, 8.6, 8.4, 8.1, 8.4, 8.7, 7.4, 8, 8.9, 9.8, 7.4,
8.3, 7.9, 8.6, 7.7, 8.4, 8.9, 9.4, 8.4, 7.9, 8.5, 8.1, 8.9,
8.8, 9.3, 9, 8.2, 8.3, 8.3, 7.3, 7.9, 7.8, 7.2, 9.5, 7.8,
7.6, 7.9, 9.2, 9.2, 8.8, 7.9, 8.2, 8.3, 7.5, 8, 8, 8.6, 8.8,
7.7, 9.1, 8.6, 8, 8.4, 8.2, 8.7, 7.5, 7.2, 7.6, 8.8, 7.4,
7.9, 7.9, 8.6, 7.5, 9, 7.9, 9, 8.9, 8.7, 8.4, 7.4, 8, 8.6,
8.5, 8.2, 7.4, 8.3, 9, 9.1, 8, 9.1, 7.8, 9.7, 11, 8.7, 9.9,
11.8, 10.6, 9.3, 9.8, 10, 10.4, 8, 10.6, 9.7, 11.4, 10.6,
8.2, 11.8, 12.1, 11, 10.1, 10.1, 12.3, 11.6, 10.5, 9.9, 10.2,
9.4, 10.2, 10.1, 9.8, 10.7, 12.3, 10.6, 10.5, 11, 11.9, 10.7,
9.3, 11.3, 11.8, 10, 10.2, 11.2, 10.6, 11.4, 11.9, 11.4,
9.3, 10.7, 9.9, 11.9, 11.9, 10.4, 12.1, 11, 11.6, 10.3, 11.3,
12.5, 8.1, 7.4, 9.9, 11.5, 11.6, 11.4, 10.3, 10, 9.6, 9.6,
11.4, 8.7, 12, 11.8, 10.4, 11.4, 11.9, 11.6, 9.9, 10.2, 8.2,
11.4, 8, 11, 10.1, 10.7, 11.5, 8, 11.4, 9.6, 12.7, 10.2,
8.8, 10.8, 9.6, 11.6, 9.9, 10.3, 10.6, 11.2, 10.2), Top = c(9.7,
9.5, 9.6, 10.4, 7.7, 10.1, 9.6, 10.7, 11, 10, 11.7, 10.5,
10.8, 10.9, 10.8, 8.5, 9.8, 9, 11.5, 10, 10, 10.3, 10.8,
10, 10.8, 11, 9.8, 9.5, 10.5, 9, 10.7, 9.1, 10.5, 10.3, 10.6,
9.7, 9.7, 10, 9.3, 9.7, 9.4, 9.9, 9, 9.8, 10.2, 10.2, 10.2,
10.9, 10.3, 9.8, 10, 9.7, 10.9, 10.9, 10, 9.4, 10.4, 9, 11,
10.3, 9.9, 10.5, 10.6, 10.8, 10.6, 10.6, 10.3, 9.5, 9.8,
11.2, 11.1, 10.3, 10, 10.5, 10.6, 10.7, 10, 10.6, 10.9, 10,
10.3, 10.3, 9.7, 10.8, 11.1, 10.2, 9.5, 10.2, 11.2, 10.5,
9.6, 9.7, 9.9, 10.7, 10, 9.5, 10.2, 10.3, 10.2, 10, 11.7,
11.5, 11.7, 10.9, 10.9, 10.7, 12.1, 11.5, 11.9, 11.2, 11.5,
11.5, 11.8, 11, 10.8, 11.2, 10.5, 10.4, 11, 12, 12.1, 10.2,
10.6, 11.2, 10.3, 11.4, 11.6, 11.8, 11.3, 12.1, 10.5, 11.2,
11.5, 11.8, 11.2, 11.1, 11.1, 11.2, 10.8, 10.2, 11.9, 11,
10.7, 11.1, 10, 10.5, 10.7, 11.3, 11, 12.3, 10.6, 10.1, 11,
10.3, 11.5, 10.5, 11.4, 10.7, 10, 12.1, 12.2, 10.2, 10.6,
10.9, 10.5, 12, 10.1, 11.2, 12, 10.9, 11.5, 10.2, 10.5, 11.6,
10.5, 10.7, 10.5, 12, 12.1, 11.8, 10.7, 11.4, 11.4, 11.4,
11.1, 10.7, 12.2, 10.6, 11.9, 9.1, 12, 11, 11.1, 11, 10.6,
12.1, 11.5, 11.1, 11.2, 11.5), Diagonal = c(141, 141.7, 142.2,
142, 141.8, 141.4, 141.6, 141.7, 141.9, 140.7, 141.8, 142.2,
141.4, 141.7, 141.8, 141.6, 141.7, 141.9, 141.5, 141.9, 141.4,
141.6, 141.5, 141.6, 141.1, 142.3, 142.4, 141.9, 141.8, 142,
141.8, 142.3, 140.7, 141, 141.4, 141.8, 141.8, 142, 142.1,
141.3, 142.3, 140.9, 141.7, 140.9, 141, 141.8, 141.5, 142,
141.1, 142, 141.3, 141.1, 140.9, 141.6, 141.4, 142, 141.2,
141.1, 141.3, 141.4, 141.6, 141.5, 141.5, 141.4, 141.5, 140.8,
141.3, 141.5, 141.8, 139.6, 140.9, 141.4, 141.2, 141.8, 142.1,
141.7, 141.2, 141, 140.9, 141.8, 140.6, 141, 141.9, 141.3,
141.2, 141.5, 141.6, 142.1, 141.5, 142, 141.6, 141.4, 141.5,
141.5, 142, 141.7, 141.1, 141.2, 141.5, 141.2, 139.8, 139.5,
140.2, 140.3, 139.7, 139.9, 140.2, 139.9, 139.4, 140.3, 139.2,
140.1, 140.6, 139.9, 139.7, 139.2, 139.8, 139.9, 140, 139.2,
139.6, 139.6, 140.2, 139.7, 140.1, 139.6, 140.2, 140, 140.3,
139.9, 139.8, 139.2, 139.9, 139.7, 139.5, 139.5, 139.4, 138.3,
139.8, 139.6, 139.3, 139.2, 139.9, 139.9, 139.3, 139.8, 139.9,
138.1, 139.4, 139.4, 139.8, 139, 139.3, 139.4, 139.5, 139.7,
139.5, 139.2, 139.3, 137.9, 138.4, 138.1, 139.5, 139.1, 139.8,
139.7, 138.8, 138.6, 139.6, 139.7, 137.8, 139.6, 139.4, 139.2,
139.6, 139, 139.7, 139.6, 139.1, 137.8, 139.1, 138.7, 139.3,
139.3, 139.5, 139.4, 138.5, 139.2, 139.4, 139.2, 139.4, 138.6,
139.2, 138.5, 139.8, 139.6, 139.7, 140, 139.4, 139.6)), .Names = c("Status",
"Length", "Left", "Right", "Bottom", "Top", "Diagonal"), class = "data.frame", row.names = c("1",
"2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13",
"14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24",
"25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35",
"36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46",
"47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57",
"58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68",
"69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
"80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90",
"91", "92", "93", "94", "95", "96", "97", "98", "99", "100",
"101", "102", "103", "104", "105", "106", "107", "108", "109",
"110", "111", "112", "113", "114", "115", "116", "117", "118",
"119", "120", "121", "122", "123", "124", "125", "126", "127",
"128", "129", "130", "131", "132", "133", "134", "135", "136",
"137", "138", "139", "140", "141", "142", "143", "144", "145",
"146", "147", "148", "149", "150", "151", "152", "153", "154",
"155", "156", "157", "158", "159", "160", "161", "162", "163",
"164", "165", "166", "167", "168", "169", "170", "171", "172",
"173", "174", "175", "176", "177", "178", "179", "180", "181",
"182", "183", "184", "185", "186", "187", "188", "189", "190",
"191", "192", "193", "194", "195", "196", "197", "198", "199",
"200"))
|
# setwd("B:/Data/public/Coursera/r-prog/ProgrammingAssignment3")
#' Return the hospital in `state` holding rank `num` for 30-day mortality
#' on the given outcome.
#'
#' Reads "data/outcome-of-care-measures.csv" (relative to the working
#' directory) on every call.
#'
#' @param state Two-letter state abbreviation; must appear in the data.
#' @param outcome One of "heart attack", "heart failure", "pneumonia".
#' @param num Requested rank: "best", "worst", or a 1-based integer rank.
#'   An integer rank beyond the number of ranked hospitals yields NA.
#' @return Hospital name (character). Ties in death rate are broken
#'   alphabetically by hospital name.
rankhospital <- function(state, outcome, num = "best") {
  ## Column index of each outcome's 30-day death rate in the raw CSV.
  outcomes <- c("heart attack" = 11,
                "heart failure" = 17,
                "pneumonia" = 23)
  ## "Not Available" entries become NA so they can be dropped later.
  df.outcome <- read.csv("data/outcome-of-care-measures.csv",
                         stringsAsFactors = FALSE,
                         na.strings = "Not Available")

  ## Validate inputs. Compare logicals directly instead of the original
  ## `== "FALSE"` string-coercion idiom.
  if (!(state %in% df.outcome$State)) stop("invalid state")
  if (!(outcome %in% names(outcomes))) stop("invalid outcome")

  ## Columns: 2 = hospital name, 7 = state, outcomes[outcome] = death rate.
  scores <- df.outcome[which(df.outcome$State == state),
                       c(2, 7, outcomes[outcome])]
  select_ranked_hospital(scores, num)
}

#' Internal helper for rankhospital(): pick the hospital at rank `num`
#' from a three-column table (hospital name, state, 30-day death rate).
#' Rows with any NA are dropped; remaining rows are ordered by death rate,
#' with ties broken by hospital name.
select_ranked_hospital <- function(scores, num) {
  complete <- na.omit(scores)
  ranked <- complete[order(complete[[3]], complete[[1]]), ]
  ## identical() avoids coercing a numeric `num` to character for comparison.
  if (identical(num, "best")) return(ranked[1, 1])
  if (identical(num, "worst")) return(ranked[nrow(ranked), 1])
  ranked[num, 1]  # out-of-range integer rank yields NA, as required
}
| /rankhospital.R | no_license | lantzts/ProgrammingAssignment3 | R | false | false | 1,364 | r | # setwd("B:/Data/public/Coursera/r-prog/ProgrammingAssignment3")
rankhospital <- function(state, outcome, num = "best"){
## Read outcome data
outcomes <- c('heart attack' = 11,
'heart failure' = 17,
'pneumonia' = 23)
df.outcome <- read.csv("data/outcome-of-care-measures.csv",
stringsAsFactors = FALSE,
na.strings = "Not Available")
## Check that the state and outcome are valid
# If invalid state, "invalid state"
if((state %in% df.outcome$State)=="FALSE") stop("invalid state")
# if invalid outcome, "invalid outcome"
if((outcome %in% names(outcomes))=="FALSE") stop("invalid outcome")
## Return hospital name in the state with the given rank
## 30-day death rate
scores <- df.outcome[which(df.outcome$State==state),
c(2,7,outcomes[outcome])]
scores.omit <- na.omit(scores)
scores.order <- scores.omit[ order(
scores.omit[,3],
scores.omit[,1]),]
if(num=="best") scores.order[1,1] else
if(num=="worst") scores.order[nrow(scores.order),1] else
scores.order[num,1]
}
|
library("Biostrings")
library("ShortRead")
# Path to file
readPath <- system.file('extdata', "sampleSequences.fastq", package = 'STRMPS')
# Flanking regions
data("flankingRegions")
# Read the file into memory
readFile <- readFastq(readPath)
sread(readFile)
quality(readFile)
# Identify the STR's of the file, both readPath and readFile can be used.
\donttest{
identifySTRRegions(reads = readFile, flankingRegions = flankingRegions,
numberOfMutation = 1,
control = identifySTRRegions.control(
numberOfThreads = 1,
includeReverseComplement = FALSE)
)
}
| /inst/examples/identify.R | no_license | cran/STRMPS | R | false | false | 658 | r | library("Biostrings")
library("ShortRead")
# Path to file
readPath <- system.file('extdata', "sampleSequences.fastq", package = 'STRMPS')
# Flanking regions
data("flankingRegions")
# Read the file into memory
readFile <- readFastq(readPath)
sread(readFile)
quality(readFile)
# Identify the STR's of the file, both readPath and readFile can be used.
\donttest{
identifySTRRegions(reads = readFile, flankingRegions = flankingRegions,
numberOfMutation = 1,
control = identifySTRRegions.control(
numberOfThreads = 1,
includeReverseComplement = FALSE)
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dirs.R
\name{NEONMICROBE_DIR_SOIL}
\alias{NEONMICROBE_DIR_SOIL}
\title{Dynamic Directory Name for Soil Data}
\usage{
NEONMICROBE_DIR_SOIL()
}
\value{
Directory path (character).
}
\description{
For NEON soil data DP1.10086.001:
"Soil physical and chemical properties, periodic",
tables sls_soilCoreCollection, sls_soilMoisture, sls_soilpH, and sls_soilChemistry.
}
| /man/NEONMICROBE_DIR_SOIL.Rd | no_license | naithanilab/neonMicrobe | R | false | true | 443 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dirs.R
\name{NEONMICROBE_DIR_SOIL}
\alias{NEONMICROBE_DIR_SOIL}
\title{Dynamic Directory Name for Soil Data}
\usage{
NEONMICROBE_DIR_SOIL()
}
\value{
Directory path (character).
}
\description{
For NEON soil data DP1.10086.001:
"Soil physical and chemical properties, periodic",
tables sls_soilCoreCollection, sls_soilMoisture, sls_soilpH, and sls_soilChemistry.
}
|
#' Build a spline random forest.
#'
#' Builds an ensemble of regression trees for longitudinal or functional data using the spline projection method. The resulting model
#' contains a list of spline trees along with some additional information. All parameters are used in the same way that they are used in
#' the splineTree() function. The additional parameter ntree specifies how many trees should be in the ensemble, and prob controls the
#' probability of selecting a given variable for split consideration at a node. This method may take several minutes to run- saving the forest after
#' building it is recommended.
#'
#' The ensemble method is highly similar to the random forest methodology of Breiman (2001). Each tree in the ensemble is fit to a random sample
#' of 63.5% of the data (sampled without replacement). At each node of each tree, only a subset of the split variables are considered candidates for the split. In our methodology,
#' the subset of variables considered at each node is determined by a random process. The prob parameter specifies the probability that a given variable
#' will be selected at a certain node. Because the method is based on probability, the same number of variables are not considered for splitting at each node
#' (as in the randomForest package). Note that if prob is small and the number of variables in the splitFormula is also small, there is a high probability that
#' no variables will be considered for splitting at a certain node, which is problematic. The fewer total variables there are, the larger prob should be to
#' ensure good results.
#'
#' @param splitFormula Formula specifying the longitudinal response variable and the time-constant variables that will be used for splitting in the tree.
#' @param tformula Formula specifying the longitudinal response variable and the variable that acts as the time variable.
#' @param idvar The name of the variable that serves as the ID variable for grouping observations. Must be in quotes
#' @param data dataframe that contains all variables specified in the formulas- in long format.
#' @param knots Specified locations for internal knots in the spline basis. Defaults to NULL, which corresponds to no internal knots.
#' @param df Degrees of freedom of the spline basis. If this is specified but the knots parameter is NULL, then the appropriate number of internal knots
#' will be added at quantiles of the training data. If both df and knots are unspecified, the spline basis will have no internal knots.
#' @param degree Specifies degree of spline basis used in the tree.
#' @param intercept Specifies whether or not the splitting process will consider the intercept coefficient of the spline projections.
#' Defaults to FALSE, which means that the tree will split based on trajectory shape, ignoring response level.
#' @param nGrid Number of grid points to evaluate projection sum of squares at. If gridPoints is not supplied, then this is the
#' number of grid points that will be automatically placed at quantiles of the time variable. The default is 7.
#' @param gridPoints Optional. A vector of numbers that will be used as the grid on which to evaluate the projection
#' sum of squares. Should fall roughly within the range of the time variable.
#' @param minNodeSize Minimum number of observational units that can be in a terminal node. Controls tree size and helps avoid overfitting.
#' Default is 10.
#' @param cp Complexity parameter passed to the rpart building process. Default is the rpart default of 0.01
#' @param ntree Number of trees in the forest.
#' @param prob Probability of selecting a variable to included as a candidate for each split.
#' @param bootstrap Boolean specifying whether bootstrap sampling should be used when choosing data to
#' use for each tree. When set to FALSE (the default), sampling without replacement is used and 63.5% of the data
#' is used for each tree. When set to TRUE, a bootstrap sample is used for each tree.
#' @return A spline forest model, which is a named list with 15 components.
#' The list stores a list of trees (in model$Trees), along with information about the
#' spline basis used (model$intercept, model$innerKnots, model$boundaryKnots, etc.), and information about which datapoints were
#' used to build each tree (model$oob_indices and model$index). Note that each element in model$Trees is an rpart object but
#' it is not the same as a model returned from splineTree() because it does not store all relevant information in model$parms.
#' @export
#' @import nlme
#' @import rpart
#' @import splines
#' @importFrom graphics barplot layout par plot points rect text
#' @importFrom stats complete.cases formula lm quantile runif sd terms time
#' @examples
#' \donttest{
#' nlsySubset <- nlsySample[nlsySample$ID %in% sample(unique(nlsySample$ID), 400),]
#' splitForm <-~HISP+WHITE+BLACK+HGC_MOTHER+HGC_FATHER+SEX+Num_sibs
#' sampleForest <- splineForest(splitForm, BMI~AGE, 'ID', nlsySubset, degree=1, cp=0.005, ntree=10)
#' }
splineForest <- function(splitFormula, tformula,
    idvar, data, knots = NULL, df = NULL, degree = 3,
    intercept = FALSE, nGrid = 7, gridPoints = NULL, ntree = 50, prob = 0.3,
    cp = 0.001, minNodeSize = 1, bootstrap = FALSE) {
    #### Once per forest: spline preprocessing shared by every tree.
    yvar <- attr(terms(getResponseFormula(tformula)),
        "term.labels")
    tvar <- attr(terms(tformula), "term.labels")
    splitvars <- attr(terms(splitFormula), "term.labels")
    ### Split variables must be constant within each individual, because
    ### splitting is done on one flattened row per individual.
    if (length(unique(data[, c(idvar, splitvars)])[[idvar]]) !=
        length(unique(data[[idvar]]))) {
        stop("Split variables must be non-time-varying.")
    }
    flat_data <- flatten_predictors(idvar, data)
    results <- getBasisMat(yvar, tvar, idvar, data,
        knots, df, degree, intercept, gridPoints, nGrid)
    basisMatrix <- results[[1]]
    innerKnots <- results[[2]]
    boundaryKnots <- results[[3]]
    # Project each individual's trajectory onto the spline basis.
    Ydata <- sapply(unique(data[[idvar]]), individual_spline,
        idvar, yvar, tvar, data, boundaryKnots,
        innerKnots, degree, intercept)
    intercept_coeffs <- Ydata[1, ]
    if (!intercept) {
        # Drop intercept coefficients so splits depend on shape, not level.
        Ydata <- Ydata[-1, ]
    }
    if (is.vector(Ydata)) {
        flat_data$Ydata <- Ydata
    } else {
        flat_data$Ydata <- t(Ydata)
    }
    flat_data$intercept_coeffs <- intercept_coeffs
    ### Remove the raw response; trees are built on the coefficients.
    flat_data <- flat_data[, names(flat_data) != yvar]
    ### Drop rows with NA coefficients now. If we don't, rpart drops them
    ### internally and our index bookkeeping would no longer line up.
    flat_data <- flat_data[complete.cases(Ydata), ]
    Ydata <- as.matrix(Ydata)[complete.cases(Ydata), ]
    data <- data.frame(data[data[[idvar]] %in% flat_data[[idvar]], ])
    #### All forest computation happens with respect to flat_data.
    ulist <- list(eval = spline_eval, split = splineforest_split,
        init = spline_init)
    # Fix: forward minNodeSize to rpart (as minbucket, the minimum number of
    # observations in a terminal node). Previously the argument was accepted
    # but silently ignored.
    control <- rpart.control(cp = cp, minbucket = minNodeSize)
    form <- formula(paste("Ydata ~ ", paste(attr(terms(formula(splitFormula)),
        "term.labels"), collapse = "+")))
    #### Preprocessing done: build the ensemble.
    if (bootstrap) {
        sampleSize <- NROW(flat_data)
    } else {
        # Without replacement, use the classic 63.2% in-bag fraction.
        sampleSize <- 0.632 * NROW(flat_data)
    }
    myForest <- list()
    itbIndices <- list()
    oobIndices <- list()
    splits <- c()
    print("Building Tree:")
    for (j in seq_len(ntree)) {
        print(j)
        indices <- sample(1:NROW(flat_data), sampleSize,
            replace = bootstrap)
        # Renamed from `sample` to avoid shadowing base::sample().
        inbag <- flat_data[indices, ]
        #### Data is already processed, so build the rpart tree directly.
        fit <- rpart(form, data = inbag,
            method = ulist, control = control,
            maxcompete = 0, parms = list(basisMatrix, prob))
        if (is.null(fit$frame$yval2)) {
            fit$frame$yval2 <- fit$frame$yval
        }
        ### Save in-bag/out-of-bag indices and split variables for this tree.
        itbIndices[[j]] <- unique(indices)
        myForest[[j]] <- fit
        oobIndices[[j]] <- (1:NROW(flat_data))[-unique(indices)]
        splits <- c(splits, row.names(fit$splits))
    }
    results <- list(myForest, itbIndices, splits, data,
        flat_data, splitFormula, oobIndices, degree,
        intercept, df, boundaryKnots, innerKnots,
        idvar, yvar, tvar)
    names(results) <- c("Trees", "index", "splits",
        "data", "flat_data", "formula", "oob_indices",
        "degree", "intercept", "df", "boundaryKnots",
        "innerKnots", "idvar", "yvar", "tvar")
    results
}
| /R/SplineForest.R | no_license | anna-neufeld/splinetree | R | false | false | 8,779 | r | #' Build a spline random forest.
#'
#' Builds an ensemble of regression trees for longitudinal or functional data using the spline projection method. The resulting model
#' contains a list of spline trees along with some additional information. All parameters are used in the same way that they are used in
#' the splineTree() function. The additional parameter ntree specifies how many trees should be in the ensemble, and prob controls the
#' probability of selecting a given variable for split consideration at a node. This method may take several minutes to run- saving the forest after
#' building it is recommended.
#'
#' The ensemble method is highly similar to the random forest methodology of Breiman (2001). Each tree in the ensemble is fit to a random sample
#' of 63.2% of the data (sampled without replacement). At each node of each tree, only a subset of the split variables are considered candidates for the split. In our methodology,
#' the subset of variables considered at each node is determined by a random process. The prob parameter specifies the probability that a given variable
#' will be selected at a certain node. Because the method is based on probability, the same number of variables are not considered for splitting at each node
#' (as in the randomForest package). Note that if prob is small and the number of variables in the splitFormula is also small, there is a high probability that
#' no variables will be considered for splitting at a certain node, which is problematic. The fewer total variables there are, the larger prob should be to
#' ensure good results.
#'
#' @param splitFormula Formula specifying the longitudinal response variable and the time-constant variables that will be used for splitting in the tree.
#' @param tformula Formula specifying the longitudinal response variable and the variable that acts as the time variable.
#' @param idvar The name of the variable that serves as the ID variable for grouping observations. Must be in quotes
#' @param data dataframe that contains all variables specified in the formulas- in long format.
#' @param knots Specified locations for internal knots in the spline basis. Defaults to NULL, which corresponds to no internal knots.
#' @param df Degrees of freedom of the spline basis. If this is specified but the knots parameter is NULL, then the appropriate number of internal knots
#' will be added at quantiles of the training data. If both df and knots are unspecified, the spline basis will have no internal knots.
#' @param degree Specifies degree of spline basis used in the tree.
#' @param intercept Specifies whether or not the splitting process will consider the intercept coefficient of the spline projections.
#' Defaults to FALSE, which means that the tree will split based on trajectory shape, ignoring response level.
#' @param nGrid Number of grid points to evaluate projection sum of squares at. If gridPoints is not supplied, then this is the
#' number of grid points that will be automatically placed at quantiles of the time variable. The default is 7.
#' @param gridPoints Optional. A vector of numbers that will be used as the grid on which to evaluate the projection
#' sum of squares. Should fall roughly within the range of the time variable.
#' @param minNodeSize Minimum number of observational units that can be in a terminal node. Controls tree size and helps avoid overfitting.
#' Default is 1.
#' @param cp Complexity parameter passed to the rpart building process. Default is 0.001.
#' @param ntree Number of trees in the forest.
#' @param prob Probability of selecting a variable to included as a candidate for each split.
#' @param bootstrap Boolean specifying whether bootstrap sampling should be used when choosing data to
#' use for each tree. When set to FALSE (the default), sampling without replacement is used and 63.2% of the data
#' is used for each tree. When set to TRUE, a bootstrap sample is used for each tree.
#' @return A spline forest model, which is a named list with 15 components.
#' The list stores a list of trees (in model$Trees), along with information about the
#' spline basis used (model$intercept, model$innerKnots, model$boundaryKnots, etc.), and information about which datapoints were
#' used to build each tree (model$oob_indices and model$index). Note that each element in model$Trees is an rpart object but
#' it is not the same as a model returned from splineTree() because it does not store all relevant information in model$parms.
#' @export
#' @import nlme
#' @import rpart
#' @import splines
#' @importFrom graphics barplot layout par plot points rect text
#' @importFrom stats complete.cases formula lm quantile runif sd terms time
#' @examples
#' \donttest{
#' nlsySubset <- nlsySample[nlsySample$ID %in% sample(unique(nlsySample$ID), 400),]
#' splitForm <-~HISP+WHITE+BLACK+HGC_MOTHER+HGC_FATHER+SEX+Num_sibs
#' sampleForest <- splineForest(splitForm, BMI~AGE, 'ID', nlsySubset, degree=1, cp=0.005, ntree=10)
#' }
splineForest <- function(splitFormula, tformula,
    idvar, data, knots = NULL, df = NULL, degree = 3,
    intercept = FALSE, nGrid = 7, gridPoints = NULL, ntree = 50, prob = 0.3,
    cp = 0.001, minNodeSize = 1, bootstrap = FALSE) {
    #### Once per forest: spline preprocessing shared by every tree.
    yvar <- attr(terms(getResponseFormula(tformula)),
        "term.labels")
    tvar <- attr(terms(tformula), "term.labels")
    splitvars <- attr(terms(splitFormula), "term.labels")
    ### Split variables must be constant within each individual, because
    ### splitting is done on one flattened row per individual.
    if (length(unique(data[, c(idvar, splitvars)])[[idvar]]) !=
        length(unique(data[[idvar]]))) {
        stop("Split variables must be non-time-varying.")
    }
    flat_data <- flatten_predictors(idvar, data)
    results <- getBasisMat(yvar, tvar, idvar, data,
        knots, df, degree, intercept, gridPoints, nGrid)
    basisMatrix <- results[[1]]
    innerKnots <- results[[2]]
    boundaryKnots <- results[[3]]
    # Project each individual's trajectory onto the spline basis.
    Ydata <- sapply(unique(data[[idvar]]), individual_spline,
        idvar, yvar, tvar, data, boundaryKnots,
        innerKnots, degree, intercept)
    intercept_coeffs <- Ydata[1, ]
    if (!intercept) {
        # Drop intercept coefficients so splits depend on shape, not level.
        Ydata <- Ydata[-1, ]
    }
    if (is.vector(Ydata)) {
        flat_data$Ydata <- Ydata
    } else {
        flat_data$Ydata <- t(Ydata)
    }
    flat_data$intercept_coeffs <- intercept_coeffs
    ### Remove the raw response; trees are built on the coefficients.
    flat_data <- flat_data[, names(flat_data) != yvar]
    ### Drop rows with NA coefficients now. If we don't, rpart drops them
    ### internally and our index bookkeeping would no longer line up.
    flat_data <- flat_data[complete.cases(Ydata), ]
    Ydata <- as.matrix(Ydata)[complete.cases(Ydata), ]
    data <- data.frame(data[data[[idvar]] %in% flat_data[[idvar]], ])
    #### All forest computation happens with respect to flat_data.
    ulist <- list(eval = spline_eval, split = splineforest_split,
        init = spline_init)
    # Fix: forward minNodeSize to rpart (as minbucket, the minimum number of
    # observations in a terminal node). Previously the argument was accepted
    # but silently ignored.
    control <- rpart.control(cp = cp, minbucket = minNodeSize)
    form <- formula(paste("Ydata ~ ", paste(attr(terms(formula(splitFormula)),
        "term.labels"), collapse = "+")))
    #### Preprocessing done: build the ensemble.
    if (bootstrap) {
        sampleSize <- NROW(flat_data)
    } else {
        # Without replacement, use the classic 63.2% in-bag fraction.
        sampleSize <- 0.632 * NROW(flat_data)
    }
    myForest <- list()
    itbIndices <- list()
    oobIndices <- list()
    splits <- c()
    print("Building Tree:")
    for (j in seq_len(ntree)) {
        print(j)
        indices <- sample(1:NROW(flat_data), sampleSize,
            replace = bootstrap)
        # Renamed from `sample` to avoid shadowing base::sample().
        inbag <- flat_data[indices, ]
        #### Data is already processed, so build the rpart tree directly.
        fit <- rpart(form, data = inbag,
            method = ulist, control = control,
            maxcompete = 0, parms = list(basisMatrix, prob))
        if (is.null(fit$frame$yval2)) {
            fit$frame$yval2 <- fit$frame$yval
        }
        ### Save in-bag/out-of-bag indices and split variables for this tree.
        itbIndices[[j]] <- unique(indices)
        myForest[[j]] <- fit
        oobIndices[[j]] <- (1:NROW(flat_data))[-unique(indices)]
        splits <- c(splits, row.names(fit$splits))
    }
    results <- list(myForest, itbIndices, splits, data,
        flat_data, splitFormula, oobIndices, degree,
        intercept, df, boundaryKnots, innerKnots,
        idvar, yvar, tvar)
    names(results) <- c("Trees", "index", "splits",
        "data", "flat_data", "formula", "oob_indices",
        "degree", "intercept", "df", "boundaryKnots",
        "innerKnots", "idvar", "yvar", "tvar")
    results
}
|
# Plot 3: household energy sub-metering for 2007-02-01 and 2007-02-02.
# NOTE: missing values in the source file are coded as "?".
myData <- read.table("household_power_consumption.txt",
                     header=TRUE,
                     na.strings="?",
                     sep=";",
                     stringsAsFactors=FALSE)
str(myData)
# Keep only the two target days (dates are in d/m/Y format).
myIndex <- (myData$Date=="1/2/2007") | (myData$Date=="2/2/2007")
myData <- myData[myIndex,]
# Combine date and time into a single POSIXct timestamp for the x-axis.
myData$DateTime <- paste(myData$Date, myData$Time, sep=" ")
myData$DateTime <- as.POSIXct(myData$DateTime, format="%d/%m/%Y %H:%M:%S")
# png(file="plot3.png", width=480, height=480)
plot(myData$DateTime, myData$Sub_metering_1,
     type="l", xlab="", ylab="Energy sub metering")
lines(myData$DateTime, myData$Sub_metering_2,
      type="l", col="red")
lines(myData$DateTime, myData$Sub_metering_3,
      type="l", col="blue")
# Fix: legend colors must match the plotted series
# (Sub_metering_2 is drawn red, Sub_metering_3 is drawn blue).
legend("topright",
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col=c("black","red","blue"),
       lty=c(1,1,1))
# dev.off() | /ExploratoryDataAnalysis/Project1/plot3.R | no_license | dwarnold45/MyCourseFolders | R | false | false | 872 | r | myData <- read.table("household_power_consumption.txt",
header=TRUE,
na.strings="?",
sep=";",
stringsAsFactors=FALSE)
str(myData)
# Keep only the two target days (dates are in d/m/Y format).
myIndex <- (myData$Date=="1/2/2007") | (myData$Date=="2/2/2007")
myData <- myData[myIndex,]
# Combine date and time into a single POSIXct timestamp for the x-axis.
myData$DateTime <- paste(myData$Date, myData$Time, sep=" ")
myData$DateTime <- as.POSIXct(myData$DateTime, format="%d/%m/%Y %H:%M:%S")
# png(file="plot3.png", width=480, height=480)
plot(myData$DateTime, myData$Sub_metering_1,
     type="l", xlab="", ylab="Energy sub metering")
lines(myData$DateTime, myData$Sub_metering_2,
      type="l", col="red")
lines(myData$DateTime, myData$Sub_metering_3,
      type="l", col="blue")
# Fix: legend colors must match the plotted series
# (Sub_metering_2 is drawn red, Sub_metering_3 is drawn blue).
legend("topright",
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col=c("black","red","blue"),
       lty=c(1,1,1))
# dev.off()
# dev.off() |
# Validate a flight id: must be a single numeric (or integer) value.
# Stops with an informative error otherwise; returns NULL invisibly on success.
.check_id <- function(id) {
  # is.numeric() covers both "numeric" and "integer", replacing the fragile
  # class(id) %in% c('numeric', 'integer') comparison.
  if (!(is.numeric(id) && length(id) == 1)) {
    stop('Provide the correct flight id.')
  }
}
#' Get a flight information
#'
#' Retrieves the record for a single flight from the API.
#'
#' @param id The flight id
#'
#' @return A list with flight information
#' @export
#'
#' @examples
#' \dontrun{
#' pc_login()
#' get_flight(1)
#' }
get_flight <- function(id) {
  .check_id(id)
  resp <- request(httr::GET, paste0('flight/', id))
  httr::stop_for_status(resp)
  httr::content(resp)
}
#' Get GCPs for a flight
#'
#' Retrieves the ground control points (GCPs) recorded for a flight.
#'
#' @param id The flight id
#'
#' @return A list with GCP information
#' @export
#'
#' @examples
#' \dontrun{
#' pc_login()
#' get_flight_gcp(1)
#' }
get_flight_gcp <- function(id) {
  .check_id(id)
  resp <- request(httr::GET, paste0('flight/', id, '/gcp'))
  httr::stop_for_status(resp)
  httr::content(resp)
}
#' Get images for a flight
#'
#' Retrieves the images associated with a flight.
#'
#' @param id The flight id
#'
#' @return A list with images information
#' @export
#'
#' @examples
#' \dontrun{
#' pc_login()
#' get_flight_image(1)
#' }
get_flight_image <- function(id) {
  .check_id(id)
  resp <- request(httr::GET, paste0('flight/', id, '/images'))
  httr::stop_for_status(resp)
  httr::content(resp)
}
#' Get layers for a flight
#'
#' Retrieves the layers associated with a flight.
#'
#' @param id The flight id
#' @param isMosaic Logical flag forwarded to the API as a query-string
#'   parameter (presumably restricts results to mosaic layers -- TODO
#'   confirm against the API docs). Defaults to FALSE.
#'
#' @return A list with layers information
#' @export
#'
#' @examples
#' \dontrun{
#' pc_login()
#' get_flight_layer(1)
#' }
get_flight_layer <- function(id, isMosaic = FALSE) {
  .check_id(id)
  resp <- request(httr::GET, paste0('flight/', id, '/layers?isMosaic=', isMosaic))
  httr::stop_for_status(resp)
  httr::content(resp)
}
| /R/flight.R | no_license | PengchengHU/PhenoCopterAPI | R | false | false | 1,758 | r |
.check_id <- function(id) {
if (!(class(id) %in% c('numeric', 'integer') && length(id) == 1)) {
stop('Provide the correct flight id.')
}
}
#' Get a flight information
#'
#' @param id The flight id
#'
#' @return A list with flight information
#' @export
#'
#' @examples
#' \dontrun{
#' pc_login()
#' get_flight(1)
#' }
get_flight <- function(id) {
.check_id(id)
response <- request(httr::GET, paste0('flight/', id))
httr::stop_for_status(response)
response <- httr::content(response)
response
}
#' Get GCPs for a flight
#'
#' @param id The flight id
#'
#' @return A list with GCP information
#' @export
#'
#' @examples
#' \dontrun{
#' pc_login()
#' get_flight_gcp(1)
#' }
get_flight_gcp <- function(id) {
.check_id(id)
response <- request(httr::GET, paste0('flight/', id, '/gcp'))
httr::stop_for_status(response)
response <- httr::content(response)
response
}
#' Get images for a flight
#'
#' Retrieves the images associated with a flight.
#'
#' @param id The flight id
#'
#' @return A list with images information
#' @export
#'
#' @examples
#' \dontrun{
#' pc_login()
#' get_flight_image(1)
#' }
get_flight_image <- function(id) {
  .check_id(id)
  resp <- request(httr::GET, paste0('flight/', id, '/images'))
  httr::stop_for_status(resp)
  httr::content(resp)
}
#' Get layers for a flight
#'
#' Retrieves the layers associated with a flight.
#'
#' @param id The flight id
#' @param isMosaic Logical flag forwarded to the API as a query-string
#'   parameter (presumably restricts results to mosaic layers -- TODO
#'   confirm against the API docs). Defaults to FALSE.
#'
#' @return A list with layers information
#' @export
#'
#' @examples
#' \dontrun{
#' pc_login()
#' get_flight_layer(1)
#' }
get_flight_layer <- function(id, isMosaic = FALSE) {
  .check_id(id)
  resp <- request(httr::GET, paste0('flight/', id, '/layers?isMosaic=', isMosaic))
  httr::stop_for_status(resp)
  httr::content(resp)
}
|
# Swagger Petstore
#
# This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters.
#
# OpenAPI spec version: 1.0.0
# Contact: apiteam@swagger.io
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' User Class
#'
#' Generated model for the Petstore `User` schema. Holds the user's fields
#' and converts between R objects and JSON, both as jsonlite-ready lists
#' (`toJSON`/`fromJSON`) and as raw JSON strings
#' (`toJSONString`/`fromJSONString`).
#'
#' @field id Numeric user id.
#' @field username Character login name.
#' @field firstName Character given name.
#' @field lastName Character family name.
#' @field email Character email address.
#' @field password Character password.
#' @field phone Character phone number.
#' @field userStatus Numeric user status code.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
User <- R6::R6Class(
  'User',
  public = list(
    `id` = NULL,
    `username` = NULL,
    `firstName` = NULL,
    `lastName` = NULL,
    `email` = NULL,
    `password` = NULL,
    `phone` = NULL,
    `userStatus` = NULL,
    # All fields are optional; each supplied field is validated to be a
    # length-1 vector of the expected mode before being stored.
    initialize = function(`id`, `username`, `firstName`, `lastName`, `email`, `password`, `phone`, `userStatus`){
      if (!missing(`id`)) {
        stopifnot(is.numeric(`id`), length(`id`) == 1)
        self$`id` <- `id`
      }
      if (!missing(`username`)) {
        stopifnot(is.character(`username`), length(`username`) == 1)
        self$`username` <- `username`
      }
      if (!missing(`firstName`)) {
        stopifnot(is.character(`firstName`), length(`firstName`) == 1)
        self$`firstName` <- `firstName`
      }
      if (!missing(`lastName`)) {
        stopifnot(is.character(`lastName`), length(`lastName`) == 1)
        self$`lastName` <- `lastName`
      }
      if (!missing(`email`)) {
        stopifnot(is.character(`email`), length(`email`) == 1)
        self$`email` <- `email`
      }
      if (!missing(`password`)) {
        stopifnot(is.character(`password`), length(`password`) == 1)
        self$`password` <- `password`
      }
      if (!missing(`phone`)) {
        stopifnot(is.character(`phone`), length(`phone`) == 1)
        self$`phone` <- `phone`
      }
      if (!missing(`userStatus`)) {
        stopifnot(is.numeric(`userStatus`), length(`userStatus`) == 1)
        self$`userStatus` <- `userStatus`
      }
    },
    # Build a named list of the non-NULL fields, suitable for jsonlite::toJSON.
    toJSON = function() {
      UserObject <- list()
      if (!is.null(self$`id`)) {
        UserObject[['id']] <- self$`id`
      }
      if (!is.null(self$`username`)) {
        UserObject[['username']] <- self$`username`
      }
      if (!is.null(self$`firstName`)) {
        UserObject[['firstName']] <- self$`firstName`
      }
      if (!is.null(self$`lastName`)) {
        UserObject[['lastName']] <- self$`lastName`
      }
      if (!is.null(self$`email`)) {
        UserObject[['email']] <- self$`email`
      }
      if (!is.null(self$`password`)) {
        UserObject[['password']] <- self$`password`
      }
      if (!is.null(self$`phone`)) {
        UserObject[['phone']] <- self$`phone`
      }
      if (!is.null(self$`userStatus`)) {
        UserObject[['userStatus']] <- self$`userStatus`
      }
      UserObject
    },
    # Populate fields from a JSON string; keys absent from the JSON leave the
    # corresponding field untouched.
    fromJSON = function(UserJson) {
      UserObject <- jsonlite::fromJSON(UserJson)
      if (!is.null(UserObject$`id`)) {
        self$`id` <- UserObject$`id`
      }
      if (!is.null(UserObject$`username`)) {
        self$`username` <- UserObject$`username`
      }
      if (!is.null(UserObject$`firstName`)) {
        self$`firstName` <- UserObject$`firstName`
      }
      if (!is.null(UserObject$`lastName`)) {
        self$`lastName` <- UserObject$`lastName`
      }
      if (!is.null(UserObject$`email`)) {
        self$`email` <- UserObject$`email`
      }
      if (!is.null(UserObject$`password`)) {
        self$`password` <- UserObject$`password`
      }
      if (!is.null(UserObject$`phone`)) {
        self$`phone` <- UserObject$`phone`
      }
      if (!is.null(UserObject$`userStatus`)) {
        self$`userStatus` <- UserObject$`userStatus`
      }
    },
    # Render the object directly as a JSON string.
    # Fix: userStatus is validated as numeric and serialized as a number by
    # toJSON(), but was previously emitted here as a quoted string ("%s");
    # it is now emitted as a bare integer to keep the two paths consistent.
    toJSONString = function() {
      sprintf(
        '{
           "id": %d,
           "username": "%s",
           "firstName": "%s",
           "lastName": "%s",
           "email": "%s",
           "password": "%s",
           "phone": "%s",
           "userStatus": %d
        }',
        self$`id`,
        self$`username`,
        self$`firstName`,
        self$`lastName`,
        self$`email`,
        self$`password`,
        self$`phone`,
        self$`userStatus`
      )
    },
    # Populate all fields from a JSON string; keys absent from the JSON set
    # the corresponding field to NULL.
    fromJSONString = function(UserJson) {
      UserObject <- jsonlite::fromJSON(UserJson)
      self$`id` <- UserObject$`id`
      self$`username` <- UserObject$`username`
      self$`firstName` <- UserObject$`firstName`
      self$`lastName` <- UserObject$`lastName`
      self$`email` <- UserObject$`email`
      self$`password` <- UserObject$`password`
      self$`phone` <- UserObject$`phone`
      self$`userStatus` <- UserObject$`userStatus`
    }
  )
)
| /samples/client/petstore/r_test/R/User.r | permissive | Metaswitch/swagger-codegen | R | false | false | 4,843 | r | # Swagger Petstore
#
# This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters.
#
# OpenAPI spec version: 1.0.0
# Contact: apiteam@swagger.io
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' User Class
#'
#' Generated model for the Petstore `User` schema. Holds the user's fields
#' and converts between R objects and JSON, both as jsonlite-ready lists
#' (`toJSON`/`fromJSON`) and as raw JSON strings
#' (`toJSONString`/`fromJSONString`).
#'
#' @field id Numeric user id.
#' @field username Character login name.
#' @field firstName Character given name.
#' @field lastName Character family name.
#' @field email Character email address.
#' @field password Character password.
#' @field phone Character phone number.
#' @field userStatus Numeric user status code.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
User <- R6::R6Class(
  'User',
  public = list(
    `id` = NULL,
    `username` = NULL,
    `firstName` = NULL,
    `lastName` = NULL,
    `email` = NULL,
    `password` = NULL,
    `phone` = NULL,
    `userStatus` = NULL,
    # All fields are optional; each supplied field is validated to be a
    # length-1 vector of the expected mode before being stored.
    initialize = function(`id`, `username`, `firstName`, `lastName`, `email`, `password`, `phone`, `userStatus`){
      if (!missing(`id`)) {
        stopifnot(is.numeric(`id`), length(`id`) == 1)
        self$`id` <- `id`
      }
      if (!missing(`username`)) {
        stopifnot(is.character(`username`), length(`username`) == 1)
        self$`username` <- `username`
      }
      if (!missing(`firstName`)) {
        stopifnot(is.character(`firstName`), length(`firstName`) == 1)
        self$`firstName` <- `firstName`
      }
      if (!missing(`lastName`)) {
        stopifnot(is.character(`lastName`), length(`lastName`) == 1)
        self$`lastName` <- `lastName`
      }
      if (!missing(`email`)) {
        stopifnot(is.character(`email`), length(`email`) == 1)
        self$`email` <- `email`
      }
      if (!missing(`password`)) {
        stopifnot(is.character(`password`), length(`password`) == 1)
        self$`password` <- `password`
      }
      if (!missing(`phone`)) {
        stopifnot(is.character(`phone`), length(`phone`) == 1)
        self$`phone` <- `phone`
      }
      if (!missing(`userStatus`)) {
        stopifnot(is.numeric(`userStatus`), length(`userStatus`) == 1)
        self$`userStatus` <- `userStatus`
      }
    },
    # Build a named list of the non-NULL fields, suitable for jsonlite::toJSON.
    toJSON = function() {
      UserObject <- list()
      if (!is.null(self$`id`)) {
        UserObject[['id']] <- self$`id`
      }
      if (!is.null(self$`username`)) {
        UserObject[['username']] <- self$`username`
      }
      if (!is.null(self$`firstName`)) {
        UserObject[['firstName']] <- self$`firstName`
      }
      if (!is.null(self$`lastName`)) {
        UserObject[['lastName']] <- self$`lastName`
      }
      if (!is.null(self$`email`)) {
        UserObject[['email']] <- self$`email`
      }
      if (!is.null(self$`password`)) {
        UserObject[['password']] <- self$`password`
      }
      if (!is.null(self$`phone`)) {
        UserObject[['phone']] <- self$`phone`
      }
      if (!is.null(self$`userStatus`)) {
        UserObject[['userStatus']] <- self$`userStatus`
      }
      UserObject
    },
    # Populate fields from a JSON string; keys absent from the JSON leave the
    # corresponding field untouched.
    fromJSON = function(UserJson) {
      UserObject <- jsonlite::fromJSON(UserJson)
      if (!is.null(UserObject$`id`)) {
        self$`id` <- UserObject$`id`
      }
      if (!is.null(UserObject$`username`)) {
        self$`username` <- UserObject$`username`
      }
      if (!is.null(UserObject$`firstName`)) {
        self$`firstName` <- UserObject$`firstName`
      }
      if (!is.null(UserObject$`lastName`)) {
        self$`lastName` <- UserObject$`lastName`
      }
      if (!is.null(UserObject$`email`)) {
        self$`email` <- UserObject$`email`
      }
      if (!is.null(UserObject$`password`)) {
        self$`password` <- UserObject$`password`
      }
      if (!is.null(UserObject$`phone`)) {
        self$`phone` <- UserObject$`phone`
      }
      if (!is.null(UserObject$`userStatus`)) {
        self$`userStatus` <- UserObject$`userStatus`
      }
    },
    # Render the object directly as a JSON string.
    # Fix: userStatus is validated as numeric and serialized as a number by
    # toJSON(), but was previously emitted here as a quoted string ("%s");
    # it is now emitted as a bare integer to keep the two paths consistent.
    toJSONString = function() {
      sprintf(
        '{
           "id": %d,
           "username": "%s",
           "firstName": "%s",
           "lastName": "%s",
           "email": "%s",
           "password": "%s",
           "phone": "%s",
           "userStatus": %d
        }',
        self$`id`,
        self$`username`,
        self$`firstName`,
        self$`lastName`,
        self$`email`,
        self$`password`,
        self$`phone`,
        self$`userStatus`
      )
    },
    # Populate all fields from a JSON string; keys absent from the JSON set
    # the corresponding field to NULL.
    fromJSONString = function(UserJson) {
      UserObject <- jsonlite::fromJSON(UserJson)
      self$`id` <- UserObject$`id`
      self$`username` <- UserObject$`username`
      self$`firstName` <- UserObject$`firstName`
      self$`lastName` <- UserObject$`lastName`
      self$`email` <- UserObject$`email`
      self$`password` <- UserObject$`password`
      self$`phone` <- UserObject$`phone`
      self$`userStatus` <- UserObject$`userStatus`
    }
  )
)
|
library(ggplot2)
# Start from a clean workspace (this file is meant to run as a standalone script).
rm(list = ls())
# Combined starting balance across accounts.
balance.current <- 42281.39 + 11296.46
# Date the balance above is current as of.
date.current <- as.Date("2020-10-18")
# Biweekly paycheck: $2,480 twice per pay period, landing every 14 days
# anchored to the most recent known payday. Any other date yields 0.
paycheck <- function(date) {
  anchor <- as.Date("2020-10-01")
  days.since <- as.numeric(date - anchor)
  if (days.since %% 14 == 0) 2480 * 2 else 0
}
# Each bill is a fixed schedule of date -> payment amount. This helper looks
# a date up in a named schedule and returns 0 for dates with no payment due,
# matching the old per-function switch() default.
.payment_on <- function(date, schedule) {
  amount <- schedule[as.character(date)]
  if (is.na(amount)) 0 else unname(amount)
}
# One-off payoff of existing SLFCU credit card charges (as of 2020/10/05);
# new charges between 2020/10/05 and 2020/10/20 are rolled into the
# projected Citi charges instead.
existing.credit.card <- function(date) {
  .payment_on(date, c("2020-10-20" = 1278.14))
}
# Projected credit-card payments covering all non-utility purchases
# (groceries, fun, other). Includes Citi charges after 2020/09/18 and SLFCU
# charges after 2020/10/20.
projected.credit.card <- function(date) {
  .payment_on(date, c(
    "2020-11-15" = 687.14,  # Citi card ending 2020/10/18
    "2020-12-15" = 1000,    # Citi card ending 2020/11/18
    "2020-11-20" = 0,       # SLFCU card ending 2020/11/20
    "2020-12-20" = 0        # SLFCU card ending 2020/12/20
  ))
}
mortgage <- function(date) {
  .payment_on(date, c(
    "2020-11-01" = 2656,
    "2020-12-01" = 2656,
    "2021-01-01" = 2656
  ))
}
water.bill <- function(date) {
  .payment_on(date, c(
    "2020-10-24" = 300,
    "2020-11-24" = 200,
    "2020-12-24" = 100
  ))
}
electric.bill <- function(date) {
  .payment_on(date, c("2020-11-12" = 100, "2020-12-12" = 100))
}
gas.bill <- function(date) {
  .payment_on(date, c("2020-11-13" = 50, "2020-12-13" = 100))
}
verizon.bill <- function(date) {
  .payment_on(date, c(
    "2020-10-28" = 234,
    "2020-11-28" = 234,
    "2020-12-28" = 234
  ))
}
comcast.bill <- function(date) {
  .payment_on(date, c(
    "2020-10-14" = 90,
    "2020-11-14" = 90,
    "2020-12-14" = 90
  ))
}
# Kitchen remodel contractor draw schedule.
remodel.bill <- function(date) {
  .payment_on(date, c(
    "2020-10-23" = 15187.96,
    "2020-10-30" = 6472.73,
    "2020-11-06" = 1618.18 + 17000
  ))
}
# Calculating running balance: the net cash flow on day d is applied to the
# balance recorded on day d + 1 (same recurrence as the original loop).
end.date <- as.Date("2021-01-15")
# Net cash flow for a single date: income minus every scheduled outflow.
daily.net <- function(date) {
  paycheck(date) -
    existing.credit.card(date) -
    projected.credit.card(date) -
    mortgage(date) -
    water.bill(date) -
    electric.bill(date) -
    gas.bill(date) -
    verizon.bill(date) -
    comcast.bill(date) -
    remodel.bill(date)
}
# Preallocate both histories instead of growing them with c() inside the
# loop (the original pattern was O(n^2) in copies).
n.days <- as.numeric(end.date - date.current)
date.history <- date.current + 0:n.days
balance.history <- numeric(n.days + 1)
balance.history[1] <- balance.current
for (i in seq_len(n.days)) {
  balance.history[i + 1] <- balance.history[i] + daily.net(date.history[i])
}
# Keep the scalar trackers consistent with the original loop's final state.
balance <- balance.history[n.days + 1]
date <- end.date
# Assemble the projected balance series and plot it with reference lines.
plot.df = data.frame(date = date.history, balance = balance.history)
ggplot(plot.df, aes(date, balance)) +
  geom_line() +
  geom_area(alpha = 0.25) +
  # Reference thresholds at $0 / $5k / $20k (presumably overdraft and
  # comfort levels -- TODO confirm what each color was meant to flag).
  geom_hline(yintercept = 0, color = "red") +
  geom_hline(yintercept = 5000, color = "orange") +
  geom_hline(yintercept = 20000, color = "purple") +
  # NOTE(review): 2020-11-18 marker looks like a milestone date -- confirm.
  geom_vline(xintercept = as.Date("2020-11-18"), color = "dark green") +
  # Label the minimum projected balance and the date on which it occurs.
  annotate(geom = "text",
           label = paste0("Min. balance of $", min(balance.history), "\non ", date.history[which.min(balance.history)]),
           x = max(date.history), y = Inf, hjust = 1, vjust = 1.5) +
  theme_bw()
| /Budgeting/kitchen_remodel.R | no_license | tkmckenzie/pan | R | false | false | 4,005 | r | library(ggplot2)
rm(list = ls())
balance.current = 42281.39 + 11296.46
date.current = as.Date("2020-10-18") # Whatever date above balance is current as of
paycheck = function(date){
last.paydate = as.Date("2020-10-01")
if (as.numeric(date - last.paydate) %% 14 == 0){
return(2480 * 2)
} else{
return(0)
}
}
existing.credit.card = function(date){
# As of 2020/10/05
payment = switch(as.character(date),
"2020-10-20" = 1278.14, # SLFCU card existing charges + any new charges between 2020/10/05 and 2020/10/20 (leaving latter as zero and putting all to new Citi charges)
0)
return(payment)
}
projected.credit.card = function(date){
# This should include all non-utility purchases; e.g., groceries, fun, other purchases
# Should include all Citi charges after 2020/09/18 and all SLFCU charges after 2020/10/20
payment = switch(as.character(date),
"2020-11-15" = 687.14, # Citi card ending 2020/10/18
"2020-12-15" = 1000, # Citi card ending 2020/11/18
"2020-11-20" = 0, # SLFCU card ending 2020/11/20
"2020-12-20" = 0, # SLFCU card ending 2020/12/20
0)
return(payment)
}
mortgage = function(date){
payment = switch(as.character(date),
"2020-11-01" = 2656,
"2020-12-01" = 2656,
"2021-01-01" = 2656,
0)
return(payment)
}
water.bill = function(date){
payment = switch(as.character(date),
"2020-10-24" = 300,
"2020-11-24" = 200,
"2020-12-24" = 100,
0)
return(payment)
}
electric.bill = function(date){
payment = switch(as.character(date),
"2020-11-12" = 100,
"2020-12-12" = 100,
0)
return(payment)
}
gas.bill = function(date){
payment = switch(as.character(date),
"2020-11-13" = 50,
"2020-12-13" = 100,
0)
return(payment)
}
verizon.bill = function(date){
payment = switch(as.character(date),
"2020-10-28" = 234,
"2020-11-28" = 234,
"2020-12-28" = 234,
0)
return(payment)
}
comcast.bill = function(date){
payment = switch(as.character(date),
"2020-10-14" = 90,
"2020-11-14" = 90,
"2020-12-14" = 90,
0)
return(payment)
}
remodel.bill = function(date){
payment = switch(as.character(date),
"2020-10-23" = 15187.96,
"2020-10-30" = 6472.73,
"2020-11-06" = 1618.18 + 17000,
0)
return(payment)
}
# Calculating running balance: the net cash flow on day d is applied to the
# balance recorded on day d + 1 (same recurrence as the original loop).
end.date <- as.Date("2021-01-15")
# Net cash flow for a single date: income minus every scheduled outflow.
daily.net <- function(date) {
  paycheck(date) -
    existing.credit.card(date) -
    projected.credit.card(date) -
    mortgage(date) -
    water.bill(date) -
    electric.bill(date) -
    gas.bill(date) -
    verizon.bill(date) -
    comcast.bill(date) -
    remodel.bill(date)
}
# Preallocate both histories instead of growing them with c() inside the
# loop (the original pattern was O(n^2) in copies).
n.days <- as.numeric(end.date - date.current)
date.history <- date.current + 0:n.days
balance.history <- numeric(n.days + 1)
balance.history[1] <- balance.current
for (i in seq_len(n.days)) {
  balance.history[i + 1] <- balance.history[i] + daily.net(date.history[i])
}
# Keep the scalar trackers consistent with the original loop's final state.
balance <- balance.history[n.days + 1]
date <- end.date
# Assemble the projected balance series and plot it with reference lines.
plot.df = data.frame(date = date.history, balance = balance.history)
ggplot(plot.df, aes(date, balance)) +
  geom_line() +
  geom_area(alpha = 0.25) +
  # Reference thresholds at $0 / $5k / $20k (presumably overdraft and
  # comfort levels -- TODO confirm what each color was meant to flag).
  geom_hline(yintercept = 0, color = "red") +
  geom_hline(yintercept = 5000, color = "orange") +
  geom_hline(yintercept = 20000, color = "purple") +
  # NOTE(review): 2020-11-18 marker looks like a milestone date -- confirm.
  geom_vline(xintercept = as.Date("2020-11-18"), color = "dark green") +
  # Label the minimum projected balance and the date on which it occurs.
  annotate(geom = "text",
           label = paste0("Min. balance of $", min(balance.history), "\non ", date.history[which.min(balance.history)]),
           x = max(date.history), y = Inf, hjust = 1, vjust = 1.5) +
  theme_bw()
|
#' docxtractr is an R package for extracting tables out of Word documents (docx)
#'
#' Microsoft Word docx files provide an XML structure that is fairly
#' straightforward to navigate, especially when it applies to Word tables. The
#' docxtractr package provides tools to determine table count + table structure and
#' extract tables from Microsoft Word docx documents.
#'
#' @name docxtractr
#' @docType package
#'
#' @author Bob Rudis (@@hrbrmstr)
#' @importFrom xml2 xml_find_all xml_text xml_ns xml_find_first xml_attrs
#' @importFrom dplyr bind_rows
#' @importFrom tools file_ext
#' @importFrom utils download.file unzip
NULL
| /R/docxtractr-package.r | no_license | jimhester/docxtractr | R | false | false | 631 | r | #' docxtractr is an R pacakge for extracting tables out of Word documents (docx)
#'
#' Microsoft Word docx files provide an XML structure that is fairly
#' straightforward to navigate, especially when it applies to Word tables. The
#' docxtractr package provides tools to determine table count + table structure and
#' extract tables from Microsoft Word docx documents.
#'
#' @name docxtractr
#' @docType package
#'
#' @author Bob Rudis (@@hrbrmstr)
#' @importFrom xml2 xml_find_all xml_text xml_ns xml_find_first xml_attrs
#' @importFrom dplyr bind_rows
#' @importFrom tools file_ext
#' @importFrom utils download.file unzip
NULL
|
# Plot 4 of the Exploratory Data Analysis course project: a 2x2 panel of
# household power-consumption plots for 2007-02-01 and 2007-02-02.
#downloading file from source
download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="myfile.zip")
#unzipping file
unzip("myfile.zip")
#file path
myFile <- "household_power_consumption.txt"
#file path
#myFile <- "r/exdata-data-household_power_consumption/household_power_consumption.txt"
#reading file to memory
#IMPORTANT: na.string in this dataset is indicated as "?"
dat <- read.table(myFile, sep=";", header=TRUE, na.strings=c("?"))
#subsetting data to between 1/2/2007 to 2/2/2007
dat1 <- subset(dat, Date=="1/2/2007" | Date=="2/2/2007")
##fixing the time
# Combining the date and time column into one
datetime <- paste(dat1$Date, dat1$Time)
# Formatting the time with strptime ("20%y" matches the literal "20" then the
# two-digit year, e.g. "2007" parses as year 07 -> 2007)
datetimeStp <- strptime(datetime, format="%d/%m/20%y %H:%M:%S")
dat1$Datetime <- datetimeStp
#plotting graph: 2x2 grid of panels
par(mfrow = c(2,2))
##Plot1: global active power over time
plot(dat1$Datetime, dat1$Global_active_power, xlab = "", ylab="Global Active Power(kilowatts)",type="l")
##Plot2: voltage over time
plot(dat1$Datetime, dat1$Voltage, xlab = "datetime", ylab="Voltage",type="l")
#plot3: the three sub-metering series overlaid
#plotting using type "l"
plot(dat1$Datetime, as.numeric(dat1$Sub_metering_1), xlab = "", ylab="Energy sub metering",type="l")
#plot Sub_metering_2 (red)
lines(dat1$Datetime, as.numeric(dat1$Sub_metering_2), type="l", col="red")
#plot sub-metering_3 (blue)
lines(dat1$Datetime, as.numeric(dat1$Sub_metering_3), type="l", col="blue")
# plotting the legend
# BUG FIX: the legend colours must follow the label order (1 = black,
# 2 = red, 3 = blue, matching the lines() calls above); the original
# c("black", "blue", "red") swapped series 2 and 3 in the legend.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex=0.6, lty=c(1,1,1), col=c("black", "red", "blue"))
##Plot4: global reactive power over time
plot(dat1$Datetime, dat1$Global_reactive_power, xlab = "datetime", ylab="Global_reactive_power",type="l")
#Output to PNG (copy the on-screen device to a 480x480 PNG file)
dev.copy(png, file="plot4.png", width=480, height=480, units="px")
dev.off()
| /plot4.R | no_license | piewsook/ExData_Plotting1 | R | false | false | 1,766 | r | #downloading file from source
download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="myfile.zip")
#unzipping file
unzip("myfile.zip")
#file path
myFile <- "household_power_consumption.txt"
#file path
#myFile <- "r/exdata-data-household_power_consumption/household_power_consumption.txt"
#reading file to memory
#IMPORTANT: na.string in this dataset is indicated as "?"
dat <- read.table(myFile, sep=";", header=TRUE, na.strings=c("?"))
#subsetting data to between 1/2/2007 to 2/2/2007
dat1 <- subset(dat, Date=="1/2/2007" | Date=="2/2/2007")
##fixing the time
# Combining the date and time column into one
datetime <- paste(dat1$Date, dat1$Time)
# Formatting the time with strptime ("20%y" matches the literal "20" then the
# two-digit year, e.g. "2007" parses as year 07 -> 2007)
datetimeStp <- strptime(datetime, format="%d/%m/20%y %H:%M:%S")
dat1$Datetime <- datetimeStp
#plotting graph: 2x2 grid of panels
par(mfrow = c(2,2))
##Plot1: global active power over time
plot(dat1$Datetime, dat1$Global_active_power, xlab = "", ylab="Global Active Power(kilowatts)",type="l")
##Plot2: voltage over time
plot(dat1$Datetime, dat1$Voltage, xlab = "datetime", ylab="Voltage",type="l")
#plot3: the three sub-metering series overlaid
#plotting using type "l"
plot(dat1$Datetime, as.numeric(dat1$Sub_metering_1), xlab = "", ylab="Energy sub metering",type="l")
#plot Sub_metering_2 (red)
lines(dat1$Datetime, as.numeric(dat1$Sub_metering_2), type="l", col="red")
#plot sub-metering_3 (blue)
lines(dat1$Datetime, as.numeric(dat1$Sub_metering_3), type="l", col="blue")
# plotting the legend
# BUG FIX: the legend colours must follow the label order (1 = black,
# 2 = red, 3 = blue, matching the lines() calls above); the original
# c("black", "blue", "red") swapped series 2 and 3 in the legend.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex=0.6, lty=c(1,1,1), col=c("black", "red", "blue"))
##Plot4: global reactive power over time
plot(dat1$Datetime, dat1$Global_reactive_power, xlab = "datetime", ylab="Global_reactive_power",type="l")
#Output to PNG (copy the on-screen device to a 480x480 PNG file)
dev.copy(png, file="plot4.png", width=480, height=480, units="px")
dev.off()
|
###### Here is the url on how to use GitHub and RStudio together ####
#### note that you have to open a shell to add in new branches ###
### http://r-bio.github.io/intro-git-rstudio/ ###
##Open shell (Tools>Shell in R Studio) and give it the upstream address:
## git remote add upstream https://github.com/Qamarsky/NESP.git.
## Make sure that it worked by typing git remote -v,
## it should display 4 lines, 2 that start with origin and the address of your fork,
## and 2 that start with upstream and the address of the upstream repository.
# Note that here we used upstream to name the upstream repository but we could have given it another name
## Create a branch for the changes in R studio through the shell:
## git checkout -b proposed-fixes master. Proposed-fixes is our branch name within this R studio project.
### Once you have made changes, go to the GIT icon in the menu bar and commit changes.
## once changes are finished, open shell and type 'git push origin proposed-fixes' (Note that
# proposed-fixes is the name of the branch you chose when you originated this branch)
### to get the newest version of the files into R's folder use the command: git pull <remote> <branch>
## In this case we have set the remote file to be called origin, and the branch we are working on is master
## so command is: 'git pull origin master'
rm(list = ls())
#### required libraries ####
library(partykit)
library(maps)
library(ggmap)
library(mgcv)
library(ggplot2)
library(googleVis)
library(multcomp)
library(rdrop2)
library(httpuv)
library(Hmisc)
##### Add in transect data ####
drop_auth()
drop_dir(path="/NESP")
setwd("/Users/uqqschuy/Documents/R data/NESP/")
#Covars<-drop_read_csv("/NESP/Data/Transect data/Transect_data_all_170217.csv", stringsAsFactors=FALSE)
#Covarsx<-drop_read_csv("/NESP/data/transect data/Global/Global_dataset_290317_UID.csv", stringsAsFactors=FALSE)
Covars<-drop_read_csv("/NESP/data/transect data/Global/Global_dataset_170516_UID_SEIF2011RoadsFix.csv", stringsAsFactors=FALSE)
Landcov<-drop_read_csv("/NESP/Data/landuse.csv")
KABsite<-drop_read_csv("/NESP/Data/Transect data/KAB/KAB_site_types.csv")
KAB<- drop_read_csv ("/NESP/Data/Transect data/KAB/KAB_AllData_GlID_UID_08_05_17.csv", stringsAsFactors=FALSE)
CUA <- drop_read_csv("/NESP/Data/Transect data/CUA/CUA_AllData3_UID_22_2_17.csv", stringsAsFactors=FALSE)
CSIRO<-drop_read_csv ("/NESP/data/transect data/CSIRO/CSIRO_public_collection_only_22_2_17.csv")
Grid<-drop_read_csv("NESP/Data/Grid data/Syd_fishnet_centerpoints_covars_200430_inland.csv", stringsAsFactors=FALSE)
################## SKIP IF INTERNET CONNECTED ######################
############## for non-internet version: #########################
#write.csv(Grid, file="/Users/uqqschuy/Documents/R data/NESP/Syd_fishnet_centerpoints_covars_200430_inland.csv")
Covars<-read.csv("Global_dataset_80517_UID.csv", stringsAsFactors=FALSE) ### update this!!! ###
Landcov<-read.csv("landuse.csv")
KABsite<-read.csv("KAB_site_types.csv")
KAB<-read.csv("KAB_AllData_GlID_UID_08_05_17.csv")
CUA <- read.csv("CUA_AllData3_UID_22_2_17.csv", stringsAsFactors=FALSE)
CSIRO<-read.csv ("CSIRO_public_collection_only_22_2_17.csv")
Grid<-read.csv("Syd_fishnet_centerpoints_covars_200430_inland.csv", stringsAsFactors=FALSE)
################## START HERE ######################
Covars$Year <-as.POSIXlt(strptime(as.character(Covars$Date), format = "%d/%m/%Y"))$year+1900
Covars$Month <- as.POSIXlt(strptime(as.character(Covars$Date), format = "%d/%m/%Y"))$mon+1
Covars$Day <- as.POSIXlt(strptime(as.character(Covars$Date), format = "%d/%m/%Y"))$mday
Covars$Source<-as.factor(Covars$Source)
Covars$State<-as.factor(Covars$State)
##### KAB ######
## get total debris for KAB and put it into Covars dataset
KAB$Total_Debris<-rowSums(KAB[,grepl("No.of", names(KAB))], na.rm=TRUE)
KABmatch<-match(Covars$UID[Covars$Source=="KAB"], KAB$UID)
Covars$Total_Debris[Covars$Source=="KAB"]<-KAB$Total_Debris[KABmatch]
### Now let's look at CUA data ###
### Many of the categories are for some reason not numeric. Change those to numeric and then you can add the columns to get a total
# Several CUA count columns were read in as factors; coerce each to numeric.
# paste() first converts the factor to its character labels so as.numeric()
# parses the printed value rather than the underlying factor level index.
# (Same behavior as the original 27 copy-pasted CUA[,k]<-as.numeric(paste(CUA[,k]))
# statements, written once as a loop over the affected column indices.)
cua.factor.cols <- c(88, 89, 93, 94, 98, 99, 100, 101, 107, 114, 119, 120,
                     122, 123, 124, 125, 128, 135, 141, 143, 145, 147, 149,
                     150, 152, 156, 173)
for (j in cua.factor.cols) {
  CUA[, j] <- as.numeric(paste(CUA[, j]))
}
CUA$Total_Debris<-rowSums(CUA[,c(87:109,112:159,161,163:180)], na.rm=TRUE) ## for some reason many of these rows are not numeric
CUA$Latitude<--(CUA$Latitude)
### get site type
CUA$SiteType<-"Other"
CUA$SiteType[CUA$River=="Y" | CUA$River=="y"]<-"River"
CUA$SiteType[CUA$Parks=="Y" | CUA$Parks=="y"]<-"Recreational Park"
CUA$SiteType[CUA$Beach=="Y" | CUA$Beach=="y"]<-"Beach"
CUA$SiteType[CUA$Roadway=="Y" | CUA$Roadway=="y"]<-"Highway"
CUA$SiteType[CUA$PubBushland=="Y" | CUA$PubBushland=="y"]<-"Bushland"
CUA$SiteType[CUA$School=="Y" | CUA$School=="y"]<-"School"
CUA$SiteType[CUA$OutdoorTrans=="Y" | CUA$OutdoorTrans=="y"]<-"OutdoorTrans"
CUA$SiteType[CUA$ShopsMalls=="Y" | CUA$ShopsMalls=="y"]<-"Shopping Centre"
CUA$SiteType[CUA$DiveSite=="Y" | CUA$DiveSite=="y"]<-"DiveSite"
#### Total takes into account the length in meters of wire, pvc, etc. While total calc does not.
### first would be great to standardise by something... ###
### KAB data theoretically is standardised to a 1000m square area, according to cleanup protocols ###
### standardise CUA by Aream2 because Area km2 has lots of NAs for some reason. Let's standardise to 1000m2 like KAB
## Aream2 is a factor, so we have to change it to numeric.
CUA$Aream2<-as.numeric(CUA$Aream2)
CUA$Totalper1000m2<-CUA$Total_Debris/CUA$Aream2*1000
CUA.Sub<-CUA[is.na(CUA$Totalper1000m2)==FALSE,]
CUAmatch<-match(Covars$UID[Covars$Source=="CUA"], CUA$UID)
Covars$Total_Debris[Covars$Source=="CUA"]<-CUA$Total_Debris[CUAmatch]
Covars$SiteType[Covars$Source=="CUA"]<-CUA$SiteType[CUAmatch]
##### CSIRO DATA ######
CSIRO$Total_Debris<-rowSums(CSIRO[,5:236])
CSmatch<-match(Covars$UID[Covars$Source=="CSIRO"|Covars$Source=="Transect"|Covars$Source=="Emu"], CSIRO$UID)
Covars$Total_Debris[Covars$Source=="CSIRO"|Covars$Source=="Transect"|Covars$Source=="Emu"]<-CSIRO$Total_Debris[CSmatch]
#### massaging various covariates
#Covars$Area_m2<-as.numeric(Covars$Area_m2)
Covars$Area_m2[Covars$Source=="KAB"]<-1000
Covars$Totper1000<-(Covars$Total_Debris/Covars$Area_m2)*1000
Covars$All_roads_50<-rowSums(Covars[,c("DualCarriageRd50km","MinorRd50km","PrincialRd50km","SecondaryRd50km","Track50km")], na.rm=TRUE)
Covars$All_roads_5<-rowSums(Covars[,c("DualCarriageRd5km","MinorRd5km","PrincialRd5km","SecondaryRd5km","Track5km")], na.rm=TRUE)
Covars$Prim.land<-Landcov$PRIMARY_V7[match(Covars$Landuse_code, Landcov$Landuse)]
#Covars$State<-as.factor(Covars$State)
Covars$roads_5to50km_resids <- lm(Covars$All_roads_5 ~ Covars$All_roads_50)$residuals
Covars$Pop_5km[is.na(Covars$Pop_5km)==TRUE]<-0
Covars$Pop5to50km_resids<-lm(Covars$Pop_5km ~ Covars$Pop_50km)$residuals
Covars$SiteType[Covars$Source=="KAB"]<-as.character(KABsite$site_type[match(Covars$Global_ID[Covars$Source=="KAB"], KABsite$Global_ID)]) ### fix this
Covars$SiteType[Covars$SiteType=="Car park"]<-"Car Park"
Covars$SiteType[Covars$SiteType=="Retail"]<-"Retail Strip"
Covars$SiteType[Covars$SiteType=="Recreational"]<-"Recreational Park"
Covars$SiteType<-as.factor(Covars$SiteType)
## put in a log in case of doing gams.
Covars$Log_Debris<-log(Covars$Totper1000 +1)
## add number of drink containers ### - FIX THIS??
## Covars$Containers<-KAB$Containers[match(Covars$UID, KAB$UID)]
## add in site code
Covars$Sitecode<-KAB$sitecode.x[match(Covars$UID, KAB$UID)]
### remove NAs in Totper1000 ##
Covars2<-Covars[is.finite(Covars$Totper1000)==TRUE,]
## remove NAs in Prim.land (these are all on Heron Island)
Covars2<-Covars2[is.na(Covars2$Prim.land)==FALSE,]
### remove the two outliers (>10000 items - I think they have been mis-recorded)
Covars2<-Covars2[Covars2$Total_Debris<10000,]
## remove NAs in pop5km (these are in Melbourne for some reason)
#Covars2<-Covars2[is.na(Covars2$Pop_5km)==FALSE,]
## remove ones without a date
#Covars2[is.na(Covars2$Year)==TRUE,] ## I think this will take out all of the new data. Let's see if we can find a year for these
## problems with the SEIF missing some bits
IDs<-Covars2$Global_ID[is.na(Covars2$eco_resour50km)==TRUE & is.na(Covars$eco_resour25km)==FALSE]
write.csv(Covars2[is.na(Covars2$eco_resour50km)==TRUE & is.na(Covars$eco_resour25km)==FALSE,], "missingdata.csv")
IDs2<-Covars2$Global_ID[is.na(Covars2$Edu_occupa5km)==TRUE & is.na(Covars2$Edu_occupa1km)==FALSE]
###### Grid data #######
#GridSEIF<-drop_read_csv("NESP/Data/Grid data/Sydney_fishnet_centrepoints_seif2011_170412.csv", stringsAsFactors=FALSE) ### fix file name
#Grid[,17:36]<-GridSEIF[,6:25]
#write.csv(Grid, file="/Users/uqqschuy/Documents/R data/NESP/Syd_fishnet_centerpoints_covars_200430_inland.csv")
#drop_upload("/Users/uqqschuy/Documents/R data/NESP/Syd_fishnet_centerpoints_covars_200430_inland.csv", dest="/NESP/Data/Grid data")
Grid$State<-rep("NSW", times=dim(Grid)[1])
Grid$All_roads_50<-rowSums(Grid[,c("DualCarriageRd50km","MinorRd50km","PrincialRd50km","SecondaryRd50km","Track50km")], na.rm=TRUE)
# BUG FIX: All_roads_5 previously re-summed the 50 km road columns (copy-paste
# error), which made it identical to All_roads_50 and left the downstream
# roads_5to50km_resids regression degenerate (residuals ~0). Use the 5 km
# columns, mirroring the Covars$All_roads_5 calculation earlier in this script.
# NOTE(review): column names (incl. the "PrincialRd" spelling) copied from the
# Covars 5 km sum -- confirm Grid carries the same 5 km road columns.
Grid$All_roads_5<-rowSums(Grid[,c("DualCarriageRd5km","MinorRd5km","PrincialRd5km","SecondaryRd5km","Track5km")], na.rm=TRUE)
#Gridtest<-read.csv("/Users/uqqschuy/Documents/R data/NESP/NESP/Sydney_fishnet_centrepoints_seif2011_170412.csv", stringsAsFactors=FALSE)
#### There are a few where landuse is -9999 because the cells are close to the water or on the water.
# Change these to water landcover
## Note that there were other cells where landuse was 0, TJ has changed them to nearest landuse value.
Grid$Landuse[Grid$Landuse<0]<-"663"
Grid$Prim.land<-Landcov$PRIMARY_V7[match(Grid$Landuse, Landcov$Landuse)]
Grid$roads_5to50km_resids<-lm(Grid$All_roads_5 ~ Grid$All_roads_50)$residuals
Grid$Pop5to50km_resids<-lm(Grid$Pop_5km ~ Grid$Pop_50km)$residuals
#### CHECKING DATA #####
#length(Grid2$UID[Grid2$Landuse==(-9999)])
### For some reason there are missing 5 and 50km roads.
wrongroads<-Grid$UID[Grid$All_roads_5==0| Grid$All_roads_50==0]
write.csv (wrongroads, file="anomalousroads.csv")
#### Note that these predictions are using incorrect roads data - need to fix.
# Grid$pred<-predict(G.K.M2, newdata=Grid,type="response",se.fit = TRUE, na.action = na.pass)
Syd_Covars<-Covars2[Covars2$Lat <= (-33.671774) & Covars2$Lat >= (-34.265774) & Covars2$Long <= (151.372906) & Covars2$Long >= (150.718096),]
Syd_KAB<-Syd_Covars[Syd_Covars$Source=="KAB",]
Syd_CSIRO<-Syd_Covars[Syd_Covars$Source=="CSIRO",]
Syd_CSall<-Syd_Covars[Syd_Covars$Source=="Emu" | Syd_Covars$Source==
"Transect" | Syd_Covars$Source=="CSIRO",]
###### TEST GRID COVARS AGAINST TRANSECT COVARS #####
Grid_subset<-Grid[Grid$UID %in% unique(Syd_Covars$UID_1),]
## for test
#Gridtest_subset<-Gridtest[Gridtest$UID %in% unique(Syd_Covars$UID_1),]
Syd_Covars_subset<-Syd_Covars[unique(Syd_Covars$UID_1),]
matchindex<-match(Grid_subset$UID, Syd_Covars$UID_1)
plot(Grid_subset$Eco_advan_50km, Syd_Covars$Eco_advan_50km[matchindex])
plot(Grid_subset$eco_resour50km, Syd_Covars$eco_resour50km[matchindex])
## with new grid variable
plot(Gridtest_subset$eco_resour5km, Syd_Covars$eco_resour5km[matchindex])
plot(Grid_subset$Pop_25km, Syd_Covars$Pop_25km_new[matchindex])
### some issues...trying to work them out ####
Syd_subset<-Syd_Covars[matchindex,]
index<-Syd_subset$Pop_50km<2000000
plot(Grid_subset$Pop_50km[index], Syd_subset$Pop_50km[index])
index2<-Syd_subset$Pop_25<400000
plot(Grid_subset$Pop_25km[index2],Syd_subset$Pop_25km[index2])
index3<-Grid_subset$UID[Grid_subset$eco_resour50km<850]
write.csv(Syd_subset[index2,], file="25km anomalies for TJ.csv")
write.csv(Syd_subset[Syd_subset$UID_1 %in% index3,],"50km eco_resource anomaly for TJ.csv")
### a few of the transects don't end up in the grid, I think because the grid cell was perhaps cut off.
## How shall we address this?
Grid$UID<-as.character(Grid$UID)
write.csv(unique(Syd_Covars$UID_1[Syd_Covars$UID_1 %nin% Grid$UID]), file="transectinwater.csv")
# Build comparable coordinate keys for the transect and grid tables.
Syd_UID2<-paste(Syd_Covars$Long, Syd_Covars$Lat, sep="")
# BUG FIX: the original line was `Grid_UID2<-paste(Grid$)`, a syntax error left
# mid-edit that prevents this file from parsing. Build the grid key from the
# cell-centre coordinates (Grid$X, Grid$Y -- the columns exported via
# write.csv(Grid[,c("UID","X","Y")], ...) below).
# NOTE(review): confirm X/Y are ordered lon/lat to match Syd_UID2 above.
Grid_UID2<-paste(Grid$X, Grid$Y, sep="")
## TJ went back and changed the UID for these transects to the nearest UID.
## provide csv to Chris and Kimberley because they will need for Winddf and Waterdf and Distdf
write.csv(Grid[,c("UID","X","Y")], file="new UIDs for transit matrices.csv")
#drop_upload("~/Documents/R data/NESP/R scripts/LoadData.R", dest="/NESP/R scripts")
| /LoadData.R | no_license | Qamarsky/NESP | R | false | false | 13,507 | r | ###### Here is the url on how to use GitHub and RStudio together ####
#### note that you have to open a shell to add in new branches ###
### http://r-bio.github.io/intro-git-rstudio/ ###
##Open shell (Tools>Shell in R Studio) and give it the upstream address:
## git remote add upstream https://github.com/Qamarsky/NESP.git.
## Make sure that it worked by typing git remote -v,
## it should display 4 lines, 2 that start with origin and the address of your fork,
## and 2 that start with upstream and the address of the upstream repository.
# Note that here we used upstream to name the upstream repository but we could have given it another name
## Create a branch for the changes in R studio through the shell:
## git checkout -b proposed-fixes master. Proposed-fixes is our branch name within this R studio project.
### Once you have made changes, go to the GIT icon in the menu bar and commit changes.
## once changes are finished, open shell and type 'git push origin proposed-fixes' (Note that
# proposed-fixes is the name of the branch you chose when you originated this branch)
### to get the newest version of the files into R's folder use the command: git pull <remote> <branch>
## In this case we have set the remote file to be called origin, and the branch we are working on is master
## so command is: 'git pull origin master'
rm(list = ls())
#### required libraries ####
library(partykit)
library(maps)
library(ggmap)
library(mgcv)
library(ggplot2)
library(googleVis)
library(multcomp)
library(rdrop2)
library(httpuv)
library(Hmisc)
##### Add in transect data ####
drop_auth()
drop_dir(path="/NESP")
setwd("/Users/uqqschuy/Documents/R data/NESP/")
#Covars<-drop_read_csv("/NESP/Data/Transect data/Transect_data_all_170217.csv", stringsAsFactors=FALSE)
#Covarsx<-drop_read_csv("/NESP/data/transect data/Global/Global_dataset_290317_UID.csv", stringsAsFactors=FALSE)
Covars<-drop_read_csv("/NESP/data/transect data/Global/Global_dataset_170516_UID_SEIF2011RoadsFix.csv", stringsAsFactors=FALSE)
Landcov<-drop_read_csv("/NESP/Data/landuse.csv")
KABsite<-drop_read_csv("/NESP/Data/Transect data/KAB/KAB_site_types.csv")
KAB<- drop_read_csv ("/NESP/Data/Transect data/KAB/KAB_AllData_GlID_UID_08_05_17.csv", stringsAsFactors=FALSE)
CUA <- drop_read_csv("/NESP/Data/Transect data/CUA/CUA_AllData3_UID_22_2_17.csv", stringsAsFactors=FALSE)
CSIRO<-drop_read_csv ("/NESP/data/transect data/CSIRO/CSIRO_public_collection_only_22_2_17.csv")
Grid<-drop_read_csv("NESP/Data/Grid data/Syd_fishnet_centerpoints_covars_200430_inland.csv", stringsAsFactors=FALSE)
################## SKIP IF INTERNET CONNECTED ######################
############## for non-internet version: #########################
#write.csv(Grid, file="/Users/uqqschuy/Documents/R data/NESP/Syd_fishnet_centerpoints_covars_200430_inland.csv")
Covars<-read.csv("Global_dataset_80517_UID.csv", stringsAsFactors=FALSE) ### update this!!! ###
Landcov<-read.csv("landuse.csv")
KABsite<-read.csv("KAB_site_types.csv")
KAB<-read.csv("KAB_AllData_GlID_UID_08_05_17.csv")
CUA <- read.csv("CUA_AllData3_UID_22_2_17.csv", stringsAsFactors=FALSE)
CSIRO<-read.csv ("CSIRO_public_collection_only_22_2_17.csv")
Grid<-read.csv("Syd_fishnet_centerpoints_covars_200430_inland.csv", stringsAsFactors=FALSE)
################## START HERE ######################
Covars$Year <-as.POSIXlt(strptime(as.character(Covars$Date), format = "%d/%m/%Y"))$year+1900
Covars$Month <- as.POSIXlt(strptime(as.character(Covars$Date), format = "%d/%m/%Y"))$mon+1
Covars$Day <- as.POSIXlt(strptime(as.character(Covars$Date), format = "%d/%m/%Y"))$mday
Covars$Source<-as.factor(Covars$Source)
Covars$State<-as.factor(Covars$State)
##### KAB ######
## get total debris for KAB and put it into Covars dataset
KAB$Total_Debris<-rowSums(KAB[,grepl("No.of", names(KAB))], na.rm=TRUE)
KABmatch<-match(Covars$UID[Covars$Source=="KAB"], KAB$UID)
Covars$Total_Debris[Covars$Source=="KAB"]<-KAB$Total_Debris[KABmatch]
### Now let's look at CUA data ###
### Many of the categories are for some reason not numeric. Change those to numeric and then you can add the columns to get a total
# Several CUA count columns were read in as factors; coerce each to numeric.
# paste() first converts the factor to its character labels so as.numeric()
# parses the printed value rather than the underlying factor level index.
# (Same behavior as the original 27 copy-pasted CUA[,k]<-as.numeric(paste(CUA[,k]))
# statements, written once as a loop over the affected column indices.)
cua.factor.cols <- c(88, 89, 93, 94, 98, 99, 100, 101, 107, 114, 119, 120,
                     122, 123, 124, 125, 128, 135, 141, 143, 145, 147, 149,
                     150, 152, 156, 173)
for (j in cua.factor.cols) {
  CUA[, j] <- as.numeric(paste(CUA[, j]))
}
CUA$Total_Debris<-rowSums(CUA[,c(87:109,112:159,161,163:180)], na.rm=TRUE) ## for some reason many of these rows are not numeric
CUA$Latitude<--(CUA$Latitude)
### get site type
CUA$SiteType<-"Other"
CUA$SiteType[CUA$River=="Y" | CUA$River=="y"]<-"River"
CUA$SiteType[CUA$Parks=="Y" | CUA$Parks=="y"]<-"Recreational Park"
CUA$SiteType[CUA$Beach=="Y" | CUA$Beach=="y"]<-"Beach"
CUA$SiteType[CUA$Roadway=="Y" | CUA$Roadway=="y"]<-"Highway"
CUA$SiteType[CUA$PubBushland=="Y" | CUA$PubBushland=="y"]<-"Bushland"
CUA$SiteType[CUA$School=="Y" | CUA$School=="y"]<-"School"
CUA$SiteType[CUA$OutdoorTrans=="Y" | CUA$OutdoorTrans=="y"]<-"OutdoorTrans"
CUA$SiteType[CUA$ShopsMalls=="Y" | CUA$ShopsMalls=="y"]<-"Shopping Centre"
CUA$SiteType[CUA$DiveSite=="Y" | CUA$DiveSite=="y"]<-"DiveSite"
#### Total takes into account the length in meters of wire, pvc, etc. While total calc does not.
### first would be great to standardise by something... ###
### KAB data theoretically is standardised to a 1000m square area, according to cleanup protocols ###
### standardise CUA by Aream2 because Area km2 has lots of NAs for some reason. Let's standardise to 1000m2 like KAB
## Aream2 is a factor, so we have to change it to numeric.
CUA$Aream2<-as.numeric(CUA$Aream2)
CUA$Totalper1000m2<-CUA$Total_Debris/CUA$Aream2*1000
CUA.Sub<-CUA[is.na(CUA$Totalper1000m2)==FALSE,]
CUAmatch<-match(Covars$UID[Covars$Source=="CUA"], CUA$UID)
Covars$Total_Debris[Covars$Source=="CUA"]<-CUA$Total_Debris[CUAmatch]
Covars$SiteType[Covars$Source=="CUA"]<-CUA$SiteType[CUAmatch]
##### CSIRO DATA ######
CSIRO$Total_Debris<-rowSums(CSIRO[,5:236])
CSmatch<-match(Covars$UID[Covars$Source=="CSIRO"|Covars$Source=="Transect"|Covars$Source=="Emu"], CSIRO$UID)
Covars$Total_Debris[Covars$Source=="CSIRO"|Covars$Source=="Transect"|Covars$Source=="Emu"]<-CSIRO$Total_Debris[CSmatch]
#### massaging various covariates
#Covars$Area_m2<-as.numeric(Covars$Area_m2)
Covars$Area_m2[Covars$Source=="KAB"]<-1000
Covars$Totper1000<-(Covars$Total_Debris/Covars$Area_m2)*1000
Covars$All_roads_50<-rowSums(Covars[,c("DualCarriageRd50km","MinorRd50km","PrincialRd50km","SecondaryRd50km","Track50km")], na.rm=TRUE)
Covars$All_roads_5<-rowSums(Covars[,c("DualCarriageRd5km","MinorRd5km","PrincialRd5km","SecondaryRd5km","Track5km")], na.rm=TRUE)
Covars$Prim.land<-Landcov$PRIMARY_V7[match(Covars$Landuse_code, Landcov$Landuse)]
#Covars$State<-as.factor(Covars$State)
Covars$roads_5to50km_resids <- lm(Covars$All_roads_5 ~ Covars$All_roads_50)$residuals
Covars$Pop_5km[is.na(Covars$Pop_5km)==TRUE]<-0
Covars$Pop5to50km_resids<-lm(Covars$Pop_5km ~ Covars$Pop_50km)$residuals
Covars$SiteType[Covars$Source=="KAB"]<-as.character(KABsite$site_type[match(Covars$Global_ID[Covars$Source=="KAB"], KABsite$Global_ID)]) ### fix this
Covars$SiteType[Covars$SiteType=="Car park"]<-"Car Park"
Covars$SiteType[Covars$SiteType=="Retail"]<-"Retail Strip"
Covars$SiteType[Covars$SiteType=="Recreational"]<-"Recreational Park"
Covars$SiteType<-as.factor(Covars$SiteType)
## put in a log in case of doing gams.
Covars$Log_Debris<-log(Covars$Totper1000 +1)
## add number of drink containers ### - FIX THIS??
## Covars$Containers<-KAB$Containers[match(Covars$UID, KAB$UID)]
## add in site code
Covars$Sitecode<-KAB$sitecode.x[match(Covars$UID, KAB$UID)]
### remove NAs in Totper1000 ##
Covars2<-Covars[is.finite(Covars$Totper1000)==TRUE,]
## remove NAs in Prim.land (these are all on Heron Island)
Covars2<-Covars2[is.na(Covars2$Prim.land)==FALSE,]
### remove the two outliers (>10000 items - I think they have been mis-recorded)
Covars2<-Covars2[Covars2$Total_Debris<10000,]
## remove NAs in pop5km (these are in Melbourne for some reason)
#Covars2<-Covars2[is.na(Covars2$Pop_5km)==FALSE,]
## remove ones without a date
#Covars2[is.na(Covars2$Year)==TRUE,] ## I think this will take out all of the new data. Let's see if we can find a year for these
## problems with the SEIF missing some bits
IDs<-Covars2$Global_ID[is.na(Covars2$eco_resour50km)==TRUE & is.na(Covars$eco_resour25km)==FALSE]
write.csv(Covars2[is.na(Covars2$eco_resour50km)==TRUE & is.na(Covars$eco_resour25km)==FALSE,], "missingdata.csv")
IDs2<-Covars2$Global_ID[is.na(Covars2$Edu_occupa5km)==TRUE & is.na(Covars2$Edu_occupa1km)==FALSE]
###### Grid data #######
#GridSEIF<-drop_read_csv("NESP/Data/Grid data/Sydney_fishnet_centrepoints_seif2011_170412.csv", stringsAsFactors=FALSE) ### fix file name
#Grid[,17:36]<-GridSEIF[,6:25]
#write.csv(Grid, file="/Users/uqqschuy/Documents/R data/NESP/Syd_fishnet_centerpoints_covars_200430_inland.csv")
#drop_upload("/Users/uqqschuy/Documents/R data/NESP/Syd_fishnet_centerpoints_covars_200430_inland.csv", dest="/NESP/Data/Grid data")
Grid$State<-rep("NSW", times=dim(Grid)[1])
Grid$All_roads_50<-rowSums(Grid[,c("DualCarriageRd50km","MinorRd50km","PrincialRd50km","SecondaryRd50km","Track50km")], na.rm=TRUE)
# BUG FIX: All_roads_5 previously re-summed the 50 km road columns (copy-paste
# error), which made it identical to All_roads_50 and left the downstream
# roads_5to50km_resids regression degenerate (residuals ~0). Use the 5 km
# columns, mirroring the Covars$All_roads_5 calculation earlier in this script.
# NOTE(review): column names (incl. the "PrincialRd" spelling) copied from the
# Covars 5 km sum -- confirm Grid carries the same 5 km road columns.
Grid$All_roads_5<-rowSums(Grid[,c("DualCarriageRd5km","MinorRd5km","PrincialRd5km","SecondaryRd5km","Track5km")], na.rm=TRUE)
#Gridtest<-read.csv("/Users/uqqschuy/Documents/R data/NESP/NESP/Sydney_fishnet_centrepoints_seif2011_170412.csv", stringsAsFactors=FALSE)
#### There are a few where landuse is -9999 because the cells are close to the water or on the water.
# Change these to water landcover
## Note that there were other cells where landuse was 0, TJ has changed them to nearest landuse value.
Grid$Landuse[Grid$Landuse<0]<-"663"
Grid$Prim.land<-Landcov$PRIMARY_V7[match(Grid$Landuse, Landcov$Landuse)]
Grid$roads_5to50km_resids<-lm(Grid$All_roads_5 ~ Grid$All_roads_50)$residuals
Grid$Pop5to50km_resids<-lm(Grid$Pop_5km ~ Grid$Pop_50km)$residuals
#### CHECKING DATA #####
#length(Grid2$UID[Grid2$Landuse==(-9999)])
### For some reason there are missing 5 and 50km roads.
wrongroads<-Grid$UID[Grid$All_roads_5==0| Grid$All_roads_50==0]
write.csv (wrongroads, file="anomalousroads.csv")
#### Note that these predictions are using incorrect roads data - need to fix.
# Grid$pred<-predict(G.K.M2, newdata=Grid,type="response",se.fit = TRUE, na.action = na.pass)
Syd_Covars<-Covars2[Covars2$Lat <= (-33.671774) & Covars2$Lat >= (-34.265774) & Covars2$Long <= (151.372906) & Covars2$Long >= (150.718096),]
Syd_KAB<-Syd_Covars[Syd_Covars$Source=="KAB",]
Syd_CSIRO<-Syd_Covars[Syd_Covars$Source=="CSIRO",]
Syd_CSall<-Syd_Covars[Syd_Covars$Source=="Emu" | Syd_Covars$Source==
"Transect" | Syd_Covars$Source=="CSIRO",]
###### TEST GRID COVARS AGAINST TRANSECT COVARS #####
Grid_subset<-Grid[Grid$UID %in% unique(Syd_Covars$UID_1),]
## for test
#Gridtest_subset<-Gridtest[Gridtest$UID %in% unique(Syd_Covars$UID_1),]
Syd_Covars_subset<-Syd_Covars[unique(Syd_Covars$UID_1),]
matchindex<-match(Grid_subset$UID, Syd_Covars$UID_1)
plot(Grid_subset$Eco_advan_50km, Syd_Covars$Eco_advan_50km[matchindex])
plot(Grid_subset$eco_resour50km, Syd_Covars$eco_resour50km[matchindex])
## with new grid variable
plot(Gridtest_subset$eco_resour5km, Syd_Covars$eco_resour5km[matchindex])
plot(Grid_subset$Pop_25km, Syd_Covars$Pop_25km_new[matchindex])
### some issues...trying to work them out ####
Syd_subset<-Syd_Covars[matchindex,]
index<-Syd_subset$Pop_50km<2000000
plot(Grid_subset$Pop_50km[index], Syd_subset$Pop_50km[index])
index2<-Syd_subset$Pop_25<400000
plot(Grid_subset$Pop_25km[index2],Syd_subset$Pop_25km[index2])
index3<-Grid_subset$UID[Grid_subset$eco_resour50km<850]
write.csv(Syd_subset[index2,], file="25km anomalies for TJ.csv")
write.csv(Syd_subset[Syd_subset$UID_1 %in% index3,],"50km eco_resource anomaly for TJ.csv")
### a few of the transects don't end up in the grid, I think because the grid cell was perhaps cut off.
## How shall we address this?
Grid$UID<-as.character(Grid$UID)
write.csv(unique(Syd_Covars$UID_1[Syd_Covars$UID_1 %nin% Grid$UID]), file="transectinwater.csv")
# Build comparable coordinate keys for the transect and grid tables.
Syd_UID2<-paste(Syd_Covars$Long, Syd_Covars$Lat, sep="")
# BUG FIX: the original line was `Grid_UID2<-paste(Grid$)`, a syntax error left
# mid-edit that prevents this file from parsing. Build the grid key from the
# cell-centre coordinates (Grid$X, Grid$Y -- the columns exported via
# write.csv(Grid[,c("UID","X","Y")], ...) below).
# NOTE(review): confirm X/Y are ordered lon/lat to match Syd_UID2 above.
Grid_UID2<-paste(Grid$X, Grid$Y, sep="")
## TJ went back and changed the UID for these transects to the nearest UID.
## provide csv to Chris and Kimberley because they will need for Winddf and Waterdf and Distdf
write.csv(Grid[,c("UID","X","Y")], file="new UIDs for transit matrices.csv")
#drop_upload("~/Documents/R data/NESP/R scripts/LoadData.R", dest="/NESP/R scripts")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Colombia.R
\name{Colombia}
\alias{Colombia}
\title{Colombia Class for downloading, cleaning and processing notification data}
\source{
\url{https://github.com/danielcs88/colombia_covid-19/}
}
\description{
Information for downloading, cleaning
and processing COVID-19 region data for Colombia
}
\examples{
\dontrun{
region <- Colombia$new(verbose = TRUE, steps = TRUE, get = TRUE)
region$return()
}
}
\seealso{
Subnational data sources
\code{\link{Belgium}},
\code{\link{Brazil}},
\code{\link{Canada}},
\code{\link{Covid19DataHub}},
\code{\link{Cuba}},
\code{\link{France}},
\code{\link{Germany}},
\code{\link{Google}},
\code{\link{India}},
\code{\link{Italy}},
\code{\link{JHU}},
\code{\link{Lithuania}},
\code{\link{Mexico}},
\code{\link{Netherlands}},
\code{\link{SouthAfrica}},
\code{\link{UK}},
\code{\link{USA}}
}
\concept{dataset}
\concept{subnational}
\section{Super class}{
\code{\link[covidregionaldata:DataClass]{covidregionaldata::DataClass}} -> \code{Colombia}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{origin}}{name of origin to fetch data for}
\item{\code{supported_levels}}{A list of supported levels.}
\item{\code{supported_region_names}}{A list of region names in order of level.}
\item{\code{supported_region_codes}}{A list of region codes in order of level.}
\item{\code{common_data_urls}}{List of named links to raw data.}
\item{\code{source_data_cols}}{existing columns within the raw data}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-set_region_codes}{\code{Colombia$set_region_codes()}}
\item \href{#method-clean_common}{\code{Colombia$clean_common()}}
\item \href{#method-clone}{\code{Colombia$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="available_regions">}\href{../../covidregionaldata/html/DataClass.html#method-available_regions}{\code{covidregionaldata::DataClass$available_regions()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="clean">}\href{../../covidregionaldata/html/DataClass.html#method-clean}{\code{covidregionaldata::DataClass$clean()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="download">}\href{../../covidregionaldata/html/DataClass.html#method-download}{\code{covidregionaldata::DataClass$download()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="filter">}\href{../../covidregionaldata/html/DataClass.html#method-filter}{\code{covidregionaldata::DataClass$filter()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="get">}\href{../../covidregionaldata/html/DataClass.html#method-get}{\code{covidregionaldata::DataClass$get()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="initialize">}\href{../../covidregionaldata/html/DataClass.html#method-initialize}{\code{covidregionaldata::DataClass$initialize()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="process">}\href{../../covidregionaldata/html/DataClass.html#method-process}{\code{covidregionaldata::DataClass$process()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="return">}\href{../../covidregionaldata/html/DataClass.html#method-return}{\code{covidregionaldata::DataClass$return()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="summary">}\href{../../covidregionaldata/html/DataClass.html#method-summary}{\code{covidregionaldata::DataClass$summary()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="test">}\href{../../covidregionaldata/html/DataClass.html#method-test}{\code{covidregionaldata::DataClass$test()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-set_region_codes"></a>}}
\if{latex}{\out{\hypertarget{method-set_region_codes}{}}}
\subsection{Method \code{set_region_codes()}}{
Set up a table of region codes for clean data
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Colombia$set_region_codes()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clean_common"></a>}}
\if{latex}{\out{\hypertarget{method-clean_common}{}}}
\subsection{Method \code{clean_common()}}{
Colombia specific state level data cleaning
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Colombia$clean_common()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Colombia$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/Colombia.Rd | permissive | kathsherratt/covidregionaldata | R | false | true | 5,409 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Colombia.R
\name{Colombia}
\alias{Colombia}
\title{Colombia Class for downloading, cleaning and processing notification data}
\source{
\url{https://github.com/danielcs88/colombia_covid-19/}
}
\description{
Information for downloading, cleaning
and processing COVID-19 region data for Colombia
}
\examples{
\dontrun{
region <- Colombia$new(verbose = TRUE, steps = TRUE, get = TRUE)
region$return()
}
}
\seealso{
Subnational data sources
\code{\link{Belgium}},
\code{\link{Brazil}},
\code{\link{Canada}},
\code{\link{Covid19DataHub}},
\code{\link{Cuba}},
\code{\link{France}},
\code{\link{Germany}},
\code{\link{Google}},
\code{\link{India}},
\code{\link{Italy}},
\code{\link{JHU}},
\code{\link{Lithuania}},
\code{\link{Mexico}},
\code{\link{Netherlands}},
\code{\link{SouthAfrica}},
\code{\link{UK}},
\code{\link{USA}}
}
\concept{dataset}
\concept{subnational}
\section{Super class}{
\code{\link[covidregionaldata:DataClass]{covidregionaldata::DataClass}} -> \code{Colombia}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{origin}}{name of origin to fetch data for}
\item{\code{supported_levels}}{A list of supported levels.}
\item{\code{supported_region_names}}{A list of region names in order of level.}
\item{\code{supported_region_codes}}{A list of region codes in order of level.}
\item{\code{common_data_urls}}{List of named links to raw data.}
\item{\code{source_data_cols}}{existing columns within the raw data}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-set_region_codes}{\code{Colombia$set_region_codes()}}
\item \href{#method-clean_common}{\code{Colombia$clean_common()}}
\item \href{#method-clone}{\code{Colombia$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="available_regions">}\href{../../covidregionaldata/html/DataClass.html#method-available_regions}{\code{covidregionaldata::DataClass$available_regions()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="clean">}\href{../../covidregionaldata/html/DataClass.html#method-clean}{\code{covidregionaldata::DataClass$clean()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="download">}\href{../../covidregionaldata/html/DataClass.html#method-download}{\code{covidregionaldata::DataClass$download()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="filter">}\href{../../covidregionaldata/html/DataClass.html#method-filter}{\code{covidregionaldata::DataClass$filter()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="get">}\href{../../covidregionaldata/html/DataClass.html#method-get}{\code{covidregionaldata::DataClass$get()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="initialize">}\href{../../covidregionaldata/html/DataClass.html#method-initialize}{\code{covidregionaldata::DataClass$initialize()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="process">}\href{../../covidregionaldata/html/DataClass.html#method-process}{\code{covidregionaldata::DataClass$process()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="return">}\href{../../covidregionaldata/html/DataClass.html#method-return}{\code{covidregionaldata::DataClass$return()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="summary">}\href{../../covidregionaldata/html/DataClass.html#method-summary}{\code{covidregionaldata::DataClass$summary()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="covidregionaldata" data-topic="DataClass" data-id="test">}\href{../../covidregionaldata/html/DataClass.html#method-test}{\code{covidregionaldata::DataClass$test()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-set_region_codes"></a>}}
\if{latex}{\out{\hypertarget{method-set_region_codes}{}}}
\subsection{Method \code{set_region_codes()}}{
Set up a table of region codes for clean data
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Colombia$set_region_codes()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clean_common"></a>}}
\if{latex}{\out{\hypertarget{method-clean_common}{}}}
\subsection{Method \code{clean_common()}}{
Colombia specific state level data cleaning
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Colombia$clean_common()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Colombia$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
## Render a 2x2 panel figure of annual Chinook consumption, one panel per
## predator, to a TIFF file.  Relies on objects defined earlier in the
## session: nPredator, nAge, nChAge, nYear, years, and the list CH_hisayt
## (consumption arrays indexed by predator; exact dimension meanings are
## not visible here -- TODO confirm against where CH_hisayt is built).
tiff("plot_annualConsumption.tiff", width=480, height=580, pointsize=14)
## FRAM_ch[p, a, y]: total consumption by predator p of age-a fish in year y.
FRAM_ch = array(0,c(nPredator,max(nAge),nYear))
for(y in 1:nYear)
{
for(p in 1:4)
for(a in 1:nChAge)
{
tmp = sum(CH_hisayt[[p]][,,a,y,])
FRAM_ch[p,a,y] = tmp
}
}
## One panel per predator; wide left/bottom margins leave room for the
## single shared y-axis label added after the loop.
par(mfrow=c(2,2), mai=c(1,1.,0.4,0))
for(p in 1:4)
{
## One line per ocean age, values in thousands of fish.
## NOTE(review): ylim is max(1, FRAM_ch[p,,])/1000; if the intent was a
## floor of 1 on the axis it should be max(1, FRAM_ch[p,,]/1000) -- confirm.
matplot(t(FRAM_ch[p,,])/1000, type="l",
ylab="",
xlab="",
axes=FALSE,
ylim=c(0,max(1,FRAM_ch[p,,])/1000),
lty=1,
col=1:5,
lwd=2)
axis(1,at=1:nYear, labels=years, cex=1.3)
## Panel letter "( a )" .. "( d )" near the top-left of each panel.
text(3,max(t(FRAM_ch[p,,])/1000), paste0("( ", letters[p], " )"), cex=1.3)
if(p==2)
legend(0,max(t(FRAM_ch[p,,])/1000),legend=paste("Ocean age", 0:4), lty=1, lwd=2, col=1:nChAge, bty="n", cex=1.3)
axis(2)
box()
}
## Overlay an invisible full-figure plot to add one shared, rotated y label.
par(mai=c(0,0,0,0), new=TRUE, fig=c(0,1,0,1))
plot(1, type="n", axes=FALSE, xaxs="i", yaxs="i", xlim=c(0,1), ylim=c(0,1))
text(0.05,0.5, "Number of Chinook consumed (thousands)", srt=90, cex=1.2, )
dev.off()
| /plot_annualConsumption.r | no_license | bchasco/Inland | R | false | false | 1,062 | r | tiff("plot_annualConsumption.tiff", width=480, height=580, pointsize=14)
## Build and plot annual Chinook consumption per predator (2x2 panels).
## Relies on session objects nPredator, nAge, nChAge, nYear, years and the
## list CH_hisayt; the TIFF device is opened on the preceding line.
## FRAM_ch[p, a, y]: total consumption by predator p of age-a fish in year y.
FRAM_ch = array(0,c(nPredator,max(nAge),nYear))
for(y in 1:nYear)
{
for(p in 1:4)
for(a in 1:nChAge)
{
tmp = sum(CH_hisayt[[p]][,,a,y,])
FRAM_ch[p,a,y] = tmp
}
}
## One panel per predator; wide left/bottom margins leave room for the
## single shared y-axis label added after the loop.
par(mfrow=c(2,2), mai=c(1,1.,0.4,0))
for(p in 1:4)
{
## One line per ocean age, values in thousands of fish.
## NOTE(review): ylim is max(1, FRAM_ch[p,,])/1000; if the intent was a
## floor of 1 on the axis it should be max(1, FRAM_ch[p,,]/1000) -- confirm.
matplot(t(FRAM_ch[p,,])/1000, type="l",
ylab="",
xlab="",
axes=FALSE,
ylim=c(0,max(1,FRAM_ch[p,,])/1000),
lty=1,
col=1:5,
lwd=2)
axis(1,at=1:nYear, labels=years, cex=1.3)
## Panel letter "( a )" .. "( d )" near the top-left of each panel.
text(3,max(t(FRAM_ch[p,,])/1000), paste0("( ", letters[p], " )"), cex=1.3)
if(p==2)
legend(0,max(t(FRAM_ch[p,,])/1000),legend=paste("Ocean age", 0:4), lty=1, lwd=2, col=1:nChAge, bty="n", cex=1.3)
axis(2)
box()
}
## Overlay an invisible full-figure plot to add one shared, rotated y label.
par(mai=c(0,0,0,0), new=TRUE, fig=c(0,1,0,1))
plot(1, type="n", axes=FALSE, xaxs="i", yaxs="i", xlim=c(0,1), ylim=c(0,1))
text(0.05,0.5, "Number of Chinook consumed (thousands)", srt=90, cex=1.2, )
dev.off()
|
library(data.table)
# Plot 4 of the ExData course project: a 2x2 panel of household power
# measurements for 2007-02-01 and 2007-02-02, written to plot4.png.
# Read the full data set; fread keeps the measurement columns as character
# (missing markers), so values are coerced with as.numeric() below.
powerConsumption <- fread("household_power_consumption.txt")
# Subset the two days of interest (dates are stored as d/m/Y strings).
powData <- subset(powerConsumption,powerConsumption$Date=="1/2/2007"
| powerConsumption$Date=="2/2/2007")
# Convert Date and build a combined POSIXct timestamp for the x-axis.
powData$Date <- as.Date(powData$Date,"%d/%m/%Y")
dateTime <- paste0(powData$Date," ",powData$Time)
powData$DateTime <- as.POSIXct(dateTime)
# Open the png device.
png(filename = "plot4.png",
width = 480, height = 480, units = "px")
# 2x2 panel layout.
par(mfrow = c (2,2))
## subplot 1: global active power over time
plot(as.numeric(powData$Global_active_power) ~ powData$DateTime
, type="l"
, xlab= " "
, ylab = "Global Active Power (killowatts)")
## subplot 2: voltage over time
plot(as.numeric(powData$Voltage) ~ powData$DateTime
, type="l"
, xlab= "datetime"
, ylab = "Voltage")
## subplot 3: the three sub-metering series with a legend
plot(as.numeric(powData$Sub_metering_1) ~ powData$DateTime
, type="l"
, xlab= " "
, ylab = "Energy sub metering")
lines(as.numeric(powData$Sub_metering_2) ~ powData$DateTime
, col = "red")
lines(as.numeric(powData$Sub_metering_3) ~ powData$DateTime
, col = "blue")
legend("topright"
, col=c("black", "red", "blue")
, bty = "n"
,legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## subplot 4: global reactive power over time
## FIX: axis labels were wrong (xlab was "Global_reactive_power" and ylab
## was "Voltage", copied from subplot 2); the x axis is datetime and the
## y axis is Global_reactive_power.
plot(as.numeric(powData$Global_reactive_power) ~ powData$DateTime
, type="l"
, xlab= "datetime"
, ylab = "Global_reactive_power")
dev.off()
| /plot4.R | no_license | xlicf/ExData_Plotting1 | R | false | false | 1,576 | r | library(data.table)
# Plot 4 of the ExData course project: a 2x2 panel of household power
# measurements for 2007-02-01 and 2007-02-02, written to plot4.png.
# Read the full data set; fread keeps the measurement columns as character
# (missing markers), so values are coerced with as.numeric() below.
powerConsumption <- fread("household_power_consumption.txt")
# Subset the two days of interest (dates are stored as d/m/Y strings).
powData <- subset(powerConsumption,powerConsumption$Date=="1/2/2007"
| powerConsumption$Date=="2/2/2007")
# Convert Date and build a combined POSIXct timestamp for the x-axis.
powData$Date <- as.Date(powData$Date,"%d/%m/%Y")
dateTime <- paste0(powData$Date," ",powData$Time)
powData$DateTime <- as.POSIXct(dateTime)
# Open the png device.
png(filename = "plot4.png",
width = 480, height = 480, units = "px")
# 2x2 panel layout.
par(mfrow = c (2,2))
## subplot 1: global active power over time
plot(as.numeric(powData$Global_active_power) ~ powData$DateTime
, type="l"
, xlab= " "
, ylab = "Global Active Power (killowatts)")
## subplot 2: voltage over time
plot(as.numeric(powData$Voltage) ~ powData$DateTime
, type="l"
, xlab= "datetime"
, ylab = "Voltage")
## subplot 3: the three sub-metering series with a legend
plot(as.numeric(powData$Sub_metering_1) ~ powData$DateTime
, type="l"
, xlab= " "
, ylab = "Energy sub metering")
lines(as.numeric(powData$Sub_metering_2) ~ powData$DateTime
, col = "red")
lines(as.numeric(powData$Sub_metering_3) ~ powData$DateTime
, col = "blue")
legend("topright"
, col=c("black", "red", "blue")
, bty = "n"
,legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## subplot 4: global reactive power over time
## FIX: axis labels were wrong (xlab was "Global_reactive_power" and ylab
## was "Voltage", copied from subplot 2); the x axis is datetime and the
## y axis is Global_reactive_power.
plot(as.numeric(powData$Global_reactive_power) ~ powData$DateTime
, type="l"
, xlab= "datetime"
, ylab = "Global_reactive_power")
dev.off()
|
## Spatially join NYC green-taxi trips to ZIP-code polygons, first by
## pickup location and then by dropoff location, and export the trips that
## fall inside the ZIP shapefile.
## Assumes green_sample (a data.frame of trips with Pickup_/Dropoff_
## longitude+latitude columns) already exists in the session -- TODO
## confirm where it is loaded.
library(maptools)
library(ggplot2)
require("rgdal") # requires sp, will use proj.4 if installed
require("maptools")
require("ggplot2")
require("plyr")
require(sp)
require(rgdal)
require(maps)
# Set working directory
setwd("C:\\Users\\akshata\\Documents\\DataVizProject\\shapefile")
# reading data/shapefile
zip_area <- readShapePoly("ZIP_CODE_040114.shp")
# Plot Area
## Read the same shapefile with projection info and fortify it for ggplot.
zip = readOGR(dsn=".", layer="ZIP_CODE_040114")
zip@data$id = rownames(zip@data)
zip.points = fortify(zip, region="id")
zip.df = join(zip.points, zip@data, by="id")
#ggplot(zip.df, aes(x=long, y=lat, group = group)) + geom_point()
#ggplot() + geom_path(data = zip_area, aes(x=long, y=lat, group = group))
summary(zip_area)
#Join Shapefile with Taxi Data
## Promote the trip data.frame to a SpatialPointsDataFrame on pickup
## coordinates and put it in the shapefile's CRS.
class(green_sample)
coordinates(green_sample) <- ~Pickup_longitude+Pickup_latitude
proj4string(green_sample)<- proj4string(zip)
proj4string(green_sample)<- CRS("+proj=longlat +datum=NAD83")
green_sample<- spTransform(green_sample, CRS(proj4string(zip)))
identical(proj4string(green_sample), proj4string(zip))
summary(zip)
proj4string(zip)
#Only the locations inside the map
inside_zip<- !is.na(over(green_sample, as(zip, "SpatialPolygons")))
mean(inside_zip)
#inside_zip<- over(green_sample, as(zip, "SpatialPolygons"))
green_map <- as.data.frame(inside_zip)
#Compute ZIPs
## over() returns the matching polygon's attribute row for each point.
green_sample$zip <- over(green_sample, zip)
green <- as.data.frame(green_sample)
green['population']<-green$zip['POPULATION']
green['area']<-green$zip['AREA']
green['po_name']<-green$zip['PO_NAME']
green['county']<-green$zip['COUNTY']
green['inside_zip'] <- green_map['inside_zip']
green['zip']<-green$zip['ZIPCODE']
#write.csv(green, "greenTaxiWithZip_all.csv")
#Draw the map
green_viz <- subset(green, green$inside_zip != 'FALSE')
ggplot() + geom_path(data = zip, aes(x=long, y=lat, group = group)) + geom_point(data = green_viz, aes(x= Pickup_longitude, y=Pickup_latitude), color = "green")
#Save data for all the points inside the map
#write.csv(green_viz, "green_viz.csv")
#Repeate the procedure for Dropoff locations
#Join Shapefile with Taxi Data
class(green_viz)
coordinates(green_viz) <- ~Dropoff_longitude+Dropoff_latitude
proj4string(green_viz)<- proj4string(zip)
proj4string(green_viz)<- CRS("+proj=longlat +datum=NAD83")
green_viz<- spTransform(green_viz, CRS(proj4string(zip)))
identical(proj4string(green_viz), proj4string(zip))
summary(zip)
proj4string(zip)
#Only the locations inside the map
inside_zip<- !is.na(over(green_viz, as(zip, "SpatialPolygons")))
mean(inside_zip)
#inside_zip<- over(green_sample, as(zip, "SpatialPolygons"))
green_map_drop <- as.data.frame(inside_zip)
#Compute ZIPs
green_viz$drop_zip <- over(green_viz, zip)
green <- as.data.frame(green_viz)
green['drop_population']<-green$drop_zip['POPULATION']
green['drop_area']<-green$drop_zip['AREA']
green['drop_po_name']<-green$drop_zip['PO_NAME']
green['drop_county']<-green$drop_zip['COUNTY']
green['drop_inside_zip'] <- green_map_drop['inside_zip']
green['drop_zip']<-green$drop_zip['ZIPCODE']
#write.csv(green, "greenTaxiWithZip_all.csv")
#Draw the map
green_viz <- subset(green, green$drop_inside_zip != 'FALSE')
ggplot() + geom_path(data = zip, aes(x=long, y=lat, group = group)) + geom_point(data = green_viz, aes(x= Dropoff_longitude, y=Dropoff_latitude), color = "green")
#Save data for all the points inside the map
write.csv(green_viz, "green_viz_July.csv")
#Remove all objects in R
rm(list = ls()) | /Scripts/02 Taxi_Pickup_Green_NYC.R | no_license | akshata92/NYC-Green-Taxi-Data-Visualization | R | false | false | 3,509 | r | library(maptools)
## Spatially join NYC green-taxi trips to ZIP-code polygons, first by
## pickup location and then by dropoff location, and export the trips that
## fall inside the ZIP shapefile.
## Assumes green_sample (a data.frame of trips with Pickup_/Dropoff_
## longitude+latitude columns) already exists in the session -- TODO
## confirm where it is loaded.
library(ggplot2)
require("rgdal") # requires sp, will use proj.4 if installed
require("maptools")
require("ggplot2")
require("plyr")
require(sp)
require(rgdal)
require(maps)
# Set working directory
setwd("C:\\Users\\akshata\\Documents\\DataVizProject\\shapefile")
# reading data/shapefile
zip_area <- readShapePoly("ZIP_CODE_040114.shp")
# Plot Area
## Read the same shapefile with projection info and fortify it for ggplot.
zip = readOGR(dsn=".", layer="ZIP_CODE_040114")
zip@data$id = rownames(zip@data)
zip.points = fortify(zip, region="id")
zip.df = join(zip.points, zip@data, by="id")
#ggplot(zip.df, aes(x=long, y=lat, group = group)) + geom_point()
#ggplot() + geom_path(data = zip_area, aes(x=long, y=lat, group = group))
summary(zip_area)
#Join Shapefile with Taxi Data
## Promote the trip data.frame to a SpatialPointsDataFrame on pickup
## coordinates and put it in the shapefile's CRS.
class(green_sample)
coordinates(green_sample) <- ~Pickup_longitude+Pickup_latitude
proj4string(green_sample)<- proj4string(zip)
proj4string(green_sample)<- CRS("+proj=longlat +datum=NAD83")
green_sample<- spTransform(green_sample, CRS(proj4string(zip)))
identical(proj4string(green_sample), proj4string(zip))
summary(zip)
proj4string(zip)
#Only the locations inside the map
inside_zip<- !is.na(over(green_sample, as(zip, "SpatialPolygons")))
mean(inside_zip)
#inside_zip<- over(green_sample, as(zip, "SpatialPolygons"))
green_map <- as.data.frame(inside_zip)
#Compute ZIPs
## over() returns the matching polygon's attribute row for each point.
green_sample$zip <- over(green_sample, zip)
green <- as.data.frame(green_sample)
green['population']<-green$zip['POPULATION']
green['area']<-green$zip['AREA']
green['po_name']<-green$zip['PO_NAME']
green['county']<-green$zip['COUNTY']
green['inside_zip'] <- green_map['inside_zip']
green['zip']<-green$zip['ZIPCODE']
#write.csv(green, "greenTaxiWithZip_all.csv")
#Draw the map
green_viz <- subset(green, green$inside_zip != 'FALSE')
ggplot() + geom_path(data = zip, aes(x=long, y=lat, group = group)) + geom_point(data = green_viz, aes(x= Pickup_longitude, y=Pickup_latitude), color = "green")
#Save data for all the points inside the map
#write.csv(green_viz, "green_viz.csv")
#Repeate the procedure for Dropoff locations
#Join Shapefile with Taxi Data
class(green_viz)
coordinates(green_viz) <- ~Dropoff_longitude+Dropoff_latitude
proj4string(green_viz)<- proj4string(zip)
proj4string(green_viz)<- CRS("+proj=longlat +datum=NAD83")
green_viz<- spTransform(green_viz, CRS(proj4string(zip)))
identical(proj4string(green_viz), proj4string(zip))
summary(zip)
proj4string(zip)
#Only the locations inside the map
inside_zip<- !is.na(over(green_viz, as(zip, "SpatialPolygons")))
mean(inside_zip)
#inside_zip<- over(green_sample, as(zip, "SpatialPolygons"))
green_map_drop <- as.data.frame(inside_zip)
#Compute ZIPs
green_viz$drop_zip <- over(green_viz, zip)
green <- as.data.frame(green_viz)
green['drop_population']<-green$drop_zip['POPULATION']
green['drop_area']<-green$drop_zip['AREA']
green['drop_po_name']<-green$drop_zip['PO_NAME']
green['drop_county']<-green$drop_zip['COUNTY']
green['drop_inside_zip'] <- green_map_drop['inside_zip']
green['drop_zip']<-green$drop_zip['ZIPCODE']
#write.csv(green, "greenTaxiWithZip_all.csv")
#Draw the map
green_viz <- subset(green, green$drop_inside_zip != 'FALSE')
ggplot() + geom_path(data = zip, aes(x=long, y=lat, group = group)) + geom_point(data = green_viz, aes(x= Dropoff_longitude, y=Dropoff_latitude), color = "green")
#Save data for all the points inside the map
write.csv(green_viz, "green_viz_July.csv")
#Remove all objects in R
rm(list = ls()) |
library(shiny)
library(openxlsx)
library(plotly)
# ---- Load and prepare the data ----
# NOTE: hard-coded local path, as in the original project layout.
setwd("C:/Users/Andrew/Documents/Data Science Masters/Doing Data Science/Case Study 2/Case Study 2 Project/")
casestudy <- read.xlsx("Data/CaseStudy2-data.xlsx", sheet = 1, colNames = TRUE)
# Columns 9, 10, 22, and 27 carry no meaningful data; drop them.
empty_cols <- c(9, 10, 22, 27)
casestudy <- casestudy[, -empty_cols]
# Column names feed the axis-selection dropdowns below.
parameters <- colnames(casestudy)
# ---- UI definition ----
fluidPage(
  h2("Exploratory Data Analysis"),
  h4("Change the vertical and horizontal axes to compare different parameters"),
  tags$hr(),  # vertical space
  # Axis selectors
  fixedRow(
    column(3, selectInput(inputId = "featureInput1", label = "Vertical axis",
                          choices = parameters, selected = "MonthlyIncome")),
    column(4, selectInput(inputId = "featureInput2", label = "Horizontal axis",
                          choices = parameters, selected = "JobRole"))),
  # Scatter plot output
  fixedRow(
    column(6, plotlyOutput("Plot1", height = "500px", width = "500px"))))
column(6, plotlyOutput("Plot1", height = "500px", width= "500px")))) |
library("DALEX")
library(drifter)
HELP_LINK <- 'https://modeloriented.github.io/drifter/'
DOCS_LINK <- 'https://modeloriented.github.io/drifter/reference/check_drift.html'
# Render frequency tables for the factor columns of a data set.
#
# For every entry of factor_columns, tabulates the corresponding column of
# data_set and styles it as an HTML table (knitr::kable +
# kableExtra::kable_styling); the tables are concatenated, separated by
# "<br>" tags.
#
# Args:
#   data_set       data.frame whose columns are tabulated.
#   factor_columns named list/vector of column indices; the names become
#                  the table headers.
# Returns: a single HTML string, or NULL when factor_columns is empty.
renderModelDriftTables <- function(data_set, factor_columns){
  # NULL start means paste() prepends "<br>" before the first table, which
  # is the original output format and is kept for compatibility.  (The
  # redundant `factor_data <- vector()` that was immediately overwritten
  # has been removed.)
  factor_data <- NULL
  if(length(factor_columns) > 0) {
    # seq_along() instead of 1:length() -- safe for zero-length input.
    for(i in seq_along(factor_columns)){
      column_number <- factor_columns[[i]]
      column_name <- names(factor_columns)[i]
      temp_table <- kable_styling(kable(table(data_set[, column_number]), col.names = c(column_name, "Frequency")), bootstrap_options = c("responsive", "bordered", "hover"), full_width = FALSE)
      factor_data <- paste(factor_data, temp_table, sep="<br>")
    }
  }
  factor_data
}
# Convert a data frame into a styled HTML table (responsive, bordered,
# hover highlighting, natural width).
renderTable <- function(data_table) {
  html_table <- kable(data_table)
  kable_styling(
    html_table,
    bootstrap_options = c("responsive", "bordered", "hover"),
    full_width = FALSE
  )
}
# Wrap one drift-check result in a titled HTML column: an <h3> header
# carrying section_name followed by the rendered table, both inside a
# <div class='column'> container.
renderDrifterSection <- function(section_name, data_table) {
  paste0(
    "<div class='column'>",
    "<h3 class='section-label'>", section_name, "</h3>",
    renderTable(data_table),
    "</div>"
  )
}
# modelDown module entry point for the Drifter page.
#
# Args:
#   explainer_pairs list of 2-element lists; each pair holds an "old" and a
#                   "new" DALEX explainer for the same model.
#   options         modelDown options, forwarded to save_to_repository().
#   img_folder      image output folder (unused here; part of the module
#                   generator interface).
# Returns: a list with display_name, name and data$drifter_data (the HTML
#          for the page template).
generator <- function(explainer_pairs, options, img_folder) {
# Accumulate one HTML "row" per explainer pair.
drifter_data <- ""
for(pair in explainer_pairs) {
old_explainer <- pair[[1]]
new_explainer <- pair[[2]]
drifter_data <- paste0(drifter_data, "<div class='row'>", "<h3 class='model-label'>", old_explainer$label, "</h3>")
# drifter::check_drift compares the two models on their respective data.
drift <- check_drift(old_explainer$model, new_explainer$model,
old_explainer$data, new_explainer$data,
old_explainer$y, new_explainer$y,
predict_function = old_explainer$predict_function)
# Persist the drift object to the archivist repo and get its hook.
archivist_link <- save_to_repository(drift, options)
model_drift <- renderDrifterSection("Model Drift", drift$model_drift)
covariate_drift <- renderDrifterSection("Covariate Drift", data.frame(Variable = drift$covariate_drift$variables , Drift = drift$covariate_drift$drift))
residual_drift <- renderDrifterSection("Residual Drift", drift$residual_drift)
# HELP_LINK / DOCS_LINK are module-level constants defined above.
help_section <- paste0("<help-panel help-url='", HELP_LINK, "' docs-url='", DOCS_LINK, "'></help-panel>")
archivist_section <- paste0("<div class='archivist-code'><p>Get this object: <code>", archivist_link, "</code></p></div>")
drifter_data <- paste0(drifter_data, model_drift, covariate_drift, residual_drift, help_section, "</div>", archivist_section)
}
list(
display_name='Drifter',
name='drifter',
data=list(drifter_data = drifter_data)
)
}
| /inst/extdata/modules/drifter/generator.R | no_license | subodhk26/modelDown | R | false | false | 2,588 | r | library("DALEX")
library(drifter)
HELP_LINK <- 'https://modeloriented.github.io/drifter/'
DOCS_LINK <- 'https://modeloriented.github.io/drifter/reference/check_drift.html'
# Render frequency tables for the factor columns of a data set.
#
# For every entry of factor_columns, tabulates the corresponding column of
# data_set and styles it as an HTML table (knitr::kable +
# kableExtra::kable_styling); the tables are concatenated, separated by
# "<br>" tags.
#
# Args:
#   data_set       data.frame whose columns are tabulated.
#   factor_columns named list/vector of column indices; the names become
#                  the table headers.
# Returns: a single HTML string, or NULL when factor_columns is empty.
renderModelDriftTables <- function(data_set, factor_columns){
  # NULL start means paste() prepends "<br>" before the first table, which
  # is the original output format and is kept for compatibility.  (The
  # redundant `factor_data <- vector()` that was immediately overwritten
  # has been removed.)
  factor_data <- NULL
  if(length(factor_columns) > 0) {
    # seq_along() instead of 1:length() -- safe for zero-length input.
    for(i in seq_along(factor_columns)){
      column_number <- factor_columns[[i]]
      column_name <- names(factor_columns)[i]
      temp_table <- kable_styling(kable(table(data_set[, column_number]), col.names = c(column_name, "Frequency")), bootstrap_options = c("responsive", "bordered", "hover"), full_width = FALSE)
      factor_data <- paste(factor_data, temp_table, sep="<br>")
    }
  }
  factor_data
}
# Convert a data frame into a styled HTML table (responsive, bordered,
# hover highlighting, natural width).
renderTable <- function(data_table) {
  html_table <- kable(data_table)
  kable_styling(
    html_table,
    bootstrap_options = c("responsive", "bordered", "hover"),
    full_width = FALSE
  )
}
# Wrap one drift-check result in a titled HTML column: an <h3> header
# carrying section_name followed by the rendered table, both inside a
# <div class='column'> container.
renderDrifterSection <- function(section_name, data_table) {
  paste0(
    "<div class='column'>",
    "<h3 class='section-label'>", section_name, "</h3>",
    renderTable(data_table),
    "</div>"
  )
}
# modelDown module entry point for the Drifter page.
#
# Args:
#   explainer_pairs list of 2-element lists; each pair holds an "old" and a
#                   "new" DALEX explainer for the same model.
#   options         modelDown options, forwarded to save_to_repository().
#   img_folder      image output folder (unused here; part of the module
#                   generator interface).
# Returns: a list with display_name, name and data$drifter_data (the HTML
#          for the page template).
generator <- function(explainer_pairs, options, img_folder) {
# Accumulate one HTML "row" per explainer pair.
drifter_data <- ""
for(pair in explainer_pairs) {
old_explainer <- pair[[1]]
new_explainer <- pair[[2]]
drifter_data <- paste0(drifter_data, "<div class='row'>", "<h3 class='model-label'>", old_explainer$label, "</h3>")
# drifter::check_drift compares the two models on their respective data.
drift <- check_drift(old_explainer$model, new_explainer$model,
old_explainer$data, new_explainer$data,
old_explainer$y, new_explainer$y,
predict_function = old_explainer$predict_function)
# Persist the drift object to the archivist repo and get its hook.
archivist_link <- save_to_repository(drift, options)
model_drift <- renderDrifterSection("Model Drift", drift$model_drift)
covariate_drift <- renderDrifterSection("Covariate Drift", data.frame(Variable = drift$covariate_drift$variables , Drift = drift$covariate_drift$drift))
residual_drift <- renderDrifterSection("Residual Drift", drift$residual_drift)
# HELP_LINK / DOCS_LINK are module-level constants defined above.
help_section <- paste0("<help-panel help-url='", HELP_LINK, "' docs-url='", DOCS_LINK, "'></help-panel>")
archivist_section <- paste0("<div class='archivist-code'><p>Get this object: <code>", archivist_link, "</code></p></div>")
drifter_data <- paste0(drifter_data, model_drift, covariate_drift, residual_drift, help_section, "</div>", archivist_section)
}
list(
display_name='Drifter',
name='drifter',
data=list(drifter_data = drifter_data)
)
}
|
##======= Handle arguments from command line ========
# Send R errors to stderr and exit non-zero so Galaxy marks the job failed.
options(show.error.messages=FALSE,
        error=function(){
          cat(geterrmessage(), file=stderr())
          quit("no", 1, FALSE)  # FIX: was `F`, which is reassignable; use FALSE
        })
# we need that to not crash galaxy with an UTF8 error on German LC settings.
loc = Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")
# suppress warnings
options(warn = -1)
options(stringsAsFactors=FALSE, useFancyQuotes=FALSE)
args = commandArgs(trailingOnly=TRUE)
suppressPackageStartupMessages({
  library(getopt)
  library(tools)
})
# getopt spec matrix, one row per flag:
# column 1: the long flag name
# column 2: the short flag alias. A SINGLE character string
# column 3: argument mask
#           0: no argument
#           1: argument required
#           2: argument is optional
# column 4: data type to which the flag's argument shall be cast.
#           possible values: logical, integer, double, complex, character.
spec_list=list()
##------- 1. input data ---------------------
spec_list$ECHO = c('echo', 'e', '1', 'character')
spec_list$CONSTRUCT_NETWORK_WORKSPACE = c('construct_network_workspace', 'w', '1', 'character')
spec_list$SOFT_THRESHOLD_POWER = c('soft_threshold_power', 'p', '2', 'double')
spec_list$PLOT_GENES = c('plot_genes', 'n', '1', 'integer')
##--------2. output report and report site directory --------------
spec_list$OUTPUT_HTML = c('wgcna_eigengene_visualization_html', 'o', '1', 'character')
spec_list$OUTPUT_DIR = c('wgcna_eigengene_visualization_dir', 'd', '1', 'character')
##--------3. Rmd templates in the tool directory ----------
spec_list$WGCNA_EIGENGENE_VISUALIZATION_RMD = c('wgcna_eigengene_visualization_rmd', 'M', '1', 'character')
##------------------------------------------------------------------
spec = t(as.data.frame(spec_list))
opt = getopt(spec)
# arguments are accessed by long flag name (the first column in the spec matrix)
# NOT by element name in the spec_list
# example: opt$help, opt$expression_file
##====== End of arguments handling ==========
#------ Load libraries ---------
library(rmarkdown)
library(WGCNA)
library(DT)        # also provides the %>% pipe used below
library(htmltools)
library(ggplot2)
#----- 1. create the report directory ------------------------
# FIX: use dir.create() instead of system('mkdir -p ...'); it is portable
# and safe for paths containing spaces or shell metacharacters.
dir.create(opt$wgcna_eigengene_visualization_dir, recursive = TRUE, showWarnings = FALSE)
#----- 2. generate Rmd files with Rmd templates --------------
# a. templates without placeholder variables:
#    copy templates from tool directory to the working directory.
# b. templates with placeholder variables:
#    substitute variables with user input values and place them in the working directory.
#----- 01 wgcna_eigengene_visualization.Rmd -----------------------
# Read the template and substitute each PLACEHOLDER with the user's value.
# NOTE(review): soft_threshold_power is an *optional* flag; if omitted,
# opt$soft_threshold_power is NULL and gsub() will error -- confirm the
# Galaxy wrapper always supplies it.
readLines(opt$wgcna_eigengene_visualization_rmd) %>%
  (function(x) {
    gsub('ECHO', opt$echo, x)
  }) %>%
  (function(x) {
    gsub('CONSTRUCT_NETWORK_WORKSPACE', opt$construct_network_workspace, x)
  }) %>%
  (function(x) {
    gsub('SOFT_THRESHOLD_POWER', opt$soft_threshold_power, x)
  }) %>%
  (function(x) {
    gsub('PLOT_GENES', opt$plot_genes, x)
  }) %>%
  (function(x) {
    gsub('OUTPUT_DIR', opt$wgcna_eigengene_visualization_dir, x)
  }) %>%
  (function(x) {
    fileConn = file('wgcna_eigengene_visualization.Rmd')
    writeLines(x, con=fileConn)
    close(fileConn)
  })
#------ 3. render all Rmd files --------
render('wgcna_eigengene_visualization.Rmd', output_file = opt$wgcna_eigengene_visualization_html)
#-------4. manipulate outputs -----------------------------
| /old-tools/legacy/rmarkdown_wgcna/wgcna_eigengene_visualization_render.R | permissive | statonlab/aurora-galaxy-tools | R | false | false | 3,482 | r | ##======= Handle arguments from command line ========
# Set up R error handling to go to stderr and exit non-zero so the calling
# framework (Galaxy) can detect tool failures.
options(show.error.messages=FALSE,
        error=function(){
          cat(geterrmessage(), file=stderr())
          quit("no", 1, FALSE)
        })
# we need that to not crash galaxy with an UTF8 error on German LC settings.
loc = Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")
# suppress warnings while the report code runs
options(warn = -1)
options(stringsAsFactors=FALSE, useFancyQuotes=FALSE)
args = commandArgs(trailingOnly=TRUE)
suppressPackageStartupMessages({
  library(getopt)
  library(tools)
})
# getopt() spec, one row per flag:
# column 1: the long flag name
# column 2: the short flag alias. A SINGLE character string
# column 3: argument mask (0: no argument, 1: required, 2: optional)
# column 4: data type to which the flag's argument shall be cast
#           (logical, integer, double, complex, character)
spec_list=list()
##------- 1. input data ---------------------
spec_list$ECHO = c('echo', 'e', '1', 'character')
spec_list$CONSTRUCT_NETWORK_WORKSPACE = c('construct_network_workspace', 'w', '1', 'character')
spec_list$SOFT_THRESHOLD_POWER = c('soft_threshold_power', 'p', '2', 'double')
spec_list$PLOT_GENES = c('plot_genes', 'n', '1', 'integer')
##--------2. output report and report site directory --------------
spec_list$OUTPUT_HTML = c('wgcna_eigengene_visualization_html', 'o', '1', 'character')
spec_list$OUTPUT_DIR = c('wgcna_eigengene_visualization_dir', 'd', '1', 'character')
##--------3. Rmd templates in the tool directory ----------
spec_list$WGCNA_EIGENGENE_VISUALIZATION_RMD = c('wgcna_eigengene_visualization_rmd', 'M', '1', 'character')
##------------------------------------------------------------------
spec = t(as.data.frame(spec_list))
opt = getopt(spec)
# arguments are accessed by long flag name (the first column in the spec
# matrix), NOT by element name in spec_list, e.g. opt$echo.
##====== End of arguments handling ==========
#------ Load libraries ---------
library(rmarkdown)
library(WGCNA)
library(DT)
library(htmltools)
library(ggplot2)
#----- 1. create the report directory ------------------------
# dir.create() replaces the former shell-out to `mkdir -p`: it is portable,
# safe for paths containing spaces, and recursive = TRUE matches mkdir -p's
# create-parents behaviour (showWarnings = FALSE keeps the already-exists
# case silent, like mkdir -p).
dir.create(opt$wgcna_eigengene_visualization_dir, recursive = TRUE, showWarnings = FALSE)
#----- 2. generate Rmd files with Rmd templates --------------
# Substitute the ALL-CAPS placeholders in the Rmd template with the user's
# command-line values and write the result to the working directory.
# A plain loop replaces the former `%>%` chain of anonymous functions:
# magrittr was never attached, so the pipe relied on some other loaded
# package happening to re-export it.
rmd_lines = readLines(opt$wgcna_eigengene_visualization_rmd)
# list() (not c()) so a missing optional value is kept as NULL and still
# errors inside gsub(), exactly as the old per-placeholder calls did.
substitutions = list(
  ECHO = opt$echo,
  CONSTRUCT_NETWORK_WORKSPACE = opt$construct_network_workspace,
  SOFT_THRESHOLD_POWER = opt$soft_threshold_power,
  PLOT_GENES = opt$plot_genes,
  OUTPUT_DIR = opt$wgcna_eigengene_visualization_dir
)
# Apply the substitutions in the same order as the original pipeline.
for (placeholder in names(substitutions)) {
  rmd_lines = gsub(placeholder, substitutions[[placeholder]], rmd_lines)
}
writeLines(rmd_lines, con = 'wgcna_eigengene_visualization.Rmd')
#------ 3. render all Rmd files --------
render('wgcna_eigengene_visualization.Rmd', output_file = opt$wgcna_eigengene_visualization_html)
#-------4. manipulate outputs -----------------------------
|
# Log-odds (logit) transform for proportions.
#
# @param value numeric vector of proportions; every non-NA element must
#   lie in [0, 1] (NAs are skipped by the check and propagate through).
# @return log(value / (1 - value)); 0 maps to -Inf and 1 to Inf.
logit <- function(value) {
  observed <- na.omit(value)
  if (any(observed < 0) || any(observed > 1)) {
    stop("values must be between 0 and 1.")
  }
  log(value / (1 - value))
}
# Inverse of the logit transformation: maps the real line onto [0, 1].
#
# @param value numeric vector of log-odds.
# @return 1 / (1 + exp(-value)); NAs propagate.
invlogit <- function(value) {
  neg_odds <- exp(-value)
  1 / (1 + neg_odds)
}
| /src/utilities/logit.R | permissive | sritchie73/UKB_NMR_QC_paper | R | false | false | 306 | r | # Logit transformation to use for percentages
# Logit transformation to use for percentages.
# Maps proportions in [0, 1] to log-odds; validates non-NA inputs first.
logit <- function(value) {
  # Reject any non-NA element outside [0, 1]; NAs pass the check and
  # propagate through the transform.
  if (!(all(na.omit(value) >= 0) && all(na.omit(value) <= 1)))
    stop("values must be between 0 and 1.")
  # Log-odds; 0 maps to -Inf and 1 to Inf by construction.
  log( value / (1 - value) )
}
# Inverse of the logit transformation
# Maps the real line back onto (0, 1); NAs propagate.
invlogit <- function(value) {
  1 / (1 + exp(-value))
}
|
This is my first R script
| /First R Script.R | no_license | caiweixian/introtobda | R | false | false | 26 | r | This is my first R script
|
# Interactively prompt the user for the value of a function argument.
#
# Displays "What is the argument <argument> ? - ex: <example>" (both names
# rendered via the package-internal mark() helper) and reads the reply
# from the console with scan().
#
# @param argument the argument name to show in the prompt.
# @param example  an example value shown after "ex:" (default "none").
# @param what     type template forwarded to scan(); the default ""
#   makes scan() read character fields.
# @param ...      further arguments forwarded to scan() (e.g. n, nmax).
# @return whatever scan() returns for the user's input.
enter <-
function(argument, example = "none", what = "", ...){
  # FALSE spelled out instead of the reassignable shorthand F.
  cat("\n\tWhat is the argument ", mark(argument, FALSE),
      " ? - ex: ", mark(example, FALSE), "\n\t\t")
  scan(what = what, ...)
}
| /ilc/R/enter.R | no_license | ingted/R-Examples | R | false | false | 197 | r | enter <-
function(argument, example = "none", what = "", ...){
cat("\n\tWhat is the argument ", mark(argument, F),
" ? - ex: ", mark(example, F), "\n\t\t")
scan(what = what, ...)
}
|
#YOU MIGHT WANT TO ADD SOME LIBRARIES HERE
###########################################
#read-in & setup data frame & clean up data
###########################################
can <- read.csv("C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/Downloads/curr.csv", header=TRUE, skip=6)
plan.data <- read.csv("C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/Downloads/plans.csv", header=TRUE)
plan.data <- as.data.table(plan.data)
plan.grower <- read.csv("C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/Downloads/plansbyga.csv", header=TRUE)
plan.grower <- as.data.table(plan.grower)
yr <- as.numeric(format(Sys.Date(),"%y"))
#Get only pertinent columns
loc <- which(colnames(can)=="Loc." )
grw <- which(colnames(can)=="Grower")
cat <- which(colnames(can)=="Category")
bsz <- which(colnames(can)=="Base.Size")
osd <- which(colnames(can)=="Orig..Str.Dt")
ppl <- which(colnames(can)=="Prod..Plan")
ppq <- which(colnames(can)=="Prod..Plan.EQU")
grp <- which(colnames(can)=="Gross..Prod.Qty")
gre <- which(colnames(can)=="EQU..Prod.Qty")
sto <- which(colnames(can)=="Std.Oper")
ldt <- which(colnames(can)=="Labor.Date")
wos <- which(colnames(can)=="WO.Stat")
opc <- which(colnames(can)=="Oper..Complete")
qty <- which(colnames(can)=="Orig..Ord.Qty")
equ <- which(colnames(can)=="Orig..Ord.EQU")
wip <- which(colnames(can)=="WIP.Ord.")
#put column numbers in vector
clmns <- c(loc,grw,cat,bsz,osd,ppl,sto,ldt,wos,opc,qty,equ,wip,grp,gre,ppq)
#create new matrix with just pertinent columns
can.1 <- can[,clmns]
#delete unneeded rows
can.2 <- can.1[!(can.1$Base.Size == ""),]
can.2 <- can.2[!(can.2$Grower == 500051),]
can.2 <- can.2[!(can.2$Grower == 500008),]
#change VH to CA
can.2$Loc.[can.2$Loc. == 180000]<-170000
can.2$WO.Stat[is.na(can.2$WO.Stat)]<-0
#create columns for month and year
###find length of date and use if to get dates
dt.1 <- as.numeric(substring(as.character(can.2$Orig..Str.Dt[1]),7,8))
#to handle whether date is held like "xx/xx/xx" or like "xxxxxxxxx"
if(dt.1 + 1 == yr|dt.1 == yr){
#function to separate month from string
unlst.mo <- function(x){
newcol <- unlist(strsplit(x, "[/]"))[1]
}
unlst.yr <- function(x){
newcol <- substring(unlist(strsplit(x, "[/]"))[3],3,4)
}
can.2$st.dt <- as.character(can.2$Orig..Str.Dt)
can.2$mo <- lapply(can.2$st.dt,unlst.mo)
can.2$yr <- lapply(can.2$st.dt,unlst.yr)
can.2$lb.mo <- lapply(as.character(can.2$Labor.Date),unlst.mo)
can.2$lb.yr <- lapply(as.character(can.2$Labor.Date),unlst.yr)
}else{
library(date)
can.2$st.dt <- as.Date(can.2$Orig..Str.Dt,origin="1899-12-30")
#start month
can.2$mo <- format(can.2$st.dt,"%m")
#start year
can.2$yr <- format(can.2$st.dt,"%y")
}
#remove comma characters so that columns are no longer factors and can have functions applied
can.2$Prod..Plan <- as.numeric(gsub(",","", can.2$Prod..Plan))
can.2$Prod..Plan.EQU <- as.numeric(gsub(",","", can.2$Prod..Plan.EQU))
can.2$Gross..Prod.Qty <- as.numeric(gsub(",","", can.2$Gross..Prod.Qty))
can.2$EQU..Prod.Qty <- as.numeric(gsub(",","", can.2$EQU..Prod.Qty))
can.2$Orig..Ord.Qty <- as.numeric(gsub(",","", can.2$Orig..Ord.Qty))
can.2$Orig..Ord.EQU <- as.numeric(gsub(",","", can.2$Orig..Ord.EQU))
##################################
#setup df with only necessary data
#######################################################
#to run this for production plan put "plan" for sum, for completed put "qty" for sum
library(data.table)
a <- data.table(loc=can.2$Loc.,
cat=can.2$Category,
qty=can.2$Orig..Ord.Qty,
month=can.2$mo,
year=can.2$yr,
grw=can.2$Grower,
plan=can.2$Prod..Plan,
grpr=can.2$Gross..Prod.Qty,
gre=can.2$EQU..Prod.Qty,
equ.pr=can.2$Prod..Plan.EQU,
size=can.2$Base.Size,
equ=can.2$Orig..Ord.EQU,
wo = can.2$WO.Stat,
lb.mo = can.2$lb.mo,
lb.yr = can.2$lb.yr,
st.op = can.2$Std.Oper)
a <- a[grw!=0,]
##################################
#variables
#########################################
###################################
grow.loc <- c(160000,170000,550000)
grow.loc.rng <- 1:length(grow.loc)
GA.or.ca <- c(500001,500003,500004)
GA.ga <- c(500001,500002)
or.ca.rng <- 1:length(GA.or.ca)
ga.rng <- 1:length(GA.ga)
#prior month
mo.1 <- as.numeric(format(Sys.Date(),"%m"))-1
mo = mo.1
#december handler
if(mo.1 == 0) mo <- 12
#function to create dataframe for a given location and growing area for complete YTD or just month
# Builds a plan-vs-actual summary for one location and growing area from
# the global work-order data.table `a`, keyed by base size (n = 1) or
# category (n = 2).  `yr` and `mo` (current year / last completed month)
# are read from the surrounding script's global environment.
#
# year = TRUE  -> year-to-date: plan summed over start months <= mo,
#                 gross production summed over labor months <= mo.
# year = FALSE -> last completed month only.
# Work-order statuses 0, 15 and 99 are always excluded.
# Returns the keyed sums plus perc.comp (gross / plan) and a trailing
# "zTotals" grand-total row (the "z" prefix sorts it last).
# NOTE(review): `month`/`mo` comparisons mix character and numeric and rely
# on R's coercion -- confirm month values carry no leading zeros.
createframe <- function(location, GA, n=1, year=FALSE){
  #n is 1 for "size" or 2 for "cat"
  kbys = c("size","cat")
  if(year==TRUE){
    # Plan and ordered quantity keyed on the work order's START month...
    temp.a <- a[loc==location & year==yr & month<=mo & grw==GA & wo!=15 & wo!=99 & wo != 0,
                list(orig.qty=sum(qty),prod=sum(plan)),keyby=eval(kbys[n])]
    # ...but actual gross production keyed on the LABOR month.
    temp.b <- a[loc==location & lb.yr == yr & lb.mo <= mo & grw==GA & wo != 15 & wo != 99 & wo != 0,
                list(grss=sum(grpr)), keyby = eval(kbys[n])]
    temp.a <- merge(temp.a,temp.b,by=eval(kbys[n]),all=TRUE)
    temp.a[is.na(temp.a)]<-0
    temp.a$perc.comp <- temp.a$grss/temp.a$prod
    #add totals row
    c1 <- "zTotals"
    c2 <- sum(temp.a$orig.qty)
    c3 <- sum(temp.a$prod)
    c4 <- sum(temp.a$grss)
    c5 <- c4/c3
    if(n==1){
      newrow <- data.frame(size=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
    }else newrow <- data.frame(cat=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
    temp.a <- rbind(temp.a,newrow)
  }else{
    # Single-month view: plan and gross both filtered on the start month.
    temp.a <- a[loc==location & year==yr & month==mo & grw==GA & wo!=15 & wo!=99 & wo != 0,
                list(orig.qty=sum(qty),prod=sum(plan),grss=sum(grpr)),keyby=eval(kbys[n])]
    temp.a$perc.comp <- temp.a$grss/temp.a$prod
    #add totals row
    c1 <- "zTotals"
    c2 <- sum(temp.a$orig.qty)
    c3 <- sum(temp.a$prod)
    c4 <- sum(temp.a$grss)
    c5 <- c4/c3
    if(n==1){
      newrow <- data.frame(size=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
    }else newrow <- data.frame(cat=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
    temp.a <- rbind(temp.a,newrow)
  }
  temp.a
}
yr.ca.sz.500001 <- createframe(170000,500001,year=TRUE)
yr.ca.sz.500003 <- createframe(170000,500003,year=TRUE)
yr.ca.sz.500004 <- createframe(170000,500004,year=TRUE)
yr.ca.cat.500001 <- createframe(170000,500001,n=2,year=TRUE)
yr.ca.cat.500003 <- createframe(170000,500003,n=2,year=TRUE)
yr.ca.cat.500004 <- createframe(170000,500004,n=2,year=TRUE)
yr.or.sz.500001 <- createframe(160000,500001,year=TRUE)
yr.or.sz.500003 <- createframe(160000,500003,year=TRUE)
yr.or.sz.500004 <- createframe(160000,500004,year=TRUE)
yr.or.cat.500001 <- createframe(160000,500001,n=2,year=TRUE)
yr.or.cat.500003 <- createframe(160000,500003,n=2,year=TRUE)
yr.or.cat.500004 <- createframe(160000,500004,n=2,year=TRUE)
yr.ga.sz.500001 <- createframe(550000,500001,year=TRUE)
yr.ga.sz.500002 <- createframe(550000,500002,year=TRUE)
yr.ga.cat.500001 <- createframe(550000,500001,n=2,year=TRUE)
yr.ga.cat.500002 <- createframe(550000,500002,n=2,year=TRUE)
mo.ca.sz.500001 <- createframe(170000,500001)
mo.ca.sz.500003 <- createframe(170000,500003)
mo.ca.sz.500004 <- createframe(170000,500004)
mo.ca.cat.500001 <- createframe(170000,500001,n=2)
mo.ca.cat.500003 <- createframe(170000,500003,n=2)
mo.ca.cat.500004 <- createframe(170000,500004,n=2)
mo.or.sz.500001 <- createframe(160000,500001)
mo.or.sz.500003 <- createframe(160000,500003)
mo.or.sz.500004 <- createframe(160000,500004)
mo.or.cat.500001 <- createframe(160000,500001,n=2)
mo.or.cat.500003 <- createframe(160000,500003,n=2)
mo.or.cat.500004 <- createframe(160000,500004,n=2)
mo.ga.sz.500001 <- createframe(550000,500001)
mo.ga.sz.500002 <- createframe(550000,500002)
mo.ga.cat.500001 <- createframe(550000,500001,n=2)
mo.ga.cat.500002 <- createframe(550000,500002,n=2)
########################################
#by future
########################################
#########################################
# Build a 3-month forward-looking production-plan table for one location
# and growing area, keyed by base size (n = 1) or category (n = 2).
#
# Reads the global work-order data.table `a`.  Sums the plan for the
# current calendar month and the two following months (wrapping past
# December into the next calendar year's month numbers), merges the
# three monthly columns, and appends a "zTotals" grand-total row (the
# "z" prefix sorts it last).
#
# @param location numeric location code (e.g. 160000, 170000, 550000).
# @param GA       grower / growing-area code.
# @param n        1 to key by "size", 2 to key by "cat" (default 1).
# @return data.table with columns prod.x, prod.y, prod (months 0-2 ahead)
#   and Total, plus the trailing "zTotals" row.
createfuture <- function(location, GA, n=1){
  kbys = c("size","cat")
  # Wrap a month number past December back to January.  Replaces the
  # copy-pasted if(mo == 13/14/15) chains and handles any offset.
  wrap.month <- function(m) ((m - 1) %% 12) + 1
  mo.1 <- as.numeric(format(Sys.Date(),"%m"))
  yr <- as.numeric(format(Sys.Date(),"%y"))
  # NOTE(review): `month` in `a` is derived from formatted dates and may be
  # character -- the == comparison relies on R's coercion; confirm months
  # are stored without leading zeros.
  mo <- wrap.month(mo.1)
  temp.a <- a[loc==location & year==yr & month==mo & grw==GA,
              list(prod=sum(plan)),keyby=eval(kbys[n])]
  mo <- wrap.month(mo.1 + 1)
  temp.b <- a[loc==location & year==yr & month==mo & grw==GA,
              list(prod=sum(plan)),keyby=eval(kbys[n])]
  mo <- wrap.month(mo.1 + 2)
  temp.c <- a[loc==location & year==yr & month==mo & grw==GA,
              list(prod=sum(plan)),keyby=eval(kbys[n])]
  # NOTE(review): merge() has no `keyby` argument -- it is swallowed by
  # `...` and the join falls back to the tables' shared key; confirm.
  temp.a <- merge(temp.a,temp.b,keyby=eval(kbys[n]),all=TRUE)
  temp.a <- merge(temp.a, temp.c,keyby=eval(kbys[n]),all=TRUE)
  temp.a[is.na(temp.a)]<-0
  temp.a$Total <- temp.a$prod.x + temp.a$prod.y + temp.a$prod
  # Append the grand-total row.
  c1 <- "zTotals"
  c2 <- sum(temp.a$prod.x)
  c3 <- sum(temp.a$prod.y)
  c4 <- sum(temp.a$prod)
  c5 <- sum(temp.a$Total)
  if(n==1){
    newrow <- data.frame(size = c1, prod.x = c2, prod.y = c3, prod = c4, Total = c5)
  }else newrow <- data.frame(cat = c1, prod.x = c2, prod.y = c3, prod = c4, Total = c5)
  temp.a <- rbind(temp.a, newrow)
  return(temp.a)
}
ft.ca.sz.500001 <- createfuture(170000,500001)
ft.ca.sz.500003 <- createfuture(170000,500003)
ft.ca.sz.500004 <- createfuture(170000,500004)
ft.ca.cat.500001 <- createfuture(170000,500001,n=2)
ft.ca.cat.500003 <- createfuture(170000,500003,n=2)
ft.ca.cat.500004 <- createfuture(170000,500004,n=2)
ft.or.sz.500001 <- createfuture(160000,500001)
ft.or.sz.500003 <- createfuture(160000,500003)
ft.or.sz.500004 <- createfuture(160000,500004)
ft.or.cat.500001 <- createfuture(160000,500001,n=2)
ft.or.cat.500003 <- createfuture(160000,500003,n=2)
ft.or.cat.500004 <- createfuture(160000,500004,n=2)
ft.ga.sz.500001 <- createfuture(550000,500001)
ft.ga.sz.500002 <- createfuture(550000,500002)
ft.ga.cat.500001 <- createfuture(550000,500001,n=2)
ft.ga.cat.500002 <- createfuture(550000,500002,n=2)
###########################################
#Propagation
############################################
##########################################
for(i in grow.loc.rng){
#mo data
mo.1 <- as.numeric(format(Sys.Date(),"%m")) - 1
#december handler
if(mo.1 == 0) mo.1 <- 12
mo <- mo.1
nam <- paste("prop.mo.",grow.loc[i],sep="")
p.mo <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
list(orig.qty=sum(qty),prod=sum(plan),grss=sum(grpr)),
keyby = st.op]
p.mo[is.na(p.mo)]<-0
p.mo$perc <- p.mo$grss/p.mo$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(p.mo$orig.qty)
c3 <- sum(p.mo$prod)
c4 <- sum(p.mo$grss)
c5 <- c4/c3
newrow <- data.frame(st.op = c1, orig.qty = c2, prod = c3, grss = c4, perc = c5)
p.mo <- rbind(p.mo, newrow)
assign(nam,p.mo)
#yr data
nam <- paste("prop.yr.",grow.loc[i],sep="")
p.yr <- a[loc == grow.loc[i] & year == yr & month <= mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
list(orig.qty=sum(qty),prod=sum(plan)),
keyby = st.op]
temp.a <- a[loc == grow.loc[i] & lb.yr == yr & lb.mo <= mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
list(grss=sum(grpr)),
keyby = st.op]
p.yr <- merge(p.yr,temp.a,by="st.op",all=TRUE)
p.yr[is.na(p.yr)]<-0
p.yr$perc <- p.yr$grss/p.yr$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(p.yr$orig.qty)
c3 <- sum(p.yr$prod)
c4 <- sum(p.yr$grss)
c5 <- c4/c3
newrow <- data.frame(st.op = c1, orig.qty = c2, prod = c3, grss = c4, perc = c5)
p.yr <- rbind(p.yr, newrow)
assign(nam,p.yr)
#ft data
mo <- as.numeric(mo.1) + 1
if(mo == 13){
mo <- 1
}else if(mo == 14){
mo <- 2
}else if(mo == 15){
mo <- 3
}
nam <- paste("prop.ft.",grow.loc[i],sep="")
temp.a <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
list(prod=sum(plan)),
keyby = st.op]
mo <- as.numeric(mo.1) + 2
if(mo == 13){
mo <- 1
}else if(mo == 14){
mo <- 2
}else if(mo == 15){
mo <- 3
}
temp.b <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
list(prod=sum(plan)),
keyby = st.op]
mo <- as.numeric(mo.1) + 3
if(mo == 13){
mo <- 1
}else if(mo == 14){
mo <- 2
}else if(mo == 15){
mo <- 3
}
temp.c <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
list(prod=sum(plan)),
keyby = st.op]
temp.a <- merge(temp.a,temp.b,by="st.op",all=TRUE)
temp.a <- merge(temp.a, temp.c,by="st.op",all=TRUE)
temp.a[is.na(temp.a)]<-0
temp.a$sums <- temp.a$prod.x + temp.a$prod.y + temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$prod.x)
c3 <- sum(temp.a$prod.y)
c4 <- sum(temp.a$prod)
c5 <- c2 + c3 + c4
newrow <- data.frame(st.op = c1, prod.x = c2, prod.y = c3, prod = c4, sums = c5)
temp.a <- rbind(temp.a, newrow)
assign(nam,temp.a)
}
###########################################
#write files to folder
###########################################
#############################################
#ca by size
##ga 1
ca.sz.1 <- merge(mo.ca.sz.500001,yr.ca.sz.500001,by="size",all=TRUE)
ca.sz.1 <- merge(ca.sz.1,ft.ca.sz.500001,by="size",all=TRUE)
ca.sz.1[is.na(ca.sz.1)] <- 0
write.csv(ca.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.1.csv")
##ga 3
ca.sz.3 <- merge(mo.ca.sz.500003,yr.ca.sz.500003,by="size",all=TRUE)
ca.sz.3 <- merge(ca.sz.3,ft.ca.sz.500003,by="size",all=TRUE)
ca.sz.3[is.na(ca.sz.3)] <- 0
write.csv(ca.sz.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.3.csv")
##ga 4
ca.sz.4 <- merge(mo.ca.sz.500004,yr.ca.sz.500004,by="size",all=TRUE)
ca.sz.4 <- merge(ca.sz.4,ft.ca.sz.500004,by="size",all=TRUE)
ca.sz.4[is.na(ca.sz.4)] <- 0
write.csv(ca.sz.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.4.csv")
#ca by cat
##ga 1
ca.cat.1 <- merge(mo.ca.cat.500001,yr.ca.cat.500001,by="cat",all=TRUE)
ca.cat.1 <- merge(ca.cat.1,ft.ca.cat.500001,by="cat",all=TRUE)
ca.cat.1[is.na(ca.cat.1)] <- 0
write.csv(ca.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.1.csv")
##ga 3
ca.cat.3 <- merge(mo.ca.cat.500003,yr.ca.cat.500003,by="cat",all=TRUE)
ca.cat.3 <- merge(ca.cat.3,ft.ca.cat.500003,by="cat",all=TRUE)
ca.cat.3[is.na(ca.cat.3)] <- 0
write.csv(ca.cat.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.3.csv")
##ga 4
ca.cat.4 <- merge(mo.ca.cat.500004,yr.ca.cat.500004,by="cat",all=TRUE)
ca.cat.4 <- merge(ca.cat.4,ft.ca.cat.500004,by="cat",all=TRUE)
ca.cat.4[is.na(ca.cat.4)] <- 0
write.csv(ca.cat.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.4.csv")
##prop
ca.prop <- merge(prop.mo.170000,prop.yr.170000,by="st.op",all=TRUE)
ca.prop <- merge(ca.prop,prop.ft.170000,by="st.op",all=TRUE)
ca.prop[is.na(ca.prop)] <- 0
write.csv(ca.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.zprop.csv")
#or by size
or.sz.1 <- merge(mo.or.sz.500001,yr.or.sz.500001,by="size",all=TRUE)
or.sz.1 <- merge(or.sz.1,ft.or.sz.500001,by="size",all=TRUE)
or.sz.1[is.na(or.sz.1)] <- 0
write.csv(or.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.1.csv")
##ga 3
or.sz.3 <- merge(mo.or.sz.500003,yr.or.sz.500003,by="size",all=TRUE)
or.sz.3 <- merge(or.sz.3,ft.or.sz.500003,by="size",all=TRUE)
or.sz.3[is.na(or.sz.3)] <- 0
write.csv(or.sz.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.3.csv")
##ga 4
or.sz.4 <- merge(mo.or.sz.500004,yr.or.sz.500004,by="size",all=TRUE)
or.sz.4 <- merge(or.sz.4,ft.or.sz.500004,by="size",all=TRUE)
or.sz.4[is.na(or.sz.4)] <- 0
write.csv(or.sz.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.4.csv")
#or by cat
##ga 1
or.cat.1 <- merge(mo.or.cat.500001,yr.or.cat.500001,by="cat",all=TRUE)
or.cat.1 <- merge(or.cat.1,ft.or.cat.500001,by="cat",all=TRUE)
or.cat.1[is.na(or.cat.1)] <- 0
write.csv(or.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.1.csv")
##ga 3
or.cat.3 <- merge(mo.or.cat.500003,yr.or.cat.500003,by="cat",all=TRUE)
or.cat.3 <- merge(or.cat.3,ft.or.cat.500003,by="cat",all=TRUE)
or.cat.3[is.na(or.cat.3)] <- 0
write.csv(or.cat.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.3.csv")
##ga 4
or.cat.4 <- merge(mo.or.cat.500004,yr.or.cat.500004,by="cat",all=TRUE)
or.cat.4 <- merge(or.cat.4,ft.or.cat.500004,by="cat",all=TRUE)
or.cat.4[is.na(or.cat.4)] <- 0
write.csv(or.cat.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.4.csv")
##prop
or.prop <- merge(prop.mo.160000,prop.yr.160000,by="st.op",all=TRUE)
or.prop <- merge(or.prop,prop.ft.160000,by="st.op",all=TRUE)
or.prop[is.na(or.prop)] <- 0
write.csv(or.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.zprop.csv")
#ga by size
#ga 1
ga.sz.1 <- merge(mo.ga.sz.500001,yr.ga.sz.500001,by="size",all=TRUE)
ga.sz.1 <- merge(ga.sz.1,ft.ga.sz.500001,by="size",all=TRUE)
ga.sz.1[is.na(ga.sz.1)] <- 0
write.csv(ga.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.sz.1.csv")
##ga 2
ga.sz.2 <- merge(mo.ga.sz.500002,yr.ga.sz.500002,by="size",all=TRUE)
ga.sz.2 <- merge(ga.sz.2,ft.ga.sz.500002,by="size",all=TRUE)
ga.sz.2[is.na(ga.sz.2)] <- 0
write.csv(ga.sz.2,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.sz.2.csv")
#ga by cat
##ga 1
ga.cat.1 <- merge(mo.ga.cat.500001,yr.ga.cat.500001,by="cat",all=TRUE)
ga.cat.1 <- merge(ga.cat.1,ft.ga.cat.500001,by="cat",all=TRUE)
ga.cat.1[is.na(ga.cat.1)] <- 0
write.csv(ga.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.cat.1.csv")
##ga 2
ga.cat.2 <- merge(mo.ga.cat.500002,yr.ga.cat.500002,by="cat",all=TRUE)
ga.cat.2 <- merge(ga.cat.2,ft.ga.cat.500002,by="cat",all=TRUE)
ga.cat.2[is.na(ga.cat.2)] <- 0
write.csv(ga.cat.2,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.cat.2.csv")
##prop
ga.prop <- merge(prop.mo.550000,prop.yr.550000,by="st.op",all=TRUE)
ga.prop <- merge(ga.prop,prop.ft.550000,by="st.op",all=TRUE)
ga.prop[is.na(ga.prop)] <- 0
write.csv(ga.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.zprop.csv")
###########################################
#SUMMARIES
####################################
####################################
mo.1 <- as.numeric(format(Sys.Date(),"%m"))-1
#december handler
if(mo.1 == 0) mo.1 <- 12
mo <- mo.1
##################################
#summary by mo
########################################
#####################################
for(i in grow.loc.rng){
nam <- paste("mo.", grow.loc[i],".summary",sep="")
temp.a <- a[loc == grow.loc[i] & year == yr & month == mo & wo != 15 & wo != 99 & wo != 0,
list(orig.qty=sum(equ),grss=sum(gre)),
keyby = grw]
#get plan from planbygr.csv file
temp.a1 <- plan.grower[LOCATION==grow.loc[i] & MONTH==mo, list(prod=sum(PLAN)),by=grw]
temp.a <- data.table(grw=temp.a$grw,orig.qty=temp.a$orig.qty,prod=temp.a1$prod,grss=temp.a$grss)
temp.a$perc.comp <- temp.a$grss/temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$orig.qty)
c3 <- sum(temp.a$prod)
c4 <- sum(temp.a$grss)
c5 <- c4/c3
newrow <- data.frame(grw = c1, orig.qty = c2, prod = c3, grss = c4, perc.comp = c5)
temp.a <- rbind(temp.a, newrow)
assign(nam,temp.a)
}
##################################
#summary by yr
########################################
#########################################
for(i in grow.loc.rng){
nam <- paste("yr.", grow.loc[i],".summary",sep="")
temp.a <- a[loc == grow.loc[i] & year == yr & month <= mo & wo != 15 & wo != 99 & wo != 0,
list(orig.qty=sum(equ)),
keyby = grw]
#get plan from planbygr.csv file
temp.a1 <- plan.grower[LOCATION==grow.loc[i] & MONTH<=mo, list(prod=sum(PLAN)),by=grw]
temp.b <- a[loc == grow.loc[i] & lb.yr == yr & lb.mo <= mo & wo != 15 & wo != 99 & wo != 0,
list(grss=sum(gre)),
keyby = grw]
temp.a <- data.table(grw=temp.a$grw,orig.qty=temp.a$orig.qty,prod=temp.a1$prod,grss=temp.b$grss)
temp.a$perc.comp <- temp.a$grss/temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$orig.qty)
c3 <- sum(temp.a$prod)
c4 <- sum(temp.a$grss)
c5 <- c4/c3
newrow <- data.frame(grw = c1, orig.qty = c2, prod = c3, grss = c4, perc.comp = c5)
temp.a <- rbind(temp.a, newrow)
assign(nam,temp.a)
}
##################################
#summary by ft
########################################
######################################
for(i in or.ca.rng){
mo.1 <- as.numeric(format(Sys.Date(),"%m"))
mo <- mo.1
nam <- paste("ft.",grow.loc[i],".summary",sep="")
temp.a <- a[loc == grow.loc[i] & year == yr & month == mo,
list(prod=sum(equ.pr)),
keyby = grw]
mo <- as.numeric(mo.1) + 1
if(mo == 13){
mo <- 1
}else if(mo == 14){
mo <- 2
}else if(mo == 15){
mo <- 3
}
temp.b <- a[loc == grow.loc[i] & year == yr & month == mo,
list(prod=sum(equ.pr)),
keyby = grw]
mo <- as.numeric(mo.1) + 2
if(mo == 13){
mo <- 1
}else if(mo == 14){
mo <- 2
}else if(mo == 15){
mo <- 3
}
temp.c <- a[loc == grow.loc[i] & year == yr & month == mo,
list(prod=sum(equ.pr)),
keyby = grw]
temp.a <- merge(temp.a,temp.b,by="grw",all=TRUE)
temp.a <- merge(temp.a, temp.c,by="grw",all=TRUE)
temp.a[is.na(temp.a)]<-0
temp.a$Total <- temp.a$prod.x + temp.a$prod.y + temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$prod.x)
c3 <- sum(temp.a$prod.y)
c4 <- sum(temp.a$prod)
c5 <- sum(temp.a$Total)
newrow <- data.frame(grw = c1, prod.x = c2, prod.y = c3, prod = c4, Total = c5)
temp.a <- rbind(temp.a, newrow)
assign(nam,temp.a)
}
##################################
#merge summaries and write to csv
##################################
##################################
ca.summary <- merge(mo.170000.summary,yr.170000.summary,by="grw",all=TRUE)
ca.summary <- merge(ca.summary,ft.170000.summary,by="grw",all=TRUE)
ca.summary[is.na(ca.summary)] <- 0
write.csv(ca.summary,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.summary.csv")
or.summary <- merge(mo.160000.summary,yr.160000.summary,by="grw",all=TRUE)
or.summary <- merge(or.summary,ft.160000.summary,by="grw",all=TRUE)
or.summary[is.na(or.summary)] <- 0
write.csv(or.summary,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.summary.csv")
ga.summary <- merge(mo.550000.summary,yr.550000.summary,by="grw",all=TRUE)
ga.summary <- merge(ga.summary,ft.550000.summary,by="grw",all=TRUE)
ga.summary[is.na(ga.summary)] <- 0
write.csv(ga.summary,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.summary.csv")
######################################
#all locations summary
######################################
loc.sums <- list(or.summary,ca.summary,ga.summary)
loc.nams <- c("Oregon","California","Georgia")
temp.tab <- or.summary[orig.qty.x < 0,]
for(i in 1:length(loc.sums)){
temp.a <- loc.sums[[i]][nrow(loc.sums[[i]]),]
temp.a[1, 1] <- loc.nams[i]
temp.tab <- rbind(temp.tab,temp.a)
}
####READ IN PLAN DATA AND REPLACE CURRENT PLAN####
#GET ACTUAL PLAN DATA AS OF BEGINNING OF YEAR
curr.month <- as.numeric(format(Sys.Date(),"%m"))-1
temp.tab <- as.data.table(temp.tab)
#replace current month plan numbers
temp.tab[[3]][1] <- plan.data$OR_SEP[curr.month]
temp.tab[[3]][2] <- plan.data$CA_SEP[curr.month]
temp.tab[[3]][3] <- plan.data$GA_SEP[curr.month]
temp.tab[[7]][1] <- plan.data$OREGON[curr.month]
temp.tab[[7]][2] <- plan.data$CALIFORNIA[curr.month]
temp.tab[[7]][3] <- plan.data$GEORGIA[curr.month]
#update percent complete colums
temp.tab[[5]] <- temp.tab[[4]]/temp.tab[[3]]
temp.tab[[9]] <- temp.tab[[8]]/temp.tab[[7]]
#Add totals row
totals.row <- data.table("Totals",sum(temp.tab[[2]]),sum(temp.tab[[3]]),sum(temp.tab[[4]]),
round(sum(temp.tab[[4]])/sum(temp.tab[[3]]),4),
sum(temp.tab[[6]]),sum(temp.tab[[7]]),sum(temp.tab[[8]]),
round(sum(temp.tab[[8]])/sum(temp.tab[[7]]),4),
sum(temp.tab[[10]]),sum(temp.tab[[11]]),sum(temp.tab[[12]]),sum(temp.tab[[13]]))
all.locs.totals <- rbind(temp.tab,totals.row,use.names=FALSE)
write.csv(all.locs.totals,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/all.locs.total.csv")
###########################################
#CUMULATIVE CHARTS
###########################################
#change year to last year
yr <- as.numeric(format(Sys.Date(),"%y")) - 1
mo.labs <-c("1","2","3","4","5","6","7","8","9","10","11","12")
gr.areas <- c(500001,500003,500004,500007)
ga.gr.areas <- c(500001,500002,500007)
gr.labs <- c("1","3","4","7")
ga.gr.labs <- c("1","2","7")
#function for all months last year completed
# Monthly gross production (EQU) per grower for one location across all
# 12 labor months of `yr`, one column per month.
#
# @param locs    numeric location code.
# @param yr      two-digit labor year to sum over.
# @param mo.labs character vector of month labels "1".."12".
# @param a       work-order detail data.table (uses loc/lb.yr/lb.mo/gre/wo/grw).
# @return data.table: grw plus 12 monthly gross columns; NAs introduced by
#   the outer merges are replaced with 0.
moFunc <- function(locs, yr, mo.labs, a){
  for(i in 1:12){
    # Gross EQU produced in labor month i; statuses 15 and 99 excluded.
    oldProd <- a[loc == locs & lb.yr == yr & lb.mo == mo.labs[i] & wo != 15 & wo != 99,
                 list(gross=sum(gre)),
                 keyby = grw]
    if(i == 1){
      one <- oldProd
    } else one <- merge(one, oldProd,by="grw",all=TRUE)
    # Rename the freshly merged column after its month label.
    newnm <- as.character(mo.labs[i])
    setnames(one,"gross",newnm)
  }
  one[is.na(one)]<-0
  return(one)
}
#function for all months this year planned
# Month-by-month planned production (EQU) for one location, one column per
# month of the current year.  Months 1..(current month - 1) come from the
# season plan file (global `plan.grower`); the current month through
# December come from the live work-order plan in `a` (column equ.pr).
#
# @param locs    numeric location code.
# @param yr      two-digit year used to filter `a`.
# @param mo.labs character vector of month labels "1".."12".
# @param a       work-order detail data.table.
# @return data.table: grw plus one numeric column per month, with NA gaps
#   from the outer merges filled with 0.
moFuncEQU <- function(locs, yr, mo.labs, a){
  # Number of already-completed months this year.  Previously the first
  # loop's bound read the GLOBAL `mo` (the local `mo` was only assigned
  # after the loop); computing it up front removes that hidden global
  # dependency without changing the value the surrounding script supplies.
  mo <- as.numeric(format(Sys.Date(),"%m")) - 1
  # NOTE(review): in January mo is 0, so 1:mo iterates c(1, 0); seq_len(mo)
  # would be safer but changes behaviour -- confirm before changing.
  for(i in 1:mo){
    oldProd <- plan.grower[LOCATION == locs & MONTH == mo.labs[i],
                           list(plan=sum(PLAN)), keyby = grw]
    if(i == 1){
      one <- oldProd
    } else one <- merge(one, oldProd,by="grw",all=TRUE)
    newnm <- as.character(mo.labs[i])
    setnames(one,"plan",newnm)
  }
  # Remaining months: take the planned EQU straight from the work orders.
  mo.1 <- mo + 1
  for(i in mo.1:12){
    oldProd <- a[loc == locs & year == yr & month == mo.labs[i],
                 list(plan=sum(equ.pr)),
                 keyby = grw]
    one <- merge(one, oldProd,by="grw",all=TRUE)
    newnm <- as.character(mo.labs[i])
    setnames(one,"plan",newnm)
  }
  one[is.na(one)]<-0
  return(one)
}
#ca
ca.old <- moFunc(170000,yr,mo.labs,a)
#or
or.old <- moFunc(160000,yr,mo.labs,a)
#ga
ga.old <- moFunc(550000,yr,mo.labs,a)
######################################
#last year Oregon
######################################
#separate growing areas into own vector
for(i in 1:length(gr.areas)){
area.num <- as.vector(t(subset(or.old,grw==gr.areas[i],select=mo.labs)))
gra <- gr.areas[i]
nam <- paste("or.LY.",gr.labs[i],sep="")
assign(nam,area.num)
}
######################################
#last year california
######################################
#separate growing areas into own vector
for(i in 1:length(gr.areas)){
area.num <- as.vector(t(subset(ca.old,grw==gr.areas[i],select=mo.labs)))
gra <- gr.areas[i]
nam <- paste("ca.LY.",gr.labs[i],sep="")
assign(nam,area.num)
}
######################################
#last year georgia
######################################
#separate growing areas into own vector
for(i in 1:length(ga.gr.areas)){
area.num <- as.vector(t(subset(ga.old,grw==ga.gr.areas[i],select=mo.labs)))
gra <- gr.areas[i]
nam <- paste("ga.LY.",ga.gr.labs[i],sep="")
assign(nam,area.num)
}
######################################
#this year prod plan
######################################
#setup all locations
#change year to current year
yr <- as.numeric(format(Sys.Date(),"%y"))
# Prior month; NOTE(review): this is 0 when run in January -- confirm
# moFuncEQU (defined earlier in the file) tolerates mo == 0.
mo <- as.numeric(format(Sys.Date(),"%m"))-1
# Current-year plan tables per location (EQU units), rows = growers,
# columns = months.
#ca
ca.plan <- moFuncEQU(170000,yr,mo.labs,a)
#or
or.plan <- moFuncEQU(160000,yr,mo.labs,a)
#ga
ga.plan <- moFuncEQU(550000,yr,mo.labs,a)
#separate growing areas into own vector
# Same flattening pattern as the LY section: one global vector per growing
# area, named e.g. or.plan.1.
#oregon
for(i in 1:length(gr.areas)){
  area.num <- as.vector(t(subset(or.plan,grw==gr.areas[i],select=mo.labs)))
  nam <- paste("or.plan.",gr.labs[i],sep="")
  assign(nam,area.num)
}
#california
for(i in 1:length(gr.areas)){
  area.num <- as.vector(t(subset(ca.plan,grw==gr.areas[i],select=mo.labs)))
  nam <- paste("ca.plan.",gr.labs[i],sep="")
  assign(nam,area.num)
}
#georgia
for(i in 1:length(ga.gr.areas)){
  area.num <- as.vector(t(subset(ga.plan,grw==ga.gr.areas[i],select=mo.labs)))
  nam <- paste("ga.plan.",ga.gr.labs[i],sep="")
  assign(nam,area.num)
}
######################################
#YTD numbers
######################################
# Number of completed months this calendar year.
# NOTE(review): 0 in January, which makes morng the degenerate 1:0 = c(1, 0)
# and mo.labs[1:0] -- confirm this script is never run in January.
mo.cnt <- as.numeric(format(Sys.Date(),"%m"))-1
morng <- 1:mo.cnt
mo.labs.2 <- mo.labs[1:mo.cnt]
# NOTE(review): year hard-coded to 14 (2014) while the rest of the script
# derives yr from Sys.Date() -- looks stale; confirm intent.
yr <- 14
# Build a wide year-to-date gross-production table for one location:
# one row per grower (grw), one column per completed month in `rng`.
# Gross quantities come from labor-dated rows, excluding WO statuses 15/99.
moFuncYTD <- function(locs, yr, mo.labs, a, rng){
  out <- NULL
  for (idx in rng) {
    month.tab <- a[loc == locs & lb.yr == yr & lb.mo == mo.labs[idx] & wo != 15 & wo != 99,
                   list(gross = sum(gre)),
                   keyby = grw]
    # Rename the value column to the month label before merging so each
    # month lands in its own column.
    setnames(month.tab, "gross", mo.labs[idx])
    out <- if (is.null(out)) month.tab else merge(out, month.tab, by = "grw", all = TRUE)
  }
  # Growers with no activity in a month show up as NA after the outer
  # merges; report them as zero production.
  out[is.na(out)] <- 0
  return(out)
}
# Year-to-date gross tables per location (rows = growers, columns = months).
or.YTD <- moFuncYTD(160000, yr, mo.labs, a, morng)
ca.YTD <- moFuncYTD(170000, yr, mo.labs, a, morng)
ga.YTD <- moFuncYTD(550000, yr, mo.labs, a, morng)
##separate growing areas into own vector
#oregon
for(i in 1:length(gr.areas)){
  area.num <- as.vector(t(subset(or.YTD,grw==gr.areas[i],select=mo.labs.2)))
  nam <- paste("or.YTD.",gr.labs[i],sep="")
  assign(nam,area.num)
}
#california
for(i in 1:length(gr.areas)){
  area.num <- as.vector(t(subset(ca.YTD,grw==gr.areas[i],select=mo.labs.2)))
  nam <- paste("ca.YTD.",gr.labs[i],sep="")
  assign(nam,area.num)
}
#georgia
# BUG FIX: this loop previously subset or.YTD (Oregon's table), so every
# ga.YTD.* vector held Oregon data. It must read ga.YTD, matching the
# plan/LY georgia loops above.
for(i in 1:length(ga.gr.areas)){
  area.num <- as.vector(t(subset(ga.YTD,grw==ga.gr.areas[i],select=mo.labs.2)))
  nam <- paste("ga.YTD.",ga.gr.labs[i],sep="")
  assign(nam,area.num)
}
################################
#for plotting
################################
# Running (cumulative) totals over the first 12 monthly values of `area`.
# Accepts anything indexable with [[ ]] (numeric vector or list of scalars);
# always returns a length-12 double vector.
addsums <- function(area){
  running <- 0
  totals <- numeric(12)
  for (k in seq_len(12)) {
    running <- running + area[[k]]
    totals[k] <- running
  }
  totals
}
####################################
#LY cumulative
####################################
# One cumulative last-year vector per growing area, assigned to generated
# names such as or.ly.1.cum (suffix from gr.labs / ga.gr.labs).
#oregon LY cumulative vectors
or.loc.vec <- list(or.LY.1, or.LY.3, or.LY.4, or.LY.7)
for (i in seq_along(gr.areas)) {
  assign(paste0("or.ly.", gr.labs[[i]], ".cum"), addsums(or.loc.vec[[i]]))
}
remove(or.loc.vec)
#california LY cumulative vectors
ca.loc.vec <- list(ca.LY.1, ca.LY.3, ca.LY.4, ca.LY.7)
for (i in seq_along(gr.areas)) {
  assign(paste0("ca.ly.", gr.labs[[i]], ".cum"), addsums(ca.loc.vec[[i]]))
}
remove(ca.loc.vec)
#georgia LY cumulative vectors
ga.loc.vec <- list(ga.LY.1, ga.LY.2, ga.LY.7)
for (i in seq_along(ga.gr.areas)) {
  assign(paste0("ga.ly.", ga.gr.labs[[i]], ".cum"), addsums(ga.loc.vec[[i]]))
}
remove(ga.loc.vec)
#######################################
#plan cumulative
#######################################
# One cumulative plan vector per growing area, assigned to generated names
# such as or.plan.1.cum.
#oregon plan cumulative vectors
or.loc.vec <- list(or.plan.1, or.plan.3, or.plan.4, or.plan.7)
for (i in seq_along(gr.areas)) {
  assign(paste0("or.plan.", gr.labs[[i]], ".cum"), addsums(or.loc.vec[[i]]))
}
remove(or.loc.vec)
#california plan cumulative vectors
ca.loc.vec <- list(ca.plan.1, ca.plan.3, ca.plan.4, ca.plan.7)
for (i in seq_along(gr.areas)) {
  assign(paste0("ca.plan.", gr.labs[[i]], ".cum"), addsums(ca.loc.vec[[i]]))
}
remove(ca.loc.vec)
#georgia plan cumulative vectors
ga.loc.vec <- list(ga.plan.1, ga.plan.2, ga.plan.7)
for (i in seq_along(ga.gr.areas)) {
  assign(paste0("ga.plan.", ga.gr.labs[[i]], ".cum"), addsums(ga.loc.vec[[i]]))
}
remove(ga.loc.vec)
#######################################
#YTD cumulative
#######################################
# Running (cumulative) totals over the first `months.cnt` values of `area`.
# Year-to-date variant of addsums(); returns a double vector of length
# `months.cnt` (length 0 when months.cnt is 0).
addsumsYTD <- function(area, months.cnt){
  running <- 0
  totals <- numeric(months.cnt)
  for (k in seq_len(months.cnt)) {
    running <- running + area[[k]]
    totals[k] <- running
  }
  totals
}
# One cumulative year-to-date vector per growing area (first mo.cnt months),
# assigned to generated names such as or.YTD.1.cum.
#oregon YTD cumulative vectors
or.loc.vec <- list(or.YTD.1, or.YTD.3, or.YTD.4, or.YTD.7)
for (i in seq_along(gr.areas)) {
  assign(paste0("or.YTD.", gr.labs[[i]], ".cum"), addsumsYTD(or.loc.vec[[i]], mo.cnt))
}
remove(or.loc.vec)
#california YTD cumulative vectors
ca.loc.vec <- list(ca.YTD.1, ca.YTD.3, ca.YTD.4, ca.YTD.7)
for (i in seq_along(gr.areas)) {
  assign(paste0("ca.YTD.", gr.labs[[i]], ".cum"), addsumsYTD(ca.loc.vec[[i]], mo.cnt))
}
remove(ca.loc.vec)
#georgia YTD cumulative vectors
ga.loc.vec <- list(ga.YTD.1, ga.YTD.2, ga.YTD.7)
for (i in seq_along(ga.gr.areas)) {
  assign(paste0("ga.YTD.", ga.gr.labs[[i]], ".cum"), addsumsYTD(ga.loc.vec[[i]], mo.cnt))
}
remove(ga.loc.vec)
#############################################
#location totals
#############################################
##Function for getting cumulative by month for each location
# Location-level cumulative monthly totals.
# `dframe`: wide table whose first column is the grower key and whose
# remaining columns are one month each.
# Returns an unnamed double vector (length = number of month columns) of
# running totals summed across all growers.
# Replaces the old O(n^2) double loop, whose `2:mos-1` range (parsed as
# (2:mos)-1) mis-seeded element 1 and then patched it by overwriting
# colsCum[[1]] afterwards; it also failed on a single-month table.
loc.tot.vec <- function(dframe){
  n.months <- length(names(dframe)) - 1
  # Total each month's column across all growers; dframe[[j]] works for both
  # data.frame and data.table inputs.
  monthly <- vapply(seq_len(n.months) + 1,
                    function(j) sum(as.numeric(dframe[[j]])),
                    numeric(1))
  cumsum(monthly)
}
# Location totals: plan totals come straight from the plan.data spreadsheet
# (presumably already cumulative by month -- TODO confirm against
# loc.tot.vec output); LY and YTD totals are computed from the wide tables.
#or.plan.tots <- loc.tot.vec(or.plan)
or.plan.tots <- as.vector(plan.data$OREGON)
or.ly.tots <- loc.tot.vec(or.old)
or.YTD.tots <- loc.tot.vec(or.YTD)
#ca.plan.tots <- loc.tot.vec(ca.plan)
ca.plan.tots <- as.vector(plan.data$CALIFORNIA)
ca.ly.tots <- loc.tot.vec(ca.old)
ca.YTD.tots <- loc.tot.vec(ca.YTD)
#ga.plan.tots <- loc.tot.vec(ga.plan)
ga.plan.tots <- as.vector(plan.data$GEORGIA)
ga.ly.tots <- loc.tot.vec(ga.old)
ga.YTD.tots <- loc.tot.vec(ga.YTD)
# Company-wide series = elementwise sums across the three locations.
all.locs.ly <- or.ly.tots + ca.ly.tots + ga.ly.tots
all.locs.ytd <- or.YTD.tots + ca.YTD.tots + ga.YTD.tots
all.locs.plan <- or.plan.tots + ca.plan.tots + ga.plan.tots
##############################################
#plotting
##############################################
#Get YTD % complete for each location
# NOTE(review): all.locs.totals is not defined anywhere in this section --
# it must come from elsewhere in the file; verify it exists (element 9 is
# read as the fraction-complete column) before these lines run.
or.perc.comp <- paste(round((all.locs.totals[[9]][1])*100,digits=2),"%",sep="")
ca.perc.comp <- paste(round((all.locs.totals[[9]][2])*100,digits=2),"%",sep="")
ga.perc.comp <- paste(round((all.locs.totals[[9]][3])*100,digits=2),"%",sep="")
all.perc.comp <- paste(round((all.locs.totals[[9]][4])*100,digits=2),"%",sep="")
# Draw one location's monthly cumulative canning chart: last year (dotted
# grey), this year's plan (solid grey) and this year's actuals (red).
# `plan`, `ly`, `ytd`: numeric vectors of cumulative monthly totals.
# `perc.comp`: preformatted "xx.xx%" string shown beneath the title.
# The location name is inferred from the *expression* passed as `plan`
# (e.g. or.plan.tots -> "or"), so callers must pass named objects.
canplots <- function(plan,ly,ytd,perc.comp){
  #create empty plot scaled to whichever series reaches higher
  if (max(plan) > max(ly)) {
    plotmat <- plan
  } else {
    plotmat <- ly
  }
  par(mar = c(5, 4, 5, 2) + 0.1)
  plot(plotmat,
       type = "n",
       lwd = 2,
       lty = 1,
       yaxt = "n",
       ylab = "",
       xlab = "Months")
  locs <- substr(deparse(substitute(plan)), 1, 2)
  if (locs == "or") {
    loc <- "Oregon"
  } else if (locs == "ca") {
    loc <- "California"
  } else if (locs == "ga") {
    loc <- "Georgia"
  } else {
    loc <- "All Locations"
  }
  topTitle <- paste(loc, " Canning by Month", sep = "")
  # paste0 fixes the doubled space the old paste() default separator added
  # before " of Total Year Plan Complete".
  lowTitle <- paste0(perc.comp, " of Total Year Plan Complete")
  title(topTitle, cex.main = 1.8, font.main = 1.5, col.main = "darkgreen")
  mtext(lowTitle, cex = 1.1)
  #add ablines and ticks: gridlines/labels every 1M up to 40M, generated
  #instead of the old 40-element hand-typed vectors
  ylns <- seq(1e6, 40e6, by = 1e6)
  ytxt <- paste0(seq_len(40), "M")
  axis(2, at = ylns, labels = ytxt, las = 1)
  abline(h = ylns, col = "gray88", lty = "dashed")
  abline(v = 1:12, col = "gray88", lty = "dashed")
  #add lines
  lines(ly, col = "grey", lwd = 2, lty = "dotted")
  lines(plan, col = "azure4", lwd = 2)
  lines(ytd, col = "red", lwd = 5)
  # Legend years follow the system clock instead of the old hard-coded
  # "2013"/"2014" labels, which had gone stale.
  cur.yr <- as.numeric(format(Sys.Date(), "%Y"))
  legend("topleft",
         c(as.character(cur.yr - 1),
           paste(cur.yr, "Planned"),
           paste(cur.yr, "YTD")),
         col = c("grey", "azure4", "red"),
         lty = c(3, 1, 1),
         lwd = c(2, 2, 5),
         cex = 0.5)
}
# Render one chart per location plus the combined view. canplots() draws as
# a side effect; the assigned values are just the return of its final
# legend() call and do not appear to be used later -- TODO confirm.
orplt <- canplots(or.plan.tots,or.ly.tots,or.YTD.tots,or.perc.comp)
caplt <- canplots(ca.plan.tots,ca.ly.tots,ca.YTD.tots,ca.perc.comp)
gaplt <- canplots(ga.plan.tots,ga.ly.tots,ga.YTD.tots,ga.perc.comp)
allplt <- canplots(all.locs.plan,all.locs.ly,all.locs.ytd,all.perc.comp)
#############################################
#EQU DATA
#############################################
#############################################
yr <- as.numeric(format(Sys.Date(),"%y"))
#function to create dataframe for a given location and growing area for complete YTD or just month
# Summarises the global table `a` (EQU units) for one location/growing area:
#   year=FALSE -> prior month only (globals `yr`/`mo` supply the window)
#   year=TRUE  -> year to date; gross comes from labor-dated rows and is
#                 outer-merged onto the order/plan sums
# Rows with WO status 0/15/99 are excluded. A "zTotals" grand-total row is
# appended ("z" keeps it sorted last). Returns a table keyed by size or cat.
createframeEQU <- function(location, GA, n=1, year=FALSE){
  #n is 1 for "size" or 2 for "cat"
  kbys = c("size","cat")
  if(year==TRUE){
    # Ordered/planned EQU by group, start-dated within the year to date.
    temp.a <- a[loc==location & year==yr & month<=mo & grw==GA & wo!=15 & wo!=99 & wo!=0,
                list(orig.qty=sum(equ),prod=sum(equ.pr)),keyby=eval(kbys[n])]
    # Gross produced EQU by group, labor-dated within the year to date.
    temp.b <- a[loc==location & lb.yr==yr & lb.mo<=mo & grw==GA & wo!=15 & wo!=99 & wo!=0,
                list(grss=sum(gre)),keyby=eval(kbys[n])]
    temp.a <- merge(temp.a,temp.b,by=eval(kbys[n]),all=TRUE)
    # Groups present on only one side of the outer merge become NA -> 0.
    temp.a[is.na(temp.a)] <- 0
    temp.a$perc.comp <- temp.a$grss/temp.a$prod
    #add totals row
    c1 <- "zTotals"
    c2 <- sum(temp.a$orig.qty)
    c3 <- sum(temp.a$prod)
    c4 <- sum(temp.a$grss)
    c5 <- c4/c3
    if(n==1){
      newrow <- data.frame(size=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
    }else newrow <- data.frame(cat=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
    temp.a <- rbind(temp.a,newrow)
  }else{
    # Single-month variant: all three measures come from start-dated rows.
    temp.a <- a[loc==location & year==yr & month==mo & grw==GA & wo!=15 & wo!=99 & wo!=0,
                list(orig.qty=sum(equ),prod=sum(equ.pr),grss=sum(gre)),keyby=eval(kbys[n])]
    temp.a$perc.comp <- temp.a$grss/temp.a$prod
    #add totals row
    c1 <- "zTotals"
    c2 <- sum(temp.a$orig.qty)
    c3 <- sum(temp.a$prod)
    c4 <- sum(temp.a$grss)
    c5 <- c4/c3
    if(n==1){
      newrow <- data.frame(size=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
    }else newrow <- data.frame(cat=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
    temp.a <- rbind(temp.a,newrow)
  }
  temp.a
}
# Build the EQU summary frames for every location / growing-area /
# grouping-mode combination. Produces exactly the same global variables the
# old 32 explicit calls did (yr.ca.sz.500001, mo.or.cat.500003, ...).
equ.specs <- list(ca = list(loc = 170000, gas = c(500001, 500003, 500004)),
                  or = list(loc = 160000, gas = c(500001, 500003, 500004)),
                  ga = list(loc = 550000, gas = c(500001, 500002)))
for (lbl in names(equ.specs)) {
  spec <- equ.specs[[lbl]]
  for (g in spec$gas) {
    assign(paste0("yr.", lbl, ".sz.", g), createframeEQU(spec$loc, g, year = TRUE))
    assign(paste0("yr.", lbl, ".cat.", g), createframeEQU(spec$loc, g, n = 2, year = TRUE))
    assign(paste0("mo.", lbl, ".sz.", g), createframeEQU(spec$loc, g))
    assign(paste0("mo.", lbl, ".cat.", g), createframeEQU(spec$loc, g, n = 2))
  }
}
remove(equ.specs, spec, lbl, g)
###########################################
#Propagation
############################################
# Propagation (grower 500007) summaries per location, EQU units: prior
# month (prop.mo.*), year to date (prop.yr.*) and next three months of plan
# (prop.ft.*), each keyed by standard operation with a "zTotals" row.
for(i in grow.loc.rng){
  #mo data
  mo.1 <- as.numeric(format(Sys.Date(),"%m")) - 1
  #december handler
  if(mo.1 == 0) mo.1 <- 12
  mo <- mo.1
  # Fold months 13..15 back to 1..3 for the forward-looking queries; the
  # sibling propagation section already guards this but this one did not.
  # NOTE(review): wrapped months still query the same year -- confirm how
  # next-January plan rows are dated upstream.
  wrap.mo <- function(m) ((m - 1) %% 12) + 1
  nam <- paste("prop.mo.",grow.loc[i],sep="")
  # BUG FIX: year was hard-coded to 14 here (and in the YTD queries below)
  # while the rest of the script derives yr from Sys.Date(); use yr, as the
  # sibling propagation loop does.
  p.mo <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
            list(orig.qty=sum(equ),prod=sum(equ.pr),grss=sum(gre)),
            keyby = st.op]
  p.mo[is.na(p.mo)]<-0
  p.mo$perc <- p.mo$grss/p.mo$prod
  #add totals row ("z" prefix keeps it sorted last)
  c1 <- "zTotals"
  c2 <- sum(p.mo$orig.qty)
  c3 <- sum(p.mo$prod)
  c4 <- sum(p.mo$grss)
  c5 <- c4/c3
  newrow <- data.frame(st.op = c1, orig.qty = c2, prod = c3, grss = c4, perc = c5)
  p.mo <- rbind(p.mo, newrow)
  assign(nam,p.mo)
  #yr data
  nam <- paste("prop.yr.",grow.loc[i],sep="")
  p.yr <- a[loc == grow.loc[i] & year == yr & month <= mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
            list(orig.qty=sum(equ),prod=sum(equ.pr)),
            keyby = st.op]
  temp.a <- a[loc == grow.loc[i] & lb.yr == yr & lb.mo <= mo & grw == 500007 & wo != 15 & wo != 99 & wo!=0,
              list(grss=sum(gre)),
              keyby = st.op]
  p.yr <- merge(p.yr,temp.a,by="st.op",all=TRUE)
  p.yr[is.na(p.yr)]<-0
  # BUG FIX: the YTD percentage was previously computed from the *monthly*
  # table (p.mo$grss/p.mo$prod); it must use p.yr's own columns, matching
  # the sibling propagation loop.
  p.yr$perc <- p.yr$grss/p.yr$prod
  #add totals row
  c1 <- "zTotals"
  c2 <- sum(p.yr$orig.qty)
  c3 <- sum(p.yr$prod)
  c4 <- sum(p.yr$grss)
  c5 <- c4/c3
  newrow <- data.frame(st.op = c1, orig.qty = c2, prod = c3, grss = c4, perc = c5)
  p.yr <- rbind(p.yr, newrow)
  assign(nam,p.yr)
  #ft data: planned EQU for each of the next three months
  nam <- paste("prop.ft.",grow.loc[i],sep="")
  mo <- wrap.mo(as.numeric(mo.1) + 1)
  temp.a <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
              list(prod=sum(equ.pr)),
              keyby = st.op]
  mo <- wrap.mo(as.numeric(mo.1) + 2)
  temp.b <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
              list(prod=sum(equ.pr)),
              keyby = st.op]
  mo <- wrap.mo(as.numeric(mo.1) + 3)
  temp.c <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
              list(prod=sum(equ.pr)),
              keyby = st.op]
  # Outer-merge the three months; merge suffixes yield prod.x/prod.y/prod.
  temp.a <- merge(temp.a,temp.b,by="st.op",all=TRUE)
  temp.a <- merge(temp.a, temp.c,by="st.op",all=TRUE)
  temp.a[is.na(temp.a)]<-0
  temp.a$sums <- temp.a$prod.x + temp.a$prod.y + temp.a$prod
  #add totals row
  c1 <- "zTotals"
  c2 <- sum(temp.a$prod.x)
  c3 <- sum(temp.a$prod.y)
  c4 <- sum(temp.a$prod)
  c5 <- c2 + c3 + c4
  newrow <- data.frame(st.op = c1, prod.x = c2, prod.y = c3, prod = c4, sums = c5)
  temp.a <- rbind(temp.a, newrow)
  assign(nam,temp.a)
}
###########################################
#write files to folder
###########################################
# For each location/growing area: outer-merge the month, YTD and 3-month
# forward frames on the grouping column, zero-fill, and dump to CSV.
# NOTE(review): the ft.* EQU frames used below are only defined *after*
# this section in this file (the createfuture section) -- confirm they are
# created earlier in the full script, otherwise these merges fail at
# runtime. Output paths are hard-coded to one user's Windows desktop.
#ca by size
##ga 1
ca.sz.1 <- merge(mo.ca.sz.500001,yr.ca.sz.500001,by="size",all=TRUE)
ca.sz.1 <- merge(ca.sz.1,ft.ca.sz.500001,by="size",all=TRUE)
ca.sz.1[is.na(ca.sz.1)] <- 0
write.csv(ca.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.1.EQU.csv")
##ga 3
ca.sz.3 <- merge(mo.ca.sz.500003,yr.ca.sz.500003,by="size",all=TRUE)
ca.sz.3 <- merge(ca.sz.3,ft.ca.sz.500003,by="size",all=TRUE)
ca.sz.3[is.na(ca.sz.3)] <- 0
write.csv(ca.sz.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.3.EQU.csv")
##ga 4
ca.sz.4 <- merge(mo.ca.sz.500004,yr.ca.sz.500004,by="size",all=TRUE)
ca.sz.4 <- merge(ca.sz.4,ft.ca.sz.500004,by="size",all=TRUE)
ca.sz.4[is.na(ca.sz.4)] <- 0
write.csv(ca.sz.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.4.EQU.csv")
#ca by cat
##ga 1
ca.cat.1 <- merge(mo.ca.cat.500001,yr.ca.cat.500001,by="cat",all=TRUE)
ca.cat.1 <- merge(ca.cat.1,ft.ca.cat.500001,by="cat",all=TRUE)
ca.cat.1[is.na(ca.cat.1)] <- 0
write.csv(ca.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.1.EQU.csv")
##ga 3
ca.cat.3 <- merge(mo.ca.cat.500003,yr.ca.cat.500003,by="cat",all=TRUE)
ca.cat.3 <- merge(ca.cat.3,ft.ca.cat.500003,by="cat",all=TRUE)
ca.cat.3[is.na(ca.cat.3)] <- 0
write.csv(ca.cat.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.3.EQU.csv")
##ga 4
ca.cat.4 <- merge(mo.ca.cat.500004,yr.ca.cat.500004,by="cat",all=TRUE)
ca.cat.4 <- merge(ca.cat.4,ft.ca.cat.500004,by="cat",all=TRUE)
ca.cat.4[is.na(ca.cat.4)] <- 0
write.csv(ca.cat.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.4.EQU.csv")
##prop ("zprop" name keeps the propagation file sorted last in the folder)
ca.prop <- merge(prop.mo.170000,prop.yr.170000,by="st.op",all=TRUE)
ca.prop <- merge(ca.prop,prop.ft.170000,by="st.op",all=TRUE)
ca.prop[is.na(ca.prop)] <- 0
write.csv(ca.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.zprop.EQU.csv")
#or by size
or.sz.1 <- merge(mo.or.sz.500001,yr.or.sz.500001,by="size",all=TRUE)
or.sz.1 <- merge(or.sz.1,ft.or.sz.500001,by="size",all=TRUE)
or.sz.1[is.na(or.sz.1)] <- 0
write.csv(or.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.1.EQU.csv")
##ga 3
or.sz.3 <- merge(mo.or.sz.500003,yr.or.sz.500003,by="size",all=TRUE)
or.sz.3 <- merge(or.sz.3,ft.or.sz.500003,by="size",all=TRUE)
or.sz.3[is.na(or.sz.3)] <- 0
write.csv(or.sz.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.3.EQU.csv")
##ga 4
or.sz.4 <- merge(mo.or.sz.500004,yr.or.sz.500004,by="size",all=TRUE)
or.sz.4 <- merge(or.sz.4,ft.or.sz.500004,by="size",all=TRUE)
or.sz.4[is.na(or.sz.4)] <- 0
write.csv(or.sz.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.4.EQU.csv")
#or by cat
##ga 1
or.cat.1 <- merge(mo.or.cat.500001,yr.or.cat.500001,by="cat",all=TRUE)
or.cat.1 <- merge(or.cat.1,ft.or.cat.500001,by="cat",all=TRUE)
or.cat.1[is.na(or.cat.1)] <- 0
write.csv(or.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.1.EQU.csv")
##ga 3
or.cat.3 <- merge(mo.or.cat.500003,yr.or.cat.500003,by="cat",all=TRUE)
or.cat.3 <- merge(or.cat.3,ft.or.cat.500003,by="cat",all=TRUE)
or.cat.3[is.na(or.cat.3)] <- 0
write.csv(or.cat.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.3.EQU.csv")
##ga 4
or.cat.4 <- merge(mo.or.cat.500004,yr.or.cat.500004,by="cat",all=TRUE)
or.cat.4 <- merge(or.cat.4,ft.or.cat.500004,by="cat",all=TRUE)
or.cat.4[is.na(or.cat.4)] <- 0
write.csv(or.cat.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.4.EQU.csv")
##prop
or.prop <- merge(prop.mo.160000,prop.yr.160000,by="st.op",all=TRUE)
or.prop <- merge(or.prop,prop.ft.160000,by="st.op",all=TRUE)
or.prop[is.na(or.prop)] <- 0
write.csv(or.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.zprop.EQU.csv")
#ga by size
#ga 1
ga.sz.1 <- merge(mo.ga.sz.500001,yr.ga.sz.500001,by="size",all=TRUE)
ga.sz.1 <- merge(ga.sz.1,ft.ga.sz.500001,by="size",all=TRUE)
ga.sz.1[is.na(ga.sz.1)] <- 0
write.csv(ga.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.sz.1.EQU.csv")
##ga 2
ga.sz.2 <- merge(mo.ga.sz.500002,yr.ga.sz.500002,by="size",all=TRUE)
ga.sz.2 <- merge(ga.sz.2,ft.ga.sz.500002,by="size",all=TRUE)
ga.sz.2[is.na(ga.sz.2)] <- 0
write.csv(ga.sz.2,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.sz.2.EQU.csv")
#ga by cat
##ga 1
ga.cat.1 <- merge(mo.ga.cat.500001,yr.ga.cat.500001,by="cat",all=TRUE)
ga.cat.1 <- merge(ga.cat.1,ft.ga.cat.500001,by="cat",all=TRUE)
ga.cat.1[is.na(ga.cat.1)] <- 0
write.csv(ga.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.cat.1.EQU.csv")
##ga 2
ga.cat.2 <- merge(mo.ga.cat.500002,yr.ga.cat.500002,by="cat",all=TRUE)
ga.cat.2 <- merge(ga.cat.2,ft.ga.cat.500002,by="cat",all=TRUE)
ga.cat.2[is.na(ga.cat.2)] <- 0
write.csv(ga.cat.2,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.cat.2.EQU.csv")
##prop
ga.prop <- merge(prop.mo.550000,prop.yr.550000,by="st.op",all=TRUE)
ga.prop <- merge(ga.prop,prop.ft.550000,by="st.op",all=TRUE)
ga.prop[is.na(ga.prop)] <- 0
write.csv(ga.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.zprop.EQU.csv")
| /CANNING.R | no_license | kennyd3/CANNING | R | false | false | 49,928 | r | #YOU MIGHT WANT TO ADD SOME LIBRARIES HERE
###########################################
#read-in & setup data frame & clean up data
###########################################
can <- read.csv("C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/Downloads/curr.csv", header=TRUE, skip=6)
plan.data <- read.csv("C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/Downloads/plans.csv", header=TRUE)
plan.data <- as.data.table(plan.data)
plan.grower <- read.csv("C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/Downloads/plansbyga.csv", header=TRUE)
plan.grower <- as.data.table(plan.grower)
yr <- as.numeric(format(Sys.Date(),"%y"))
#Get only pertinent columns
loc <- which(colnames(can)=="Loc." )
grw <- which(colnames(can)=="Grower")
cat <- which(colnames(can)=="Category")
bsz <- which(colnames(can)=="Base.Size")
osd <- which(colnames(can)=="Orig..Str.Dt")
ppl <- which(colnames(can)=="Prod..Plan")
ppq <- which(colnames(can)=="Prod..Plan.EQU")
grp <- which(colnames(can)=="Gross..Prod.Qty")
gre <- which(colnames(can)=="EQU..Prod.Qty")
sto <- which(colnames(can)=="Std.Oper")
ldt <- which(colnames(can)=="Labor.Date")
wos <- which(colnames(can)=="WO.Stat")
opc <- which(colnames(can)=="Oper..Complete")
qty <- which(colnames(can)=="Orig..Ord.Qty")
equ <- which(colnames(can)=="Orig..Ord.EQU")
wip <- which(colnames(can)=="WIP.Ord.")
#put column numbers in vector
clmns <- c(loc,grw,cat,bsz,osd,ppl,sto,ldt,wos,opc,qty,equ,wip,grp,gre,ppq)
#create new matrix with just pertinent columns
can.1 <- can[,clmns]
#delete unneeded rows
can.2 <- can.1[!(can.1$Base.Size == ""),]
can.2 <- can.2[!(can.2$Grower == 500051),]
can.2 <- can.2[!(can.2$Grower == 500008),]
#change VH to CA
can.2$Loc.[can.2$Loc. == 180000]<-170000
can.2$WO.Stat[is.na(can.2$WO.Stat)]<-0
#create columns for month and year
###find length of date and use if to get dates
dt.1 <- as.numeric(substring(as.character(can.2$Orig..Str.Dt[1]),7,8))
#to handle whether date is held like "xx/xx/xx" or like "xxxxxxxxx"
if(dt.1 + 1 == yr|dt.1 == yr){
#function to separate month from string
unlst.mo <- function(x){
newcol <- unlist(strsplit(x, "[/]"))[1]
}
unlst.yr <- function(x){
newcol <- substring(unlist(strsplit(x, "[/]"))[3],3,4)
}
can.2$st.dt <- as.character(can.2$Orig..Str.Dt)
can.2$mo <- lapply(can.2$st.dt,unlst.mo)
can.2$yr <- lapply(can.2$st.dt,unlst.yr)
can.2$lb.mo <- lapply(as.character(can.2$Labor.Date),unlst.mo)
can.2$lb.yr <- lapply(as.character(can.2$Labor.Date),unlst.yr)
}else{
library(date)
can.2$st.dt <- as.Date(can.2$Orig..Str.Dt,origin="1899-12-30")
#start month
can.2$mo <- format(can.2$st.dt,"%m")
#start year
can.2$yr <- format(can.2$st.dt,"%y")
}
#remove comma characters so that columns are no longer factors and can have functions applied
can.2$Prod..Plan <- as.numeric(gsub(",","", can.2$Prod..Plan))
can.2$Prod..Plan.EQU <- as.numeric(gsub(",","", can.2$Prod..Plan.EQU))
can.2$Gross..Prod.Qty <- as.numeric(gsub(",","", can.2$Gross..Prod.Qty))
can.2$EQU..Prod.Qty <- as.numeric(gsub(",","", can.2$EQU..Prod.Qty))
can.2$Orig..Ord.Qty <- as.numeric(gsub(",","", can.2$Orig..Ord.Qty))
can.2$Orig..Ord.EQU <- as.numeric(gsub(",","", can.2$Orig..Ord.EQU))
##################################
#setup df with only necessary data
#######################################################
#to run this for production plan put "plan" for sum, for completed put "qty" for sum
library(data.table)
# Core working table: short column names over can.2's cleaned columns.
# NOTE(review): in the slash-date branch above, mo/yr/lb.mo/lb.yr were built
# with lapply(), so these four become *list* columns here -- the later
# `month == ...` / `lb.mo <= ...` filters rely on coercion; verify.
a <- data.table(loc=can.2$Loc.,
                cat=can.2$Category,
                qty=can.2$Orig..Ord.Qty,
                month=can.2$mo,
                year=can.2$yr,
                grw=can.2$Grower,
                plan=can.2$Prod..Plan,
                grpr=can.2$Gross..Prod.Qty,
                gre=can.2$EQU..Prod.Qty,
                equ.pr=can.2$Prod..Plan.EQU,
                size=can.2$Base.Size,
                equ=can.2$Orig..Ord.EQU,
                wo = can.2$WO.Stat,
                lb.mo = can.2$lb.mo,
                lb.yr = can.2$lb.yr,
                st.op = can.2$Std.Oper)
# Drop rows with no grower code.
a <- a[grw!=0,]
##################################
#variables
#########################################
###################################
# Location codes: 160000 = Oregon, 170000 = California, 550000 = Georgia
# (as used by the or./ca./ga. prefixes throughout this script).
grow.loc <- c(160000,170000,550000)
grow.loc.rng <- 1:length(grow.loc)
# Growing-area codes: OR/CA use three areas, GA uses two.
GA.or.ca <- c(500001,500003,500004)
GA.ga <- c(500001,500002)
or.ca.rng <- 1:length(GA.or.ca)
ga.rng <- 1:length(GA.ga)
#prior month
mo.1 <- as.numeric(format(Sys.Date(),"%m"))-1
mo = mo.1
#december handler: when run in January, report on last December
if(mo.1 == 0) mo <- 12
createframe <- function(location, GA, n=1, year=FALSE){
#n is 1 for "size" or 2 for "cat"
kbys = c("size","cat")
if(year==TRUE){
temp.a <- a[loc==location & year==yr & month<=mo & grw==GA & wo!=15 & wo!=99 & wo != 0,
list(orig.qty=sum(qty),prod=sum(plan)),keyby=eval(kbys[n])]
temp.b <- a[loc==location & lb.yr == yr & lb.mo <= mo & grw==GA & wo != 15 & wo != 99 & wo != 0,
list(grss=sum(grpr)), keyby = eval(kbys[n])]
temp.a <- merge(temp.a,temp.b,by=eval(kbys[n]),all=TRUE)
temp.a[is.na(temp.a)]<-0
temp.a$perc.comp <- temp.a$grss/temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$orig.qty)
c3 <- sum(temp.a$prod)
c4 <- sum(temp.a$grss)
c5 <- c4/c3
if(n==1){
newrow <- data.frame(size=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
}else newrow <- data.frame(cat=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
temp.a <- rbind(temp.a,newrow)
}else{
temp.a <- a[loc==location & year==yr & month==mo & grw==GA & wo!=15 & wo!=99 & wo != 0,
list(orig.qty=sum(qty),prod=sum(plan),grss=sum(grpr)),keyby=eval(kbys[n])]
temp.a$perc.comp <- temp.a$grss/temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$orig.qty)
c3 <- sum(temp.a$prod)
c4 <- sum(temp.a$grss)
c5 <- c4/c3
if(n==1){
newrow <- data.frame(size=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
}else newrow <- data.frame(cat=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
temp.a <- rbind(temp.a,newrow)
}
temp.a
}
# Build the gross-unit summary frames for every location / growing-area /
# grouping-mode combination. Produces exactly the same global variables the
# old 32 explicit calls did (yr.ca.sz.500001, mo.or.cat.500003, ...).
frame.specs <- list(ca = list(loc = 170000, gas = c(500001, 500003, 500004)),
                    or = list(loc = 160000, gas = c(500001, 500003, 500004)),
                    ga = list(loc = 550000, gas = c(500001, 500002)))
for (lbl in names(frame.specs)) {
  spec <- frame.specs[[lbl]]
  for (g in spec$gas) {
    assign(paste0("yr.", lbl, ".sz.", g), createframe(spec$loc, g, year = TRUE))
    assign(paste0("yr.", lbl, ".cat.", g), createframe(spec$loc, g, n = 2, year = TRUE))
    assign(paste0("mo.", lbl, ".sz.", g), createframe(spec$loc, g))
    assign(paste0("mo.", lbl, ".cat.", g), createframe(spec$loc, g, n = 2))
  }
}
remove(frame.specs, spec, lbl, g)
########################################
#by future
########################################
#########################################
# Three-month forward production plan for one location / growing area,
# grouped by base size (n = 1) or category (n = 2).
# Returns the grouping column plus prod.x (this month), prod.y (+1),
# prod (+2), Total, and a trailing "zTotals" grand-total row.
createfuture <- function(location, GA, n=1){
  kbys <- c("size", "cat")
  mo.1 <- as.numeric(format(Sys.Date(), "%m"))
  yr <- as.numeric(format(Sys.Date(), "%y"))
  # Fold months 13..15 back to 1..3; replaces three duplicated if-chains.
  # NOTE(review): wrapped months still query the same `yr` -- confirm how
  # next-January plan rows are dated upstream.
  wrap.mo <- function(m) ((m - 1) %% 12) + 1
  # Planned quantity by group for one calendar month.
  plan.for.month <- function(m) {
    a[loc == location & year == yr & month == m & grw == GA,
      list(prod = sum(plan)), keyby = eval(kbys[n])]
  }
  temp.a <- plan.for.month(mo.1)
  temp.b <- plan.for.month(wrap.mo(mo.1 + 1))
  temp.c <- plan.for.month(wrap.mo(mo.1 + 2))
  # Merge on the grouping column explicitly. The old code passed `keyby=` to
  # merge(), which merge() silently ignores; it only worked because both
  # tables happened to be keyed on the same column.
  temp.a <- merge(temp.a, temp.b, by = kbys[n], all = TRUE)
  temp.a <- merge(temp.a, temp.c, by = kbys[n], all = TRUE)
  # Groups absent in a month become NA after the outer merges -> 0.
  temp.a[is.na(temp.a)] <- 0
  temp.a$Total <- temp.a$prod.x + temp.a$prod.y + temp.a$prod
  #add totals row ("z" prefix keeps it sorted last)
  c1 <- "zTotals"
  c2 <- sum(temp.a$prod.x)
  c3 <- sum(temp.a$prod.y)
  c4 <- sum(temp.a$prod)
  c5 <- sum(temp.a$Total)
  if (n == 1) {
    newrow <- data.frame(size = c1, prod.x = c2, prod.y = c3, prod = c4, Total = c5)
  } else {
    newrow <- data.frame(cat = c1, prod.x = c2, prod.y = c3, prod = c4, Total = c5)
  }
  temp.a <- rbind(temp.a, newrow)
  return(temp.a)
}
# Forward-plan frames for every location / growing-area / grouping-mode
# combination; creates the same globals as the old 16 explicit calls
# (ft.ca.sz.500001, ft.ga.cat.500002, ...).
ft.specs <- list(ca = list(loc = 170000, gas = c(500001, 500003, 500004)),
                 or = list(loc = 160000, gas = c(500001, 500003, 500004)),
                 ga = list(loc = 550000, gas = c(500001, 500002)))
for (lbl in names(ft.specs)) {
  spec <- ft.specs[[lbl]]
  for (g in spec$gas) {
    assign(paste0("ft.", lbl, ".sz.", g), createfuture(spec$loc, g))
    assign(paste0("ft.", lbl, ".cat.", g), createfuture(spec$loc, g, n = 2))
  }
}
remove(ft.specs, spec, lbl, g)
###########################################
#Propagation
############################################
##########################################
# Propagation (grower 500007) summaries per location, gross units: prior
# month (prop.mo.*), year to date (prop.yr.*) and next three months of plan
# (prop.ft.*), each keyed by standard operation with a "zTotals" row.
for(i in grow.loc.rng){
  #mo data
  mo.1 <- as.numeric(format(Sys.Date(),"%m")) - 1
  #december handler
  if(mo.1 == 0) mo.1 <- 12
  mo <- mo.1
  # Fold months 13..15 back to 1..3; replaces the three duplicated if-chains
  # the old code carried for the forward-looking queries.
  # NOTE(review): wrapped months still query the same `yr` -- confirm how
  # next-January plan rows are dated upstream.
  wrap.mo <- function(m) ((m - 1) %% 12) + 1
  nam <- paste("prop.mo.",grow.loc[i],sep="")
  p.mo <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
            list(orig.qty=sum(qty),prod=sum(plan),grss=sum(grpr)),
            keyby = st.op]
  p.mo[is.na(p.mo)]<-0
  p.mo$perc <- p.mo$grss/p.mo$prod
  #add totals row ("z" prefix keeps it sorted last)
  c1 <- "zTotals"
  c2 <- sum(p.mo$orig.qty)
  c3 <- sum(p.mo$prod)
  c4 <- sum(p.mo$grss)
  c5 <- c4/c3
  newrow <- data.frame(st.op = c1, orig.qty = c2, prod = c3, grss = c4, perc = c5)
  p.mo <- rbind(p.mo, newrow)
  assign(nam,p.mo)
  #yr data: order/plan from start-dated rows, gross from labor-dated rows
  nam <- paste("prop.yr.",grow.loc[i],sep="")
  p.yr <- a[loc == grow.loc[i] & year == yr & month <= mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
            list(orig.qty=sum(qty),prod=sum(plan)),
            keyby = st.op]
  temp.a <- a[loc == grow.loc[i] & lb.yr == yr & lb.mo <= mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
              list(grss=sum(grpr)),
              keyby = st.op]
  p.yr <- merge(p.yr,temp.a,by="st.op",all=TRUE)
  p.yr[is.na(p.yr)]<-0
  p.yr$perc <- p.yr$grss/p.yr$prod
  #add totals row
  c1 <- "zTotals"
  c2 <- sum(p.yr$orig.qty)
  c3 <- sum(p.yr$prod)
  c4 <- sum(p.yr$grss)
  c5 <- c4/c3
  newrow <- data.frame(st.op = c1, orig.qty = c2, prod = c3, grss = c4, perc = c5)
  p.yr <- rbind(p.yr, newrow)
  assign(nam,p.yr)
  #ft data: planned quantity for each of the next three months
  nam <- paste("prop.ft.",grow.loc[i],sep="")
  mo <- wrap.mo(mo.1 + 1)
  temp.a <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
              list(prod=sum(plan)),
              keyby = st.op]
  mo <- wrap.mo(mo.1 + 2)
  temp.b <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
              list(prod=sum(plan)),
              keyby = st.op]
  mo <- wrap.mo(mo.1 + 3)
  temp.c <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007,
              list(prod=sum(plan)),
              keyby = st.op]
  # Outer-merge the three months; merge suffixes yield prod.x/prod.y/prod.
  temp.a <- merge(temp.a,temp.b,by="st.op",all=TRUE)
  temp.a <- merge(temp.a, temp.c,by="st.op",all=TRUE)
  temp.a[is.na(temp.a)]<-0
  temp.a$Total <- NULL  # (no-op guard; Total column is not built here)
  temp.a$sums <- temp.a$prod.x + temp.a$prod.y + temp.a$prod
  #add totals row
  c1 <- "zTotals"
  c2 <- sum(temp.a$prod.x)
  c3 <- sum(temp.a$prod.y)
  c4 <- sum(temp.a$prod)
  c5 <- c2 + c3 + c4
  newrow <- data.frame(st.op = c1, prod.x = c2, prod.y = c3, prod = c4, sums = c5)
  temp.a <- rbind(temp.a, newrow)
  assign(nam,temp.a)
}
###########################################
#write files to folder
###########################################
#############################################
# Each report merges month (.x columns), year-to-date (.y columns) and
# future tables on size/cat/st.op; NAs from unmatched keys become 0.
# "##ga N" headings below refer to growing area N.
#ca by size
##ga 1
ca.sz.1 <- merge(mo.ca.sz.500001,yr.ca.sz.500001,by="size",all=TRUE)
ca.sz.1 <- merge(ca.sz.1,ft.ca.sz.500001,by="size",all=TRUE)
ca.sz.1[is.na(ca.sz.1)] <- 0
write.csv(ca.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.1.csv")
##ga 3
ca.sz.3 <- merge(mo.ca.sz.500003,yr.ca.sz.500003,by="size",all=TRUE)
ca.sz.3 <- merge(ca.sz.3,ft.ca.sz.500003,by="size",all=TRUE)
ca.sz.3[is.na(ca.sz.3)] <- 0
write.csv(ca.sz.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.3.csv")
##ga 4
ca.sz.4 <- merge(mo.ca.sz.500004,yr.ca.sz.500004,by="size",all=TRUE)
ca.sz.4 <- merge(ca.sz.4,ft.ca.sz.500004,by="size",all=TRUE)
ca.sz.4[is.na(ca.sz.4)] <- 0
write.csv(ca.sz.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.4.csv")
#ca by cat
##ga 1
ca.cat.1 <- merge(mo.ca.cat.500001,yr.ca.cat.500001,by="cat",all=TRUE)
ca.cat.1 <- merge(ca.cat.1,ft.ca.cat.500001,by="cat",all=TRUE)
ca.cat.1[is.na(ca.cat.1)] <- 0
write.csv(ca.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.1.csv")
##ga 3
ca.cat.3 <- merge(mo.ca.cat.500003,yr.ca.cat.500003,by="cat",all=TRUE)
ca.cat.3 <- merge(ca.cat.3,ft.ca.cat.500003,by="cat",all=TRUE)
ca.cat.3[is.na(ca.cat.3)] <- 0
write.csv(ca.cat.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.3.csv")
##ga 4
ca.cat.4 <- merge(mo.ca.cat.500004,yr.ca.cat.500004,by="cat",all=TRUE)
ca.cat.4 <- merge(ca.cat.4,ft.ca.cat.500004,by="cat",all=TRUE)
ca.cat.4[is.na(ca.cat.4)] <- 0
write.csv(ca.cat.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.4.csv")
##prop
ca.prop <- merge(prop.mo.170000,prop.yr.170000,by="st.op",all=TRUE)
ca.prop <- merge(ca.prop,prop.ft.170000,by="st.op",all=TRUE)
ca.prop[is.na(ca.prop)] <- 0
write.csv(ca.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.zprop.csv")
#or by size
##ga 1
or.sz.1 <- merge(mo.or.sz.500001,yr.or.sz.500001,by="size",all=TRUE)
or.sz.1 <- merge(or.sz.1,ft.or.sz.500001,by="size",all=TRUE)
or.sz.1[is.na(or.sz.1)] <- 0
write.csv(or.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.1.csv")
##ga 3
or.sz.3 <- merge(mo.or.sz.500003,yr.or.sz.500003,by="size",all=TRUE)
or.sz.3 <- merge(or.sz.3,ft.or.sz.500003,by="size",all=TRUE)
or.sz.3[is.na(or.sz.3)] <- 0
write.csv(or.sz.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.3.csv")
##ga 4
or.sz.4 <- merge(mo.or.sz.500004,yr.or.sz.500004,by="size",all=TRUE)
or.sz.4 <- merge(or.sz.4,ft.or.sz.500004,by="size",all=TRUE)
or.sz.4[is.na(or.sz.4)] <- 0
write.csv(or.sz.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.4.csv")
#or by cat
##ga 1
or.cat.1 <- merge(mo.or.cat.500001,yr.or.cat.500001,by="cat",all=TRUE)
or.cat.1 <- merge(or.cat.1,ft.or.cat.500001,by="cat",all=TRUE)
or.cat.1[is.na(or.cat.1)] <- 0
write.csv(or.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.1.csv")
##ga 3
or.cat.3 <- merge(mo.or.cat.500003,yr.or.cat.500003,by="cat",all=TRUE)
or.cat.3 <- merge(or.cat.3,ft.or.cat.500003,by="cat",all=TRUE)
or.cat.3[is.na(or.cat.3)] <- 0
write.csv(or.cat.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.3.csv")
##ga 4
or.cat.4 <- merge(mo.or.cat.500004,yr.or.cat.500004,by="cat",all=TRUE)
or.cat.4 <- merge(or.cat.4,ft.or.cat.500004,by="cat",all=TRUE)
or.cat.4[is.na(or.cat.4)] <- 0
write.csv(or.cat.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.4.csv")
##prop
or.prop <- merge(prop.mo.160000,prop.yr.160000,by="st.op",all=TRUE)
or.prop <- merge(or.prop,prop.ft.160000,by="st.op",all=TRUE)
or.prop[is.na(or.prop)] <- 0
write.csv(or.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.zprop.csv")
#ga by size
#ga 1
ga.sz.1 <- merge(mo.ga.sz.500001,yr.ga.sz.500001,by="size",all=TRUE)
ga.sz.1 <- merge(ga.sz.1,ft.ga.sz.500001,by="size",all=TRUE)
ga.sz.1[is.na(ga.sz.1)] <- 0
write.csv(ga.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.sz.1.csv")
##ga 2
ga.sz.2 <- merge(mo.ga.sz.500002,yr.ga.sz.500002,by="size",all=TRUE)
ga.sz.2 <- merge(ga.sz.2,ft.ga.sz.500002,by="size",all=TRUE)
ga.sz.2[is.na(ga.sz.2)] <- 0
write.csv(ga.sz.2,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.sz.2.csv")
#ga by cat
##ga 1
ga.cat.1 <- merge(mo.ga.cat.500001,yr.ga.cat.500001,by="cat",all=TRUE)
ga.cat.1 <- merge(ga.cat.1,ft.ga.cat.500001,by="cat",all=TRUE)
ga.cat.1[is.na(ga.cat.1)] <- 0
write.csv(ga.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.cat.1.csv")
##ga 2
ga.cat.2 <- merge(mo.ga.cat.500002,yr.ga.cat.500002,by="cat",all=TRUE)
ga.cat.2 <- merge(ga.cat.2,ft.ga.cat.500002,by="cat",all=TRUE)
ga.cat.2[is.na(ga.cat.2)] <- 0
write.csv(ga.cat.2,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.cat.2.csv")
##prop
ga.prop <- merge(prop.mo.550000,prop.yr.550000,by="st.op",all=TRUE)
ga.prop <- merge(ga.prop,prop.ft.550000,by="st.op",all=TRUE)
ga.prop[is.na(ga.prop)] <- 0
write.csv(ga.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.zprop.csv")
###########################################
#SUMMARIES
####################################
####################################
# Last completed month (December handler: month 0 -> 12).
mo.1 <- as.numeric(format(Sys.Date(),"%m"))-1
#december handler
if(mo.1 == 0) mo.1 <- 12
mo <- mo.1
##################################
#summary by mo
########################################
#####################################
# Per-location grower summary for the last completed month:
# actual qty/gross from `a`, plan from the plan.grower table.
for(i in grow.loc.rng){
nam <- paste("mo.", grow.loc[i],".summary",sep="")
temp.a <- a[loc == grow.loc[i] & year == yr & month == mo & wo != 15 & wo != 99 & wo != 0,
list(orig.qty=sum(equ),grss=sum(gre)),
keyby = grw]
#get plan from planbygr.csv file
temp.a1 <- plan.grower[LOCATION==grow.loc[i] & MONTH==mo, list(prod=sum(PLAN)),by=grw]
# NOTE(review): this cbind-style data.table() assumes temp.a and temp.a1
# list the same growers in the same order; a merge on grw would be safer
# if the two sources can ever disagree -- confirm.
temp.a <- data.table(grw=temp.a$grw,orig.qty=temp.a$orig.qty,prod=temp.a1$prod,grss=temp.a$grss)
temp.a$perc.comp <- temp.a$grss/temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$orig.qty)
c3 <- sum(temp.a$prod)
c4 <- sum(temp.a$grss)
c5 <- c4/c3
newrow <- data.frame(grw = c1, orig.qty = c2, prod = c3, grss = c4, perc.comp = c5)
temp.a <- rbind(temp.a, newrow)
assign(nam,temp.a)
}
##################################
#summary by yr
########################################
#########################################
# Per-location grower summary, year to date: qty from the production
# calendar, plan from plan.grower, gross from the lab calendar (lb.yr/lb.mo).
for(i in grow.loc.rng){
nam <- paste("yr.", grow.loc[i],".summary",sep="")
temp.a <- a[loc == grow.loc[i] & year == yr & month <= mo & wo != 15 & wo != 99 & wo != 0,
list(orig.qty=sum(equ)),
keyby = grw]
#get plan from planbygr.csv file
temp.a1 <- plan.grower[LOCATION==grow.loc[i] & MONTH<=mo, list(prod=sum(PLAN)),by=grw]
temp.b <- a[loc == grow.loc[i] & lb.yr == yr & lb.mo <= mo & wo != 15 & wo != 99 & wo != 0,
list(grss=sum(gre)),
keyby = grw]
# NOTE(review): as in the month summary, the three sources are combined
# positionally and must list the same growers in the same order -- confirm.
temp.a <- data.table(grw=temp.a$grw,orig.qty=temp.a$orig.qty,prod=temp.a1$prod,grss=temp.b$grss)
temp.a$perc.comp <- temp.a$grss/temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$orig.qty)
c3 <- sum(temp.a$prod)
c4 <- sum(temp.a$grss)
c5 <- c4/c3
newrow <- data.frame(grw = c1, orig.qty = c2, prod = c3, grss = c4, perc.comp = c5)
temp.a <- rbind(temp.a, newrow)
assign(nam,temp.a)
}
##################################
# summary by ft: planned production (equ.pr) per grower for the current
# month and the two following months, for the locations in or.ca.rng.
# NOTE(review): ft.550000.summary (Georgia) is consumed below but this loop
# iterates or.ca.rng -- confirm the Georgia table is produced elsewhere.
########################################
######################################
# Maps a month offset back into 1..12 (replaces the repeated
# if(mo == 13/14/15) chains).
wrap12 <- function(m) ((m - 1) %% 12) + 1
for(i in or.ca.rng){
  mo.1 <- as.numeric(format(Sys.Date(),"%m"))
  mo <- mo.1
  nam <- paste("ft.",grow.loc[i],".summary",sep="")
  temp.a <- a[loc == grow.loc[i] & year == yr & month == mo,
              list(prod=sum(equ.pr)),
              keyby = grw]
  temp.b <- a[loc == grow.loc[i] & year == yr & month == wrap12(mo.1 + 1),
              list(prod=sum(equ.pr)),
              keyby = grw]
  temp.c <- a[loc == grow.loc[i] & year == yr & month == wrap12(mo.1 + 2),
              list(prod=sum(equ.pr)),
              keyby = grw]
  # after the merges: prod.x = current month, prod.y = +1 month, prod = +2
  temp.a <- merge(temp.a,temp.b,by="grw",all=TRUE)
  temp.a <- merge(temp.a, temp.c,by="grw",all=TRUE)
  temp.a[is.na(temp.a)] <- 0
  temp.a$Total <- temp.a$prod.x + temp.a$prod.y + temp.a$prod
  #add totals row
  c1 <- "zTotals"
  c2 <- sum(temp.a$prod.x)
  c3 <- sum(temp.a$prod.y)
  c4 <- sum(temp.a$prod)
  c5 <- sum(temp.a$Total)
  newrow <- data.frame(grw = c1, prod.x = c2, prod.y = c3, prod = c4, Total = c5)
  temp.a <- rbind(temp.a, newrow)
  assign(nam,temp.a)
}
##################################
#merge summaries and write to csv
##################################
##################################
ca.summary <- merge(mo.170000.summary,yr.170000.summary,by="grw",all=TRUE)
ca.summary <- merge(ca.summary,ft.170000.summary,by="grw",all=TRUE)
ca.summary[is.na(ca.summary)] <- 0
write.csv(ca.summary,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.summary.csv")
or.summary <- merge(mo.160000.summary,yr.160000.summary,by="grw",all=TRUE)
or.summary <- merge(or.summary,ft.160000.summary,by="grw",all=TRUE)
or.summary[is.na(or.summary)] <- 0
write.csv(or.summary,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.summary.csv")
ga.summary <- merge(mo.550000.summary,yr.550000.summary,by="grw",all=TRUE)
ga.summary <- merge(ga.summary,ft.550000.summary,by="grw",all=TRUE)
ga.summary[is.na(ga.summary)] <- 0
write.csv(ga.summary,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.summary.csv")
######################################
#all locations summary
######################################
loc.sums <- list(or.summary,ca.summary,ga.summary)
loc.nams <- c("Oregon","California","Georgia")
# Zero-row template with the summary's columns: the filter matches nothing
# as long as no orig.qty.x is negative -- NOTE(review): confirm that
# negative quantities cannot occur, otherwise stray rows leak in here.
temp.tab <- or.summary[orig.qty.x < 0,]
# Pull each location's "zTotals" row (last row) and relabel it with the
# location name.
for(i in 1:length(loc.sums)){
temp.a <- loc.sums[[i]][nrow(loc.sums[[i]]),]
temp.a[1, 1] <- loc.nams[i]
temp.tab <- rbind(temp.tab,temp.a)
}
####READ IN PLAN DATA AND REPLACE CURRENT PLAN####
#GET ACTUAL PLAN DATA AS OF BEGINNING OF YEAR
# Last completed month, used to index the monthly rows of plan.data.
# NOTE(review): no December handler here (curr.month = 0 in January) --
# confirm whether that case can occur when this report runs.
curr.month <- as.numeric(format(Sys.Date(),"%m"))-1
temp.tab <- as.data.table(temp.tab)
#replace current month plan numbers
# Column positions assume the merged summary layout:
#   [[3]] = month plan, [[5]] = month % complete,
#   [[7]] = YTD plan,   [[9]] = YTD % complete.
# TODO: confirm these indices if the summary columns ever change.
temp.tab[[3]][1] <- plan.data$OR_SEP[curr.month]
temp.tab[[3]][2] <- plan.data$CA_SEP[curr.month]
temp.tab[[3]][3] <- plan.data$GA_SEP[curr.month]
temp.tab[[7]][1] <- plan.data$OREGON[curr.month]
temp.tab[[7]][2] <- plan.data$CALIFORNIA[curr.month]
temp.tab[[7]][3] <- plan.data$GEORGIA[curr.month]
#update percent complete colums
temp.tab[[5]] <- temp.tab[[4]]/temp.tab[[3]]
temp.tab[[9]] <- temp.tab[[8]]/temp.tab[[7]]
#Add totals row
# Rebuild the totals row from the patched columns (13 columns expected).
totals.row <- data.table("Totals",sum(temp.tab[[2]]),sum(temp.tab[[3]]),sum(temp.tab[[4]]),
round(sum(temp.tab[[4]])/sum(temp.tab[[3]]),4),
sum(temp.tab[[6]]),sum(temp.tab[[7]]),sum(temp.tab[[8]]),
round(sum(temp.tab[[8]])/sum(temp.tab[[7]]),4),
sum(temp.tab[[10]]),sum(temp.tab[[11]]),sum(temp.tab[[12]]),sum(temp.tab[[13]]))
all.locs.totals <- rbind(temp.tab,totals.row,use.names=FALSE)
write.csv(all.locs.totals,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/all.locs.total.csv")
###########################################
#CUMULATIVE CHARTS
###########################################
#change year to last year
yr <- as.numeric(format(Sys.Date(),"%y")) - 1
# Month labels used as data.table column names ("1".."12").
mo.labs <-c("1","2","3","4","5","6","7","8","9","10","11","12")
# Growing areas and their short labels; Georgia has its own set.
gr.areas <- c(500001,500003,500004,500007)
ga.gr.areas <- c(500001,500002,500007)
gr.labs <- c("1","3","4","7")
ga.gr.labs <- c("1","2","7")
#function for all months last year completed
# Builds one wide table of gross production (gre) per grower for each of the
# 12 months of `yr`, using the lab calendar (lb.yr/lb.mo) and excluding
# work orders 15 and 99.  Returns a data.table: grw, then columns "1".."12".
# `a` is the main production data.table; `locs` is a location code.
moFunc <- function(locs, yr, mo.labs, a){
for(i in 1:12){
oldProd <- a[loc == locs & lb.yr == yr & lb.mo == mo.labs[i] & wo != 15 & wo != 99,
list(gross=sum(gre)),
keyby = grw]
# First month seeds the table; later months are merged in by grower.
if(i == 1){
one <- oldProd
} else one <- merge(one, oldProd,by="grw",all=TRUE)
newnm <- as.character(mo.labs[i])
setnames(one,"gross",newnm)
}
one[is.na(one)]<-0
return(one)
}
#function for all months this year planned
# Builds one wide table per grower with one column per month: completed
# months (1..mo) come from the plan.grower table (PLAN), remaining months
# come from `a` (equ.pr).
# NOTE(review): the first loop reads the GLOBAL `mo` (months completed),
# which callers must set beforehand -- confirm every call site does.
moFuncEQU <- function(locs, yr, mo.labs, a){
for(i in 1:mo){
oldProd <- plan.grower[LOCATION == locs & MONTH == mo.labs[i],
list(plan=sum(PLAN)), keyby = grw]
if(i == 1){
one <- oldProd
} else one <- merge(one, oldProd,by="grw",all=TRUE)
newnm <- as.character(mo.labs[i])
setnames(one,"plan",newnm)
}
# Recompute months completed locally for the future-months loop.
mo <- as.numeric(format(Sys.Date(),"%m")) - 1
mo.1 <- mo + 1
# NOTE(review): in December mo = 12 so mo.1:12 is 13:12, which iterates
# BACKWARDS over a nonexistent month 13 -- TODO add a December guard.
for(i in mo.1:12){
oldProd <- a[loc == locs & year == yr & month == mo.labs[i],
list(plan=sum(equ.pr)),
keyby = grw]
one <- merge(one, oldProd,by="grw",all=TRUE)
newnm <- as.character(mo.labs[i])
setnames(one,"plan",newnm)
}
one[is.na(one)]<-0
return(one)
}
#ca
# Last-year monthly gross tables per location (columns "1".."12" by grower).
ca.old <- moFunc(170000,yr,mo.labs,a)
#or
or.old <- moFunc(160000,yr,mo.labs,a)
#ga
ga.old <- moFunc(550000,yr,mo.labs,a)
######################################
#last year Oregon
######################################
#separate growing areas into own vector
# Each <loc>.LY.<label> is a length-12 numeric vector of monthly gross for
# one growing area (the t()/as.vector flattens the one-row subset).
for(i in 1:length(gr.areas)){
area.num <- as.vector(t(subset(or.old,grw==gr.areas[i],select=mo.labs)))
# `gra` is assigned but never used -- left in place pending cleanup.
gra <- gr.areas[i]
nam <- paste("or.LY.",gr.labs[i],sep="")
assign(nam,area.num)
}
######################################
#last year california
######################################
#separate growing areas into own vector
for(i in 1:length(gr.areas)){
area.num <- as.vector(t(subset(ca.old,grw==gr.areas[i],select=mo.labs)))
gra <- gr.areas[i]
nam <- paste("ca.LY.",gr.labs[i],sep="")
assign(nam,area.num)
}
######################################
#last year georgia
######################################
#separate growing areas into own vector
for(i in 1:length(ga.gr.areas)){
area.num <- as.vector(t(subset(ga.old,grw==ga.gr.areas[i],select=mo.labs)))
# NOTE(review): indexes gr.areas (not ga.gr.areas) -- harmless only because
# `gra` is unused; fix or remove when touching this loop.
gra <- gr.areas[i]
nam <- paste("ga.LY.",ga.gr.labs[i],sep="")
assign(nam,area.num)
}
######################################
#this year prod plan
######################################
#setup all locations
#change year to current year
yr <- as.numeric(format(Sys.Date(),"%y"))
# Months completed; read as a GLOBAL by moFuncEQU's first loop.
mo <- as.numeric(format(Sys.Date(),"%m"))-1
#ca
ca.plan <- moFuncEQU(170000,yr,mo.labs,a)
#or
or.plan <- moFuncEQU(160000,yr,mo.labs,a)
#ga
ga.plan <- moFuncEQU(550000,yr,mo.labs,a)
#separate growing areas into own vector
# Each <loc>.plan.<label> is a length-12 vector of monthly plan values.
#oregon
for(i in 1:length(gr.areas)){
area.num <- as.vector(t(subset(or.plan,grw==gr.areas[i],select=mo.labs)))
nam <- paste("or.plan.",gr.labs[i],sep="")
assign(nam,area.num)
}
#california
for(i in 1:length(gr.areas)){
area.num <- as.vector(t(subset(ca.plan,grw==gr.areas[i],select=mo.labs)))
nam <- paste("ca.plan.",gr.labs[i],sep="")
assign(nam,area.num)
}
#georgia
for(i in 1:length(ga.gr.areas)){
area.num <- as.vector(t(subset(ga.plan,grw==ga.gr.areas[i],select=mo.labs)))
nam <- paste("ga.plan.",ga.gr.labs[i],sep="")
assign(nam,area.num)
}
######################################
#YTD numbers
######################################
# Months completed so far; NOTE(review): in January mo.cnt = 0 and
# morng = 1:0 runs backwards -- TODO add a guard if this can run in January.
mo.cnt <- as.numeric(format(Sys.Date(),"%m"))-1
morng <- 1:mo.cnt
mo.labs.2 <- mo.labs[1:mo.cnt]
# NOTE(review): hard-coded current year (14 = 2014); everywhere else the
# year is computed from Sys.Date() -- confirm and replace before 2015.
yr <- 14
#function to get YTD numbers
# Monthly gross (gre, lab calendar) per grower for the months in `rng`.
# Assumes rng starts at 1 (the i == 1 branch seeds the table).
moFuncYTD <- function(locs, yr, mo.labs, a, rng){
for(i in rng){
oldProd <- a[loc == locs & lb.yr == yr & lb.mo == mo.labs[i] & wo != 15 & wo != 99,
list(gross=sum(gre)),
keyby = grw]
if(i == 1){
one <- oldProd
} else one <- merge(one, oldProd,by="grw",all=TRUE)
newnm <- mo.labs[i]
setnames(one,"gross",newnm)
}
one[is.na(one)]<-0
return(one)
}
or.YTD <- moFuncYTD(160000, yr, mo.labs, a, morng)
ca.YTD <- moFuncYTD(170000, yr, mo.labs, a, morng)
ga.YTD <- moFuncYTD(550000, yr, mo.labs, a, morng)
##separate growing areas into own vector
# Each <loc>.YTD.<label> is a numeric vector of monthly gross for one
# growing area over the completed months (mo.labs.2).
#oregon
for(i in 1:length(gr.areas)){
area.num <- as.vector(t(subset(or.YTD,grw==gr.areas[i],select=mo.labs.2)))
nam <- paste("or.YTD.",gr.labs[i],sep="")
assign(nam,area.num)
}
#california
for(i in 1:length(gr.areas)){
area.num <- as.vector(t(subset(ca.YTD,grw==gr.areas[i],select=mo.labs.2)))
nam <- paste("ca.YTD.",gr.labs[i],sep="")
assign(nam,area.num)
}
#georgia
# BUG FIX: this loop previously subset or.YTD (copy/paste error), so the
# Georgia YTD vectors contained Oregon data.  It now reads ga.YTD.
for(i in 1:length(ga.gr.areas)){
area.num <- as.vector(t(subset(ga.YTD,grw==ga.gr.areas[i],select=mo.labs.2)))
nam <- paste("ga.YTD.",ga.gr.labs[i],sep="")
assign(nam,area.num)
}
################################
#for plotting
################################
# Running (cumulative) totals of the FIRST 12 monthly values in `area`.
# Accepts a numeric vector or a list of single numbers; extra elements
# beyond the twelfth are ignored (matches the original while(i < 13) loop).
# Returns an unnamed numeric vector of length 12.
# Replaces the grow-a-list loop (and its implicit assignment return)
# with a direct cumsum().
addsums <- function(area){
  as.numeric(cumsum(unlist(area[1:12])))
}
####################################
#LY cumulative
####################################
# For each growing area, turn last year's monthly vector into a running
# total (<loc>.ly.<label>.cum) for the cumulative charts.
#oregon LY cumulative vectors
or.loc.vec <- list()
or.loc.vec[[1]] <- or.LY.1
or.loc.vec[[2]] <- or.LY.3
or.loc.vec[[3]] <- or.LY.4
or.loc.vec[[4]] <- or.LY.7
for(i in 1:length(gr.areas)){
temp <- addsums(or.loc.vec[[i]])
nam <- paste("or.ly.",gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(or.loc.vec)
#california LY cumulative vectors
ca.loc.vec <- list()
ca.loc.vec[[1]] <- ca.LY.1
ca.loc.vec[[2]] <- ca.LY.3
ca.loc.vec[[3]] <- ca.LY.4
ca.loc.vec[[4]] <- ca.LY.7
for(i in 1:length(gr.areas)){
temp <- addsums(ca.loc.vec[[i]])
nam <- paste("ca.ly.",gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(ca.loc.vec)
#georgia LY cumulative vectors
ga.loc.vec <- list()
ga.loc.vec[[1]] <- ga.LY.1
ga.loc.vec[[2]] <- ga.LY.2
ga.loc.vec[[3]] <- ga.LY.7
for(i in 1:length(ga.gr.areas)){
temp <- addsums(ga.loc.vec[[i]])
nam <- paste("ga.ly.",ga.gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(ga.loc.vec)
#######################################
#plan cumulative
#######################################
# Same running-total treatment for this year's plan vectors
# (<loc>.plan.<label>.cum).
#oregon plan cumulative vectors
or.loc.vec <- list()
or.loc.vec[[1]] <- or.plan.1
or.loc.vec[[2]] <- or.plan.3
or.loc.vec[[3]] <- or.plan.4
or.loc.vec[[4]] <- or.plan.7
for(i in 1:length(gr.areas)){
temp <- addsums(or.loc.vec[[i]])
nam <- paste("or.plan.",gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(or.loc.vec)
#california plan cumulative vectors
ca.loc.vec <- list()
ca.loc.vec[[1]] <- ca.plan.1
ca.loc.vec[[2]] <- ca.plan.3
ca.loc.vec[[3]] <- ca.plan.4
ca.loc.vec[[4]] <- ca.plan.7
for(i in 1:length(gr.areas)){
temp <- addsums(ca.loc.vec[[i]])
nam <- paste("ca.plan.",gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(ca.loc.vec)
#georgia plan cumulative vectors
ga.loc.vec <- list()
ga.loc.vec[[1]] <- ga.plan.1
ga.loc.vec[[2]] <- ga.plan.2
ga.loc.vec[[3]] <- ga.plan.7
for(i in 1:length(ga.gr.areas)){
temp <- addsums(ga.loc.vec[[i]])
nam <- paste("ga.plan.",ga.gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(ga.loc.vec)
#######################################
#YTD cumulative
#######################################
# Running (cumulative) totals of the first `months.cnt` monthly values in
# `area` (vector or list of single numbers).  Returns an unnamed numeric
# vector of length months.cnt.  Replaces the grow-a-list while loop (and
# its implicit assignment return) with a direct cumsum().
addsumsYTD <- function(area, months.cnt){
  as.numeric(cumsum(unlist(area[seq_len(months.cnt)])))
}
#oregon YTD cumulative vectors
# Running totals of this year's actual gross per growing area
# (<loc>.YTD.<label>.cum), over the mo.cnt completed months.
or.loc.vec <- list()
or.loc.vec[[1]] <- or.YTD.1
or.loc.vec[[2]] <- or.YTD.3
or.loc.vec[[3]] <- or.YTD.4
or.loc.vec[[4]] <- or.YTD.7
for(i in 1:length(gr.areas)){
temp <- addsumsYTD(or.loc.vec[[i]],mo.cnt)
nam <- paste("or.YTD.",gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(or.loc.vec)
#california YTD cumulative vectors
ca.loc.vec <- list()
ca.loc.vec[[1]] <- ca.YTD.1
ca.loc.vec[[2]] <- ca.YTD.3
ca.loc.vec[[3]] <- ca.YTD.4
ca.loc.vec[[4]] <- ca.YTD.7
for(i in 1:length(gr.areas)){
temp <- addsumsYTD(ca.loc.vec[[i]],mo.cnt)
nam <- paste("ca.YTD.",gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(ca.loc.vec)
#georgia YTD cumulative vectors
ga.loc.vec <- list()
ga.loc.vec[[1]] <- ga.YTD.1
ga.loc.vec[[2]] <- ga.YTD.2
ga.loc.vec[[3]] <- ga.YTD.7
for(i in 1:length(ga.gr.areas)){
temp <- addsumsYTD(ga.loc.vec[[i]],mo.cnt)
nam <- paste("ga.YTD.",ga.gr.labs[[i]],".cum",sep="")
assign(nam,temp)
}
remove(ga.loc.vec)
#############################################
#location totals
#############################################
## Cumulative monthly totals for one location table.
## `dframe` has a key column (grw) first, then one column per month; the
## return value is the running sum of the monthly column totals, as an
## unnamed numeric vector.
## Uses [[j]] column extraction so it behaves the same for data.frame and
## data.table inputs, and replaces the old grow-a-list / re-sum-the-prefix
## loop (which relied on the `2:mos-1` == `(2:mos)-1` precedence quirk)
## with a single cumsum().
loc.tot.vec <- function(dframe){
  mos <- length(names(dframe))
  monthly <- vapply(2:mos, function(j) sum(as.numeric(dframe[[j]])), numeric(1))
  as.numeric(cumsum(monthly))
}
#or.plan.tots <- loc.tot.vec(or.plan)
# Plan totals come straight from plan.data columns instead of being
# cumulated from or.plan/ca.plan/ga.plan (the loc.tot.vec calls are kept
# commented out above each).  NOTE(review): ly/YTD totals below ARE
# cumulative -- presumably the plan.data columns already hold cumulative
# values; confirm against the plan.data source file.
or.plan.tots <- as.vector(plan.data$OREGON)
or.ly.tots <- loc.tot.vec(or.old)
or.YTD.tots <- loc.tot.vec(or.YTD)
#ca.plan.tots <- loc.tot.vec(ca.plan)
ca.plan.tots <- as.vector(plan.data$CALIFORNIA)
ca.ly.tots <- loc.tot.vec(ca.old)
ca.YTD.tots <- loc.tot.vec(ca.YTD)
#ga.plan.tots <- loc.tot.vec(ga.plan)
ga.plan.tots <- as.vector(plan.data$GEORGIA)
ga.ly.tots <- loc.tot.vec(ga.old)
ga.YTD.tots <- loc.tot.vec(ga.YTD)
# All-locations series: elementwise sums across the three locations.
all.locs.ly <- or.ly.tots + ca.ly.tots + ga.ly.tots
all.locs.ytd <- or.YTD.tots + ca.YTD.tots + ga.YTD.tots
all.locs.plan <- or.plan.tots + ca.plan.tots + ga.plan.tots
##############################################
#plotting
##############################################
#Get YTD % complete for each location
# Rows 1-3 of all.locs.totals are OR/CA/GA, row 4 is the totals row;
# column [[9]] is the YTD percent-complete column patched earlier.
or.perc.comp <- paste(round((all.locs.totals[[9]][1])*100,digits=2),"%",sep="")
ca.perc.comp <- paste(round((all.locs.totals[[9]][2])*100,digits=2),"%",sep="")
ga.perc.comp <- paste(round((all.locs.totals[[9]][3])*100,digits=2),"%",sep="")
all.perc.comp <- paste(round((all.locs.totals[[9]][4])*100,digits=2),"%",sep="")
# Draws one cumulative canning chart (side effect only; no useful return):
# last year (dotted grey), this year's plan (solid grey), YTD actual (red).
#   plan/ly/ytd  - cumulative monthly numeric vectors
#   perc.comp    - preformatted "NN.NN%" string for the subtitle
# NOTE(review): the location name is recovered from the CALLER'S variable
# name via deparse(substitute(plan)) -- arguments must be named "or.*",
# "ca.*" or "ga.*" or the title falls back to "All Locations".
canplots <- function(plan,ly,ytd,perc.comp){
#create empty plot
# Scale the empty frame to whichever series peaks higher (plan vs last year).
mxplan <- max(plan)
mxly <- max(ly)
if(mxplan > mxly){
plotmat <- plan
} else plotmat <- ly
par(mar=c(5,4,5,2)+0.1)
plot(plotmat,
type = "n",
lwd = 2,
lty = 1,
yaxt = "n",
ylab = "",
xlab = "Months")
locs <- substr(deparse(substitute(plan)),1,2)
if(locs=="or"){
loc = "Oregon"
} else if(locs=="ca"){
loc = "California"
} else if(locs=="ga"){
loc = "Georgia"
} else loc = "All Locations"
topTitle <- paste(loc," Canning by Month",sep="")
lowTitle <- paste(perc.comp," of Total Year Plan Complete")
title(topTitle, cex.main = 1.8, font.main = 1.5, col.main = "darkgreen")
mtext(lowTitle,cex = 1.1)
#add ablines and ticks
# Y axis: fixed 1M..40M gridlines/labels (caps the plottable range at 40M).
ylns <- c(1000000,2000000,3000000,4000000,5000000,6000000,7000000,8000000,9000000,10000000,11000000,12000000,13000000,14000000,15000000,16000000,
17000000,18000000,19000000,20000000,21000000,22000000,23000000,24000000,25000000,26000000,27000000,28000000,29000000,30000000,31000000,32000000,
33000000,34000000,35000000,36000000,37000000,38000000,39000000,40000000)
ytxt <- c("1M","2M","3M","4M","5M","6M","7M","8M","9M","10M","11M","12M","13M","14M","15M","16M",
"17M","18M","19M","20M","21M","22M","23M","24M","25M","26M","27M","28M","29M","30M","31M","32M",
"33M","34M","35M","36M","37M","38M","39M","40M")
axis(2,at=ylns,labels=ytxt, las=1)
abline(h=ylns,col="gray88",lty="dashed")
xlns <- c(1,2,3,4,5,6,7,8,9,10,11,12)
abline(v=xlns,col="gray88",lty="dashed")
#add lines
lines(ly, col = "grey", lwd = 2, lty="dotted")
lines(plan, col = "azure4", lwd = 2)
lines(ytd, col = "red", lwd = 5)
# NOTE(review): legend years are hard-coded 2013/2014 -- update yearly or
# derive from Sys.Date().
legend("topleft",
c("2013","2014 Planned","2014 YTD"),
col=c("grey","azure4","red"),
lty=c(3,1,1),
lwd=c(2,2,5),
cex=0.5)
}
# Draw the four charts.  canplots plots for its side effect; the or/ca/ga
# variable-name prefixes matter (canplots parses deparse(substitute())),
# so keep these argument names as-is.
orplt <- canplots(or.plan.tots,or.ly.tots,or.YTD.tots,or.perc.comp)
caplt <- canplots(ca.plan.tots,ca.ly.tots,ca.YTD.tots,ca.perc.comp)
gaplt <- canplots(ga.plan.tots,ga.ly.tots,ga.YTD.tots,ga.perc.comp)
allplt <- canplots(all.locs.plan,all.locs.ly,all.locs.ytd,all.perc.comp)
#############################################
#EQU DATA
#############################################
#############################################
# Reset to the current two-digit year for the EQU sections below.
yr <- as.numeric(format(Sys.Date(),"%y"))
#function to create dataframe for a given location and growing area for complete YTD or just month
# EQU-basis report table (equ / equ.pr / gre columns) keyed by size or cat.
#   location - location code; GA - growing area
#   n        - 1 groups by "size", 2 groups by "cat" (via data.table NSE:
#              keyby = eval(kbys[n]))
#   year     - TRUE for year-to-date (plan from the production calendar,
#              gross from the lab calendar), FALSE for the single month
# Relies on the GLOBAL `yr` and `mo` being set by the caller.
# Excludes work orders 15, 99 and 0; appends a "zTotals" row.
createframeEQU <- function(location, GA, n=1, year=FALSE){
#n is 1 for "size" or 2 for "cat"
kbys = c("size","cat")
if(year==TRUE){
temp.a <- a[loc==location & year==yr & month<=mo & grw==GA & wo!=15 & wo!=99 & wo!=0,
list(orig.qty=sum(equ),prod=sum(equ.pr)),keyby=eval(kbys[n])]
temp.b <- a[loc==location & lb.yr==yr & lb.mo<=mo & grw==GA & wo!=15 & wo!=99 & wo!=0,
list(grss=sum(gre)),keyby=eval(kbys[n])]
temp.a <- merge(temp.a,temp.b,by=eval(kbys[n]),all=TRUE)
temp.a[is.na(temp.a)] <- 0
temp.a$perc.comp <- temp.a$grss/temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$orig.qty)
c3 <- sum(temp.a$prod)
c4 <- sum(temp.a$grss)
c5 <- c4/c3
if(n==1){
newrow <- data.frame(size=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
}else newrow <- data.frame(cat=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
temp.a <- rbind(temp.a,newrow)
}else{
temp.a <- a[loc==location & year==yr & month==mo & grw==GA & wo!=15 & wo!=99 & wo!=0,
list(orig.qty=sum(equ),prod=sum(equ.pr),grss=sum(gre)),keyby=eval(kbys[n])]
temp.a$perc.comp <- temp.a$grss/temp.a$prod
#add totals row
c1 <- "zTotals"
c2 <- sum(temp.a$orig.qty)
c3 <- sum(temp.a$prod)
c4 <- sum(temp.a$grss)
c5 <- c4/c3
if(n==1){
newrow <- data.frame(size=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
}else newrow <- data.frame(cat=c1,orig.qty=c2,prod=c3,grss=c4,perc.comp=c5)
temp.a <- rbind(temp.a,newrow)
}
temp.a
}
# EQU tables per location/growing area: yr.* = year-to-date, mo.* = last
# completed month (global `mo`); n = 2 groups by category instead of size.
yr.ca.sz.500001 <- createframeEQU(170000,500001,year=TRUE)
yr.ca.sz.500003 <- createframeEQU(170000,500003,year=TRUE)
yr.ca.sz.500004 <- createframeEQU(170000,500004,year=TRUE)
yr.ca.cat.500001 <- createframeEQU(170000,500001,n=2,year=TRUE)
yr.ca.cat.500003 <- createframeEQU(170000,500003,n=2,year=TRUE)
yr.ca.cat.500004 <- createframeEQU(170000,500004,n=2,year=TRUE)
yr.or.sz.500001 <- createframeEQU(160000,500001,year=TRUE)
yr.or.sz.500003 <- createframeEQU(160000,500003,year=TRUE)
yr.or.sz.500004 <- createframeEQU(160000,500004,year=TRUE)
yr.or.cat.500001 <- createframeEQU(160000,500001,n=2,year=TRUE)
yr.or.cat.500003 <- createframeEQU(160000,500003,n=2,year=TRUE)
yr.or.cat.500004 <- createframeEQU(160000,500004,n=2,year=TRUE)
yr.ga.sz.500001 <- createframeEQU(550000,500001,year=TRUE)
yr.ga.sz.500002 <- createframeEQU(550000,500002,year=TRUE)
yr.ga.cat.500001 <- createframeEQU(550000,500001,n=2,year=TRUE)
yr.ga.cat.500002 <- createframeEQU(550000,500002,n=2,year=TRUE)
mo.ca.sz.500001 <- createframeEQU(170000,500001)
mo.ca.sz.500003 <- createframeEQU(170000,500003)
mo.ca.sz.500004 <- createframeEQU(170000,500004)
mo.ca.cat.500001 <- createframeEQU(170000,500001,n=2)
mo.ca.cat.500003 <- createframeEQU(170000,500003,n=2)
mo.ca.cat.500004 <- createframeEQU(170000,500004,n=2)
mo.or.sz.500001 <- createframeEQU(160000,500001)
mo.or.sz.500003 <- createframeEQU(160000,500003)
mo.or.sz.500004 <- createframeEQU(160000,500004)
mo.or.cat.500001 <- createframeEQU(160000,500001,n=2)
mo.or.cat.500003 <- createframeEQU(160000,500003,n=2)
mo.or.cat.500004 <- createframeEQU(160000,500004,n=2)
mo.ga.sz.500001 <- createframeEQU(550000,500001)
mo.ga.sz.500002 <- createframeEQU(550000,500002)
mo.ga.cat.500001 <- createframeEQU(550000,500001,n=2)
mo.ga.cat.500002 <- createframeEQU(550000,500002,n=2)
###########################################
# Propagation (EQU basis): same three tables per location as the earlier
# propagation section (prop.mo/prop.yr/prop.ft), but measured in
# equ / equ.pr / gre units.
############################################
# Maps a month offset past the last completed month back into 1..12.
# The EQU loop previously had no wrap handling, so months 13-15 matched
# nothing late in the year; this now mirrors the earlier propagation loop.
wrap12 <- function(m) ((m - 1) %% 12) + 1
for(i in grow.loc.rng){
  # mo data: last completed month (December handler: month 0 -> 12)
  mo.1 <- as.numeric(format(Sys.Date(),"%m")) - 1
  if(mo.1 == 0) mo.1 <- 12
  mo <- mo.1
  nam <- paste("prop.mo.",grow.loc[i],sep="")
  # Year filter uses the current-year variable `yr` (set above) instead of
  # the hard-coded 14, matching the rest of the script.
  p.mo <- a[loc == grow.loc[i] & year == yr & month == mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
            list(orig.qty=sum(equ),prod=sum(equ.pr),grss=sum(gre)),
            keyby = st.op]
  p.mo[is.na(p.mo)] <- 0
  p.mo$perc <- p.mo$grss/p.mo$prod
  #add totals row
  c1 <- "zTotals"
  c2 <- sum(p.mo$orig.qty)
  c3 <- sum(p.mo$prod)
  c4 <- sum(p.mo$grss)
  c5 <- c4/c3
  newrow <- data.frame(st.op = c1, orig.qty = c2, prod = c3, grss = c4, perc = c5)
  p.mo <- rbind(p.mo, newrow)
  assign(nam,p.mo)
  #yr data
  nam <- paste("prop.yr.",grow.loc[i],sep="")
  p.yr <- a[loc == grow.loc[i] & year == yr & month <= mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
            list(orig.qty=sum(equ),prod=sum(equ.pr)),
            keyby = st.op]
  temp.a <- a[loc == grow.loc[i] & lb.yr == yr & lb.mo <= mo & grw == 500007 & wo != 15 & wo != 99 & wo != 0,
              list(grss=sum(gre)),
              keyby = st.op]
  p.yr <- merge(p.yr,temp.a,by="st.op",all=TRUE)
  p.yr[is.na(p.yr)] <- 0
  # BUG FIX: percent complete was previously computed from the MONTH table
  # (p.mo$grss/p.mo$prod); it must come from the YTD table itself, as in
  # the earlier propagation section.
  p.yr$perc <- p.yr$grss/p.yr$prod
  #add totals row
  c1 <- "zTotals"
  c2 <- sum(p.yr$orig.qty)
  c3 <- sum(p.yr$prod)
  c4 <- sum(p.yr$grss)
  c5 <- c4/c3
  newrow <- data.frame(st.op = c1, orig.qty = c2, prod = c3, grss = c4, perc = c5)
  p.yr <- rbind(p.yr, newrow)
  assign(nam,p.yr)
  # ft data: the next three planned months, wrapped back into 1..12.
  # NOTE(review): as in the earlier section, wrapped months still filter on
  # year == yr (the current year) -- confirm this is intended.
  nam <- paste("prop.ft.",grow.loc[i],sep="")
  temp.a <- a[loc == grow.loc[i] & year == yr & month == wrap12(mo.1 + 1) & grw == 500007,
              list(prod=sum(equ.pr)),
              keyby = st.op]
  temp.b <- a[loc == grow.loc[i] & year == yr & month == wrap12(mo.1 + 2) & grw == 500007,
              list(prod=sum(equ.pr)),
              keyby = st.op]
  temp.c <- a[loc == grow.loc[i] & year == yr & month == wrap12(mo.1 + 3) & grw == 500007,
              list(prod=sum(equ.pr)),
              keyby = st.op]
  # after the merges: prod.x = month +1, prod.y = month +2, prod = month +3
  temp.a <- merge(temp.a,temp.b,by="st.op",all=TRUE)
  temp.a <- merge(temp.a, temp.c,by="st.op",all=TRUE)
  temp.a[is.na(temp.a)] <- 0
  temp.a$sums <- temp.a$prod.x + temp.a$prod.y + temp.a$prod
  #add totals row
  c1 <- "zTotals"
  c2 <- sum(temp.a$prod.x)
  c3 <- sum(temp.a$prod.y)
  c4 <- sum(temp.a$prod)
  c5 <- c2 + c3 + c4
  newrow <- data.frame(st.op = c1, prod.x = c2, prod.y = c3, prod = c4, sums = c5)
  temp.a <- rbind(temp.a, newrow)
  assign(nam,temp.a)
}
###########################################
#write files to folder
###########################################
# EQU-basis reports: month (.x) + year-to-date (.y) + future table merged
# on size/cat/st.op, NAs zeroed, written as *.EQU.csv.
# "##ga N" headings below refer to growing area N.
#ca by size
##ga 1
ca.sz.1 <- merge(mo.ca.sz.500001,yr.ca.sz.500001,by="size",all=TRUE)
ca.sz.1 <- merge(ca.sz.1,ft.ca.sz.500001,by="size",all=TRUE)
ca.sz.1[is.na(ca.sz.1)] <- 0
write.csv(ca.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.1.EQU.csv")
##ga 3
ca.sz.3 <- merge(mo.ca.sz.500003,yr.ca.sz.500003,by="size",all=TRUE)
ca.sz.3 <- merge(ca.sz.3,ft.ca.sz.500003,by="size",all=TRUE)
ca.sz.3[is.na(ca.sz.3)] <- 0
write.csv(ca.sz.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.3.EQU.csv")
##ga 4
ca.sz.4 <- merge(mo.ca.sz.500004,yr.ca.sz.500004,by="size",all=TRUE)
ca.sz.4 <- merge(ca.sz.4,ft.ca.sz.500004,by="size",all=TRUE)
ca.sz.4[is.na(ca.sz.4)] <- 0
write.csv(ca.sz.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.sz.4.EQU.csv")
#ca by cat
##ga 1
ca.cat.1 <- merge(mo.ca.cat.500001,yr.ca.cat.500001,by="cat",all=TRUE)
ca.cat.1 <- merge(ca.cat.1,ft.ca.cat.500001,by="cat",all=TRUE)
ca.cat.1[is.na(ca.cat.1)] <- 0
write.csv(ca.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.1.EQU.csv")
##ga 3
ca.cat.3 <- merge(mo.ca.cat.500003,yr.ca.cat.500003,by="cat",all=TRUE)
ca.cat.3 <- merge(ca.cat.3,ft.ca.cat.500003,by="cat",all=TRUE)
ca.cat.3[is.na(ca.cat.3)] <- 0
write.csv(ca.cat.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.3.EQU.csv")
##ga 4
ca.cat.4 <- merge(mo.ca.cat.500004,yr.ca.cat.500004,by="cat",all=TRUE)
ca.cat.4 <- merge(ca.cat.4,ft.ca.cat.500004,by="cat",all=TRUE)
ca.cat.4[is.na(ca.cat.4)] <- 0
write.csv(ca.cat.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.cat.4.EQU.csv")
##prop
ca.prop <- merge(prop.mo.170000,prop.yr.170000,by="st.op",all=TRUE)
ca.prop <- merge(ca.prop,prop.ft.170000,by="st.op",all=TRUE)
ca.prop[is.na(ca.prop)] <- 0
write.csv(ca.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ca.zprop.EQU.csv")
#or by size
##ga 1
or.sz.1 <- merge(mo.or.sz.500001,yr.or.sz.500001,by="size",all=TRUE)
or.sz.1 <- merge(or.sz.1,ft.or.sz.500001,by="size",all=TRUE)
or.sz.1[is.na(or.sz.1)] <- 0
write.csv(or.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.1.EQU.csv")
##ga 3
or.sz.3 <- merge(mo.or.sz.500003,yr.or.sz.500003,by="size",all=TRUE)
or.sz.3 <- merge(or.sz.3,ft.or.sz.500003,by="size",all=TRUE)
or.sz.3[is.na(or.sz.3)] <- 0
write.csv(or.sz.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.3.EQU.csv")
##ga 4
or.sz.4 <- merge(mo.or.sz.500004,yr.or.sz.500004,by="size",all=TRUE)
or.sz.4 <- merge(or.sz.4,ft.or.sz.500004,by="size",all=TRUE)
or.sz.4[is.na(or.sz.4)] <- 0
write.csv(or.sz.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.sz.4.EQU.csv")
#or by cat
##ga 1
or.cat.1 <- merge(mo.or.cat.500001,yr.or.cat.500001,by="cat",all=TRUE)
or.cat.1 <- merge(or.cat.1,ft.or.cat.500001,by="cat",all=TRUE)
or.cat.1[is.na(or.cat.1)] <- 0
write.csv(or.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.1.EQU.csv")
##ga 3
or.cat.3 <- merge(mo.or.cat.500003,yr.or.cat.500003,by="cat",all=TRUE)
or.cat.3 <- merge(or.cat.3,ft.or.cat.500003,by="cat",all=TRUE)
or.cat.3[is.na(or.cat.3)] <- 0
write.csv(or.cat.3,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.3.EQU.csv")
##ga 4
or.cat.4 <- merge(mo.or.cat.500004,yr.or.cat.500004,by="cat",all=TRUE)
or.cat.4 <- merge(or.cat.4,ft.or.cat.500004,by="cat",all=TRUE)
or.cat.4[is.na(or.cat.4)] <- 0
write.csv(or.cat.4,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.cat.4.EQU.csv")
##prop
or.prop <- merge(prop.mo.160000,prop.yr.160000,by="st.op",all=TRUE)
or.prop <- merge(or.prop,prop.ft.160000,by="st.op",all=TRUE)
or.prop[is.na(or.prop)] <- 0
write.csv(or.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/or.zprop.EQU.csv")
#ga by size
#ga 1
ga.sz.1 <- merge(mo.ga.sz.500001,yr.ga.sz.500001,by="size",all=TRUE)
ga.sz.1 <- merge(ga.sz.1,ft.ga.sz.500001,by="size",all=TRUE)
ga.sz.1[is.na(ga.sz.1)] <- 0
write.csv(ga.sz.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.sz.1.EQU.csv")
##ga 2
ga.sz.2 <- merge(mo.ga.sz.500002,yr.ga.sz.500002,by="size",all=TRUE)
ga.sz.2 <- merge(ga.sz.2,ft.ga.sz.500002,by="size",all=TRUE)
ga.sz.2[is.na(ga.sz.2)] <- 0
write.csv(ga.sz.2,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.sz.2.EQU.csv")
#ga by cat
##ga 1
ga.cat.1 <- merge(mo.ga.cat.500001,yr.ga.cat.500001,by="cat",all=TRUE)
ga.cat.1 <- merge(ga.cat.1,ft.ga.cat.500001,by="cat",all=TRUE)
ga.cat.1[is.na(ga.cat.1)] <- 0
write.csv(ga.cat.1,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.cat.1.EQU.csv")
##ga 2
ga.cat.2 <- merge(mo.ga.cat.500002,yr.ga.cat.500002,by="cat",all=TRUE)
ga.cat.2 <- merge(ga.cat.2,ft.ga.cat.500002,by="cat",all=TRUE)
ga.cat.2[is.na(ga.cat.2)] <- 0
write.csv(ga.cat.2,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.cat.2.EQU.csv")
##prop
ga.prop <- merge(prop.mo.550000,prop.yr.550000,by="st.op",all=TRUE)
ga.prop <- merge(ga.prop,prop.ft.550000,by="st.op",all=TRUE)
ga.prop[is.na(ga.prop)] <- 0
write.csv(ga.prop,file="C:/Users/kadavison/Desktop/Reports/Mim_Recreations/Canning_Reports/csv_files/ga.zprop.EQU.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{pp_tree}
\alias{pp_tree}
\title{An example of a taxonomy tree in newick format.}
\format{A data frame with only one entry
\describe{
\item{V1}{ tree in newick format}
}}
\usage{
pp_tree
}
\description{
An example of a taxonomy tree in newick format.
}
\keyword{datasets}
| /man/pp_tree.Rd | permissive | trvinh/test | R | false | true | 381 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{pp_tree}
\alias{pp_tree}
\title{An example of a taxonomy tree in newick format.}
\format{A data frame with only one entry
\describe{
\item{V1}{ tree in newick format}
}}
\usage{
pp_tree
}
\description{
An example of a taxonomy tree in newick format.
}
\keyword{datasets}
|
# Auto-generated fuzzing harness (RcppDeepState/AFL style): replays a
# captured argument list against a CNull C++ entry point so valgrind can
# check it for memory errors. The extreme doubles are fuzzer inputs.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.99939407587056e-241, 6.68887130434692e-198, 1.31001627192305e-309, 3.45453955987373e+304, 7.72577355637176e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# ::: deliberately reaches an unexported function of the CNull package.
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta_interleaved_matrices/AFL_communities_individual_based_sampling_beta_interleaved_matrices/communities_individual_based_sampling_beta_interleaved_matrices_valgrind_files/1615840194-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 404 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.99939407587056e-241, 6.68887130434692e-198, 1.31001627192305e-309, 3.45453955987373e+304, 7.72577355637176e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result) |
context("vert_id")

test_that("vert_id works", {
  skip_on_cran()

  # Replay the recorded HTTP interaction rather than hitting the live API.
  vcr::use_cassette("vert_id", {
    res <- vert_id(ids = "urn:catalog:CM:Herps:116520", messages = FALSE)
  })

  # Single-occurrence lookup: a list with metadata plus a one-row table.
  expect_is(res, "list")
  expect_is(res$meta, "list")
  expect_is(res$data, "data.frame")
  expect_equal(NROW(res$data), 1)
  expect_named(res$meta,
               c('request_date','response_records','submitted_query',
                 'request_origin','limit','last_cursor',
                 'query_version','matching_records','api_version'))
  expect_true(grepl("bufo debilis", tolower(res$data$scientificname)))
})

test_that("vert_id multiple ids", {
  skip_on_cran()

  occ_ids <- c(
    "http://arctos.database.museum/guid/MSB:Mamm:56979?seid=1643089",
    "urn:catalog:CM:Herps:116520"
  )
  vcr::use_cassette("vert_id_multiple_ids", {
    res <- vert_id(occ_ids, messages = FALSE)
  })

  # Two ids in, two records out, same metadata schema as above.
  expect_is(res, "list")
  expect_is(res$meta, "list")
  expect_is(res$data, "data.frame")
  expect_equal(NROW(res$data), 2)
  expect_named(res$meta,
               c('request_date','response_records','submitted_query',
                 'request_origin','limit','last_cursor',
                 'query_version','matching_records','api_version'))
  expect_true(any(grepl("zapus", tolower(res$data$scientificname))))
  expect_true(any(grepl("bufo", tolower(res$data$scientificname))))
})
| /tests/testthat/test-vert_id.R | permissive | ropensci/rvertnet | R | false | false | 1,343 | r | context("vert_id")
test_that("vert_id works", {
skip_on_cran()
vcr::use_cassette("vert_id", {
aa <- vert_id(ids = "urn:catalog:CM:Herps:116520", messages = FALSE)
})
expect_is(aa, "list")
expect_is(aa$meta, "list")
expect_is(aa$data, "data.frame")
expect_equal(NROW(aa$data), 1)
expect_named(aa$meta, c('request_date','response_records','submitted_query',
'request_origin','limit','last_cursor',
'query_version','matching_records','api_version'))
expect_true(grepl("bufo debilis", tolower(aa$data$scientificname)))
})
test_that("vert_id multiple ids", {
skip_on_cran()
ids <- c("http://arctos.database.museum/guid/MSB:Mamm:56979?seid=1643089",
"urn:catalog:CM:Herps:116520")
vcr::use_cassette("vert_id_multiple_ids", {
aa <- vert_id(ids, messages = FALSE)
})
expect_is(aa, "list")
expect_is(aa$meta, "list")
expect_is(aa$data, "data.frame")
expect_equal(NROW(aa$data), 2)
expect_named(aa$meta, c('request_date','response_records','submitted_query',
'request_origin','limit','last_cursor',
'query_version','matching_records','api_version'))
expect_true(any(grepl("zapus", tolower(aa$data$scientificname))))
expect_true(any(grepl("bufo", tolower(aa$data$scientificname))))
})
|
library(dplyr)
# Order the rows of `x` from most to least similar to the profile in
# `compare`, using squared Euclidean distance over attribute columns.
#
# Args:
#   x:       data frame of player rows; columns 30:58 hold outfield
#            attributes, 59:63 hold goalkeeper attributes.
#   compare: a single reference row (list / one-row data frame) with the
#            same column layout; compare[[90]] == "False" means the
#            reference is an outfielder, so outfield columns are
#            compared, otherwise the GK columns are used.
#
# Returns: `x` with rows reordered, nearest neighbour first.
#
# NOTE(review): the original accumulated `a * (a - c * c)` per attribute
# (a misplaced parenthesis), which is not a distance metric; this version
# uses the intended squared difference (a - c)^2. It also preallocates
# the distance vector instead of growing it with c(), and avoids naming
# a local `range` (which shadowed base::range).
legacy_nearest <- function(x, compare) {
  if (compare[[90]] == "False") {  # outfielder: compare outfield attributes
    cols <- 30:58
  } else {                         # goalkeeper: compare GK attributes
    cols <- 59:63
  }
  distances <- numeric(nrow(x))
  for (i in seq_len(nrow(x))) {
    d <- 0
    for (j in cols) {
      diff <- as.numeric(x[[i, j]]) - as.numeric(compare[[j]])
      d <- d + diff * diff
    }
    distances[i] <- d
  }
  x[order(distances), ]
}
| /LegacyNearestNeighbour.R | no_license | KevinMcGin/FIFA_FYP | R | false | false | 458 | r | library(dplyr)
legacy_nearest <- function(x,compare) {
distances <- c()
if(compare[90] == "False") #prefers gk
range <- 30:58
else
range <- 59:63
for (i in 1:nrow(x)) {
distance <- 0
for(j in range){
distance <- distance + (as.numeric(x[[i,j]])*(as.numeric(x[[i,j]])
-(as.numeric(compare[j]))*(as.numeric(compare[j]))))
}
distances <- c(distances,distance)
}
x[(order(distances)),]
}
|
library(psych)
library(Amelia)
library(ggplot2)
library(gridExtra)
library(CorrMixed)
library(gtools)
library(mgcv)
library(visreg)
library(lubridate)
library(irr)
# ---- Load ER40 (Penn emotion recognition task) longitudinal CNB data ----
x <- read.csv("CNB Longitudinal/er40_20191113.csv")
x[,4] <- as.numeric(ymd(x[,4])) # date of test as an integer day count
colnames(x)[4] <- "Time"
colnames(x)[3] <- "bblid"
colnames(x)[11] <- "Sex"
# ---- QC: blank out ALL rows of any subject who ever had an invalid
# administration ("N" in column 6), accuracy below 10, or correct-response
# RT above 6000 ms. try() guards min/max for subjects already all-NA.
ids <- unique(x$bblid)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
if (is.element("N",temp[,6])) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])}
try(if (min(temp$er40_cr,na.rm=TRUE) < 10) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])})
try(if (max(temp$er40_rtcr,na.rm=TRUE) > 6000) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])})
x[which(x$bblid == ids[i]),] <- temp}
x <- x[which(x$ntimepoints > 1),] # best is > 1
x <- x[which(x$timepoint < 5),] # best is < 6; NOTE(review): the test-retest
# section below uses < 6 — confirm which cutoff is intended here
x <- x[which(is.na(x$cnbAgemonths) == FALSE),]
# Polynomial terms for z-scored age and timepoint (used in lm() below).
Age <- scale(x$cnbAgemonths)
Age_Squared <- Age^2
Age_Cubed <- Age^3
TP <- scale(x$timepoint)
TP_Squared <- scale(x$timepoint)^2
TP_Cubed <- scale(x$timepoint)^3
#set.seed(2)
#temp <- amelia(x[,16:19], m=1)$imputations[[1]]
#x[,16:19] <- temp
x[,16:17] <- scale(x[,16:17]) # z-score er40_cr and er40_rtcr in place
# Cross-sectional age regression: residualize accuracy/RT on a cubic age
# polynomial, winsorize the extreme 0.5% tails, re-standardize.
ACC_r <- scale(winsor(lm(er40_cr~Age+Age_Squared+Age_Cubed,data=x)$residuals,trim=0.005))
RT_r <- scale(winsor(lm(er40_rtcr~Age+Age_Squared+Age_Cubed,data=x)$residuals,trim=0.005))
x <- data.frame(x,ACC_r,RT_r)
# arranges times to all start at 0 (days since each subject's first test)
ids <- unique(x$bblid)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
temp$Time <- temp$Time - min(temp$Time,na.rm=TRUE)
x[which(x$bblid == ids[i]),] <- temp}
# ---- Inter-test interval: days since the subject's previous administration.
# NOTE(review): columns are addressed by position — 18 = timepoint,
# 22 = the new `int` column, 4 = Time; fragile if the CSV layout changes.
int <- matrix(NA,dim(x)[1],1)
x <- data.frame(x,int)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
for (j in 1:dim(temp)[1]) {
if (temp[j,18] == 9) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 8),4])}
if (temp[j,18] == 8) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 7),4])}
if (temp[j,18] == 7) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 6),4])}
if (temp[j,18] == 6) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 5),4])}
if (temp[j,18] == 5) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 4),4])}
if (temp[j,18] == 4) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 3),4])}
if (temp[j,18] == 3) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 2),4])}
if (temp[j,18] == 2) {try(temp[j,22] <- temp[j,4])}
if (temp[j,18] == 1) {try(temp[j,22] <- NA)}  # no previous administration
x[which(x$bblid == ids[i]),] <- temp}}
max_int <- max(x$int,na.rm=TRUE)  # reused much further below for timepoint-1 rows
# Convert units: age months -> years, time days -> years.
x$cnbAgemonths <- x$cnbAgemonths/12
x$Time <- x$Time/365.25
Time_Squared <- scale(x$Time)^2
Age <- x$cnbAgemonths  # NOTE: Age is rebound to raw (unscaled) years here
int_ord <- as.factor(quantcut(x$int,3))  # interval tertiles for gam7/gam8
x <- data.frame(x,Time_Squared)
# raw basic models: sex-specific age smooths with a per-subject random
# intercept (nlme is attached via mgcv's Depends, providing lme()).
gam1 <- gamm(er40_cr~Sex+s(Age,by=Sex),random=list(bblid=~1),data=x)
gam2 <- gamm(er40_rtcr~Sex+s(Age,by=Sex),random=list(bblid=~1),data=x)
# models after age is regressed out cross-sectionally - give idea of practice effect
gam3 <- gamm(ACC_r~Sex+s(Age,by=Sex),random=list(bblid=~1),data=x)
gam4 <- gamm(RT_r~Sex+s(Age,by=Sex),random=list(bblid=~1),data=x)
# how does inter-test interval affect score?
gam5 <- gamm(ACC_r~Sex+s(int,by=Sex),random=list(bblid=~1),data=x)
gam6 <- gamm(RT_r~Sex+s(int,by=Sex),random=list(bblid=~1),data=x)
# how does interval affect the practice effect?
gam7 <- gamm(ACC_r~int_ord+s(Time,by=int_ord),random=list(bblid=~1),data=x)
gam8 <- gamm(RT_r~int_ord+s(Time,by=int_ord),random=list(bblid=~1),data=x)
# models after age is regressed out cross-sectionally - give idea of practice effect for TIME - this ignores practice/age interactions
gam9 <- gamm(ACC_r~Sex+s(Time,by=Sex),random=list(bblid=~1),data=x)
gam10 <- gamm(RT_r~Sex+s(Time,by=Sex),random=list(bblid=~1),data=x)
# MORE MODELS ARE MADE WITHIN THE pdf() CREATION SECTION
# Attach the data frame to each $gam component so visreg() can find it.
gam1$gam$data <- x
gam2$gam$data <- x
gam3$gam$data <- x
gam4$gam$data <- x
gam5$gam$data <- x
gam6$gam$data <- x
gam7$gam$data <- x
gam8$gam$data <- x
gam9$gam$data <- x
gam10$gam$data <- x
# old code for visually examining models (kept for reference)
#summary(lme(er40_cr~Age_Squared+(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))
#summary(lme(dprime~Age_Squared+(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))
#summary(lme(er40_rtcr~Age_Squared+(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))
#summary(lme(ACC_r~Age_Squared+(Sex+Age)^2,data=x,random = ~ 1 | bblid))
#summary(lme(dprime_r~Age_Squared+(Sex+Age)^2,data=x,random = ~ 1 | bblid))
#summary(lme(RT_r~Age_Squared+(Sex+Age)^2,data=x,random = ~ 1 | bblid))
# ---- Write all repeated-measures visuals and model tables into one PDF ----
pdf("Repeated-Measures_Visuals_ER40.pdf",width=10,height=6)
visreg(gam1$gam,xvar="Age",by="Sex",overlay=TRUE,ylab="Accuracy",partial=FALSE,rug=FALSE) #main interest
visreg(gam2$gam,xvar="Age",by="Sex",overlay=TRUE,ylab="Response Time",partial=FALSE,rug=FALSE) #main interest
visreg(gam3$gam,xvar="Age",by="Sex",overlay=TRUE,ylab="Accuracy (age-regressed)",partial=FALSE,rug=FALSE) # rel'n with age after age-regress
visreg(gam4$gam,xvar="Age",by="Sex",overlay=TRUE,ylab="Response Time (age-regressed)",partial=FALSE,rug=FALSE) # rel'n with age after age-regress
visreg(gam9$gam,xvar="Time",by="Sex",overlay=TRUE,ylab="Accuracy (age-regressed)",partial=FALSE,rug=FALSE,strip.names=TRUE) # rel'n with time after age-regress
visreg(gam10$gam,xvar="Time",by="Sex",overlay=TRUE,ylab="Response Time (age-regressed)",partial=FALSE,rug=FALSE,strip.names=TRUE) # rel'n with time after age-regress
visreg(gam5$gam,xvar="int",by="Sex",overlay=TRUE,ylab="Accuracy (age-regressed)",xlab="Time Since Previous Administration",partial=FALSE,rug=FALSE) # interval
visreg(gam6$gam,xvar="int",by="Sex",overlay=TRUE,ylab="Response Time (age-regressed)",xlab="Time Since Previous Administration",partial=FALSE,rug=FALSE) # interval
visreg(gam7$gam,xvar="Time",by="int_ord",overlay=TRUE,ylab="Accuracy (age-regressed)",partial=FALSE,rug=FALSE,strip.names=TRUE) # how does practice vary by interval?
visreg(gam8$gam,xvar="Time",by="int_ord",overlay=TRUE,ylab="Response Time (age-regressed)",partial=FALSE,rug=FALSE,strip.names=TRUE) # how does practice vary by interval?
# new models built within visreg() - these are only to see the linear effects
visreg(lme(er40_cr~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid),xvar="cnbAgemonths",by="Sex",overlay=TRUE,xlab="Age (Years)",ylab="Accuracy") # linear
visreg(lme(er40_rtcr~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid),xvar="cnbAgemonths",by="Sex",overlay=TRUE,xlab="Age (Years)",ylab="Response Time") # linear
visreg(lme(ACC_r~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid),xvar="cnbAgemonths",by="Sex",overlay=TRUE,xlab="Age (Years)",ylab="Accuracy (Age-Regressed)") # linear
visreg(lme(RT_r~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid),xvar="cnbAgemonths",by="Sex",overlay=TRUE,xlab="Age (Years)",ylab="Response Time (Age-Regressed)") # linear
visreg(lme(ACC_r~(Sex+int)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude),xvar="int",by="Sex",overlay=TRUE,xlab="Time Since Last Administration",ylab="Accuracy (Age-Regressed)") # interval
visreg(lme(RT_r~(Sex+int)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude),xvar="int",by="Sex",overlay=TRUE,xlab="Time Since Last Administration",ylab="Response Time (Age-Regressed)") # interval
# below show actual mixed model results (coefficient tables drawn as pages)
plot.new()
title("dependent variable = er40_cr (raw Accuracy)")
grid.table(summary(lme(er40_cr~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))$tTable)
plot.new()
title("dependent variable = er40_rtcr (mean raw response time)")
grid.table(summary(lme(er40_rtcr~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))$tTable)
plot.new()
title("dependent variable = Accuracy (age-regressed)")
grid.table(summary(lme(ACC_r~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))$tTable)
plot.new()
title("dependent variable = response time (age-regressed)")
grid.table(summary(lme(RT_r~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))$tTable)
# below we need people at time point 1 to have an interval score (set at highest in data set)
# NOTE(review): this mutates x$int in place; every model below sees imputed intervals.
x$int[is.na(x$int) == TRUE] <- max_int
plot.new()
title("dependent variable = er40_cr (raw Accuracy)")
grid.table(summary(lme(er40_cr~(int+Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude))$tTable) # try including interval
plot.new()
title("dependent variable = er40_rtcr (mean raw response time)")
grid.table(summary(lme(er40_rtcr~(int+Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude))$tTable) # try including interval
# these are the main models for practice effects - how do the number of previous admins and inter-admin interval affect age-regressed scores?
plot.new()
title("dependent variable = Accuracy (age-regressed)")
grid.table(summary(lme(ACC_r~(int+timepoint)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude))$tTable) # big main model: age plus interval plus prev admins
plot.new()
title("dependent variable = response time (age-regressed)")
grid.table(summary(lme(RT_r~(int+timepoint)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude))$tTable) # big main model: age plus interval plus prev admins
# building these models here because if we built them up top with the other models, no one would have an interval score for time point 1
mod11 <- lme(ACC_r~(int+timepoint)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude)
mod12 <- lme(RT_r~(int+timepoint)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude)
# mod13 and mod14 are without the time point 1 interval (timepoint becomes a factor)
x$timepoint <- as.factor(x$timepoint)
mod13 <- lme(ACC_r~(int+timepoint)^2,data=x[which(x$timepoint != "1"),],random = ~ 1 | bblid,na.action=na.exclude)
mod14 <- lme(RT_r~(int+timepoint)^2,data=x[which(x$timepoint != "1"),],random = ~ 1 | bblid,na.action=na.exclude)
# plot the above four models in various ways.
visreg(mod11,xvar="int",by="timepoint",overlay=TRUE,xlab="Time Since Last Administration",ylab="Accuracy (Age-Regressed)",strip.names=TRUE)
visreg(mod12,xvar="int",by="timepoint",overlay=TRUE,xlab="Time Since Last Administration",ylab="Response Time (Age-Regressed)",strip.names=TRUE)
visreg(mod11,xvar="timepoint",by="int",overlay=TRUE,xlab="Timepoint (administration number)",ylab="Accuracy (Age-Regressed)",strip.names=TRUE)
visreg(mod12,xvar="timepoint",by="int",overlay=TRUE,xlab="Timepoint (administration number)",ylab="Response Time (Age-Regressed)",strip.names=TRUE)
visreg(mod13,xvar="int",by="timepoint",overlay=TRUE,xlab="Time Since Last Administration",ylab="Accuracy (Age-Regressed)",strip.names=TRUE)
visreg(mod14,xvar="int",by="timepoint",overlay=TRUE,xlab="Time Since Last Administration",ylab="Response Time (Age-Regressed)",strip.names=TRUE)
visreg(mod13,xvar="timepoint",by="int",overlay=TRUE,xlab="Timepoint (administration number)",ylab="Accuracy (Age-Regressed)",strip.names=TRUE)
visreg(mod14,xvar="timepoint",by="int",overlay=TRUE,xlab="Timepoint (administration number)",ylab="Response Time (Age-Regressed)",strip.names=TRUE)
dev.off()
# make wide and run test-retest stats
# ---- Reload and re-clean from scratch (mirrors the section above, but
# keeps up to 5 timepoints and imputes the timepoint-1 interval).
# NOTE(review): depends on `max_int` computed in the first half of the
# script; this section cannot be run standalone.
x <- read.csv("CNB Longitudinal/er40_20191113.csv")
x[,4] <- as.numeric(ymd(x[,4])) # date of test as an integer day count
colnames(x)[4] <- "Time"
colnames(x)[3] <- "bblid"
colnames(x)[11] <- "Sex"
# Same subject-level QC as above.
ids <- unique(x$bblid)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
if (is.element("N",temp[,6])) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])}
try(if (min(temp$er40_cr,na.rm=TRUE) < 10) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])})
try(if (max(temp$er40_rtcr,na.rm=TRUE) > 6000) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])})
x[which(x$bblid == ids[i]),] <- temp}
x <- x[which(x$ntimepoints > 1),] # best is > 1
x <- x[which(x$timepoint < 6),] # best is < 6 (first section used < 5)
x <- x[which(is.na(x$cnbAgemonths) == FALSE),]
Age <- scale(x$cnbAgemonths)
Age_Squared <- Age^2
Age_Cubed <- Age^3
TP <- scale(x$timepoint)
TP_Squared <- scale(x$timepoint)^2
TP_Cubed <- scale(x$timepoint)^3
#set.seed(2)
#temp <- amelia(x[,16:19], m=1)$imputations[[1]]
#x[,16:19] <- temp
x[,16:17] <- scale(x[,16:17])
#regressing out age (cubic polynomial), winsorized 0.5% tails
ACC_ar <- scale(winsor(lm(er40_cr~Age+Age_Squared+Age_Cubed,data=x)$residuals,trim=0.005))
RT_ar <- scale(winsor(lm(er40_rtcr~Age+Age_Squared+Age_Cubed,data=x)$residuals,trim=0.005))
x <- data.frame(x,ACC_ar,RT_ar)
# arranges times to all start at 0
ids <- unique(x$bblid)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
temp$Time <- temp$Time - min(temp$Time,na.rm=TRUE)
x[which(x$bblid == ids[i]),] <- temp}
# Inter-test interval again; position-indexed: 18 = timepoint, 22 = int.
int <- matrix(NA,dim(x)[1],1)
x <- data.frame(x,int)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
for (j in 1:dim(temp)[1]) {
if (temp[j,18] == 9) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 8),4])}
if (temp[j,18] == 8) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 7),4])}
if (temp[j,18] == 7) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 6),4])}
if (temp[j,18] == 6) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 5),4])}
if (temp[j,18] == 5) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 4),4])}
if (temp[j,18] == 4) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 3),4])}
if (temp[j,18] == 3) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 2),4])}
if (temp[j,18] == 2) {try(temp[j,22] <- temp[j,4])}
if (temp[j,18] == 1) {try(temp[j,22] <- max_int)}  # impute longest interval seen earlier
x[which(x$bblid == ids[i]),] <- temp}}
int_sq <- scale(x[,22])^2
int_cub <- scale(x[,22])^3
#regressing out age and practice (age + timepoint + interval polynomials)
ACC_apr <- scale(winsor(lm(er40_cr~Age+Age_Squared+Age_Cubed+TP+TP_Squared+TP_Cubed+int+int_sq+int_cub,data=x)$residuals,trim=0.005))
RT_apr <- scale(winsor(lm(er40_rtcr~Age+Age_Squared+Age_Cubed+TP+TP_Squared+TP_Cubed+int+int_sq+int_cub,data=x)$residuals,trim=0.005))
x <- data.frame(x,ACC_apr,RT_apr)
# ---- Reshape long -> wide: one data frame per timepoint, then full
# outer joins on bblid so every subject keeps a row even with missing
# visits. merge() will suffix duplicated column names (.x/.y); they are
# renamed wholesale immediately after this block.
x1 <- x[which(x$timepoint == 1),]
x2 <- x[which(x$timepoint == 2),]
x3 <- x[which(x$timepoint == 3),]
x4 <- x[which(x$timepoint == 4),]
x5 <- x[which(x$timepoint == 5),]
x <- merge(x1,x2,by="bblid",all=TRUE)
x <- merge(x,x3,by="bblid",all=TRUE)
x <- merge(x,x4,by="bblid",all=TRUE)
x <- merge(x,x5,by="bblid",all=TRUE)
colnames(x) <- c(
"bblid",
"datasetid_TIME_1",
"siteid_TIME_1",
"Time_TIME_1",
"battery_TIME_1",
"valid_code_TIME_1",
"age_TIME_1",
"education_TIME_1",
"feducation_TIME_1",
"meducation_TIME_1",
"Sex_TIME_1",
"handedness_TIME_1",
"cnbAgemonths_TIME_1",
"er40_genus_TIME_1",
"er40_valid_TIME_1",
"er40_cr_TIME_1",
"er40_rtcr_TIME_1",
"timepoint_TIME_1",
"ntimepoints_TIME_1",
"ACC_ar_TIME_1",
"RT_ar_TIME_1",
"int_TIME_1",
"ACC_apr_TIME_1",
"RT_apr_TIME_1",
"datasetid_TIME_2",
"siteid_TIME_2",
"Time_TIME_2",
"battery_TIME_2",
"valid_code_TIME_2",
"age_TIME_2",
"education_TIME_2",
"feducation_TIME_2",
"meducation_TIME_2",
"Sex_TIME_2",
"handedness_TIME_2",
"cnbAgemonths_TIME_2",
"er40_genus_TIME_2",
"er40_valid_TIME_2",
"er40_cr_TIME_2",
"er40_rtcr_TIME_2",
"timepoint_TIME_2",
"ntimepoints_TIME_2",
"ACC_ar_TIME_2",
"RT_ar_TIME_2",
"int_TIME_2",
"ACC_apr_TIME_2",
"RT_apr_TIME_2",
"datasetid_TIME_3",
"siteid_TIME_3",
"Time_TIME_3",
"battery_TIME_3",
"valid_code_TIME_3",
"age_TIME_3",
"education_TIME_3",
"feducation_TIME_3",
"meducation_TIME_3",
"Sex_TIME_3",
"handedness_TIME_3",
"cnbAgemonths_TIME_3",
"er40_genus_TIME_3",
"er40_valid_TIME_3",
"er40_cr_TIME_3",
"er40_rtcr_TIME_3",
"timepoint_TIME_3",
"ntimepoints_TIME_3",
"ACC_ar_TIME_3",
"RT_ar_TIME_3",
"int_TIME_3",
"ACC_apr_TIME_3",
"RT_apr_TIME_3",
"datasetid_TIME_4",
"siteid_TIME_4",
"Time_TIME_4",
"battery_TIME_4",
"valid_code_TIME_4",
"age_TIME_4",
"education_TIME_4",
"feducation_TIME_4",
"meducation_TIME_4",
"Sex_TIME_4",
"handedness_TIME_4",
"cnbAgemonths_TIME_4",
"er40_genus_TIME_4",
"er40_valid_TIME_4",
"er40_cr_TIME_4",
"er40_rtcr_TIME_4",
"timepoint_TIME_4",
"ntimepoints_TIME_4",
"ACC_ar_TIME_4",
"RT_ar_TIME_4",
"int_TIME_4",
"ACC_apr_TIME_4",
"RT_apr_TIME_4",
"datasetid_TIME_5",
"siteid_TIME_5",
"Time_TIME_5",
"battery_TIME_5",
"valid_code_TIME_5",
"age_TIME_5",
"education_TIME_5",
"feducation_TIME_5",
"meducation_TIME_5",
"Sex_TIME_5",
"handedness_TIME_5",
"cnbAgemonths_TIME_5",
"er40_genus_TIME_5",
"er40_valid_TIME_5",
"er40_cr_TIME_5",
"er40_rtcr_TIME_5",
"timepoint_TIME_5",
"ntimepoints_TIME_5",
"ACC_ar_TIME_5",
"RT_ar_TIME_5",
"int_TIME_5",
"ACC_apr_TIME_5",
"RT_apr_TIME_5")
# ---- Per-measure wide frames (columns = timepoints 1..5) ----
# NOTE(review): the raw ACC and RT frames are built but never used below
# (only the _ar and _apr versions feed the ICCs and pairs panels).
ACC <- data.frame(
x$er40_cr_TIME_1,
x$er40_cr_TIME_2,
x$er40_cr_TIME_3,
x$er40_cr_TIME_4,
x$er40_cr_TIME_5)
RT <- data.frame(
x$er40_rtcr_TIME_1,
x$er40_rtcr_TIME_2,
x$er40_rtcr_TIME_3,
x$er40_rtcr_TIME_4,
x$er40_rtcr_TIME_5)
ACC_ar <- data.frame(
x$ACC_ar_TIME_1,
x$ACC_ar_TIME_2,
x$ACC_ar_TIME_3,
x$ACC_ar_TIME_4,
x$ACC_ar_TIME_5)
RT_ar <- data.frame(
x$RT_ar_TIME_1,
x$RT_ar_TIME_2,
x$RT_ar_TIME_3,
x$RT_ar_TIME_4,
x$RT_ar_TIME_5)
ACC_apr <- data.frame(
x$ACC_apr_TIME_1,
x$ACC_apr_TIME_2,
x$ACC_apr_TIME_3,
x$ACC_apr_TIME_4,
x$ACC_apr_TIME_5)
RT_apr <- data.frame(
x$RT_apr_TIME_1,
x$RT_apr_TIME_2,
x$RT_apr_TIME_3,
x$RT_apr_TIME_4,
x$RT_apr_TIME_5)
# ---- Test-retest reliability: two-way agreement ICC (irr::icc) over the
# first 2, 3, 4, and 5 timepoints. Rows of `res`: ACC_apr, RT_apr,
# ACC_ar, RT_ar; columns: number of timepoints included.
res <- matrix(NA,4,4)
for (i in 2:5) {
res[1,(i-1)] <- icc(ACC_apr[,1:i],type="agreement",model="twoway")$value
res[2,(i-1)] <- icc(RT_apr[,1:i],type="agreement",model="twoway")$value
res[3,(i-1)] <- icc(ACC_ar[,1:i],type="agreement",model="twoway")$value
res[4,(i-1)] <- icc(RT_ar[,1:i],type="agreement",model="twoway")$value
}
header <- c("2 Timepoints","3 Timepoints","4 Timepoints","5 Timepoints")
TestRetest_for_ER40 <- c("","Accuracy (age- & practice-regressed)","Response Time (age- & practice-regressed)","Accuracy (age-regressed)","Response Time (age-regressed)")
# Render the ICC table and the between-timepoint scatter matrices to PDF.
pdf("TestRetest_ER40.pdf",width=15,height=15)
grid.table(cbind(TestRetest_for_ER40,rbind(header,round(res,3))))
pairs.panels(ACC_apr,lm=TRUE)
pairs.panels(RT_apr,lm=TRUE)
dev.off()
| /scripts/cognitive/tyler_clean_CNB/CNB_repeated_measures_30January2020_ER40.R | no_license | PennBBL/pncLongitudinalPsychosis | R | false | false | 18,115 | r |
library(psych)
library(Amelia)
library(ggplot2)
library(gridExtra)
library(CorrMixed)
library(gtools)
library(mgcv)
library(visreg)
library(lubridate)
library(irr)
x <- read.csv("CNB Longitudinal/er40_20191113.csv")
x[,4] <- as.numeric(ymd(x[,4])) # date of test
colnames(x)[4] <- "Time"
colnames(x)[3] <- "bblid"
colnames(x)[11] <- "Sex"
ids <- unique(x$bblid)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
if (is.element("N",temp[,6])) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])}
try(if (min(temp$er40_cr,na.rm=TRUE) < 10) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])})
try(if (max(temp$er40_rtcr,na.rm=TRUE) > 6000) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])})
x[which(x$bblid == ids[i]),] <- temp}
x <- x[which(x$ntimepoints > 1),] # best is > 1
x <- x[which(x$timepoint < 5),] # best is < 6
x <- x[which(is.na(x$cnbAgemonths) == FALSE),]
Age <- scale(x$cnbAgemonths)
Age_Squared <- Age^2
Age_Cubed <- Age^3
TP <- scale(x$timepoint)
TP_Squared <- scale(x$timepoint)^2
TP_Cubed <- scale(x$timepoint)^3
#set.seed(2)
#temp <- amelia(x[,16:19], m=1)$imputations[[1]]
#x[,16:19] <- temp
x[,16:17] <- scale(x[,16:17])
ACC_r <- scale(winsor(lm(er40_cr~Age+Age_Squared+Age_Cubed,data=x)$residuals,trim=0.005))
RT_r <- scale(winsor(lm(er40_rtcr~Age+Age_Squared+Age_Cubed,data=x)$residuals,trim=0.005))
x <- data.frame(x,ACC_r,RT_r)
# arranges times to all start at 0
ids <- unique(x$bblid)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
temp$Time <- temp$Time - min(temp$Time,na.rm=TRUE)
x[which(x$bblid == ids[i]),] <- temp}
int <- matrix(NA,dim(x)[1],1)
x <- data.frame(x,int)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
for (j in 1:dim(temp)[1]) {
if (temp[j,18] == 9) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 8),4])}
if (temp[j,18] == 8) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 7),4])}
if (temp[j,18] == 7) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 6),4])}
if (temp[j,18] == 6) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 5),4])}
if (temp[j,18] == 5) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 4),4])}
if (temp[j,18] == 4) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 3),4])}
if (temp[j,18] == 3) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 2),4])}
if (temp[j,18] == 2) {try(temp[j,22] <- temp[j,4])}
if (temp[j,18] == 1) {try(temp[j,22] <- NA)}
x[which(x$bblid == ids[i]),] <- temp}}
max_int <- max(x$int,na.rm=TRUE)
x$cnbAgemonths <- x$cnbAgemonths/12
x$Time <- x$Time/365.25
Time_Squared <- scale(x$Time)^2
Age <- x$cnbAgemonths
int_ord <- as.factor(quantcut(x$int,3))
x <- data.frame(x,Time_Squared)
# raw basic models
gam1 <- gamm(er40_cr~Sex+s(Age,by=Sex),random=list(bblid=~1),data=x)
gam2 <- gamm(er40_rtcr~Sex+s(Age,by=Sex),random=list(bblid=~1),data=x)
# models after age is regressed out cross-sectionally - give idea of practice effect
gam3 <- gamm(ACC_r~Sex+s(Age,by=Sex),random=list(bblid=~1),data=x)
gam4 <- gamm(RT_r~Sex+s(Age,by=Sex),random=list(bblid=~1),data=x)
# how does inter-test interval affect score?
gam5 <- gamm(ACC_r~Sex+s(int,by=Sex),random=list(bblid=~1),data=x)
gam6 <- gamm(RT_r~Sex+s(int,by=Sex),random=list(bblid=~1),data=x)
# how does interval affect the practice effect?
gam7 <- gamm(ACC_r~int_ord+s(Time,by=int_ord),random=list(bblid=~1),data=x)
gam8 <- gamm(RT_r~int_ord+s(Time,by=int_ord),random=list(bblid=~1),data=x)
# models after age is regressed out cross-sectionally - give idea of practice effect for TIME - this ignores practice/age interactions
gam9 <- gamm(ACC_r~Sex+s(Time,by=Sex),random=list(bblid=~1),data=x)
gam10 <- gamm(RT_r~Sex+s(Time,by=Sex),random=list(bblid=~1),data=x)
# MORE MODELS ARE MADE WITHIN THE pdf() CREATION SECTION
gam1$gam$data <- x
gam2$gam$data <- x
gam3$gam$data <- x
gam4$gam$data <- x
gam5$gam$data <- x
gam6$gam$data <- x
gam7$gam$data <- x
gam8$gam$data <- x
gam9$gam$data <- x
gam10$gam$data <- x
# old code for visually examining models
#summary(lme(er40_cr~Age_Squared+(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))
#summary(lme(dprime~Age_Squared+(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))
#summary(lme(er40_rtcr~Age_Squared+(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))
#summary(lme(ACC_r~Age_Squared+(Sex+Age)^2,data=x,random = ~ 1 | bblid))
#summary(lme(dprime_r~Age_Squared+(Sex+Age)^2,data=x,random = ~ 1 | bblid))
#summary(lme(RT_r~Age_Squared+(Sex+Age)^2,data=x,random = ~ 1 | bblid))
pdf("Repeated-Measures_Visuals_ER40.pdf",width=10,height=6)
visreg(gam1$gam,xvar="Age",by="Sex",overlay=TRUE,ylab="Accuracy",partial=FALSE,rug=FALSE) #main interest
visreg(gam2$gam,xvar="Age",by="Sex",overlay=TRUE,ylab="Response Time",partial=FALSE,rug=FALSE) #main interest
visreg(gam3$gam,xvar="Age",by="Sex",overlay=TRUE,ylab="Accuracy (age-regressed)",partial=FALSE,rug=FALSE) # rel'n with age after age-regress
visreg(gam4$gam,xvar="Age",by="Sex",overlay=TRUE,ylab="Response Time (age-regressed)",partial=FALSE,rug=FALSE) # rel'n with age after age-regress
visreg(gam9$gam,xvar="Time",by="Sex",overlay=TRUE,ylab="Accuracy (age-regressed)",partial=FALSE,rug=FALSE,strip.names=TRUE) # rel'n with time after age-regress
visreg(gam10$gam,xvar="Time",by="Sex",overlay=TRUE,ylab="Response Time (age-regressed)",partial=FALSE,rug=FALSE,strip.names=TRUE) # rel'n with time after age-regress
visreg(gam5$gam,xvar="int",by="Sex",overlay=TRUE,ylab="Accuracy (age-regressed)",xlab="Time Since Previous Administration",partial=FALSE,rug=FALSE) # interval
visreg(gam6$gam,xvar="int",by="Sex",overlay=TRUE,ylab="Response Time (age-regressed)",xlab="Time Since Previous Administration",partial=FALSE,rug=FALSE) # interval
visreg(gam7$gam,xvar="Time",by="int_ord",overlay=TRUE,ylab="Accuracy (age-regressed)",partial=FALSE,rug=FALSE,strip.names=TRUE) # how does practice vary by interval?
visreg(gam8$gam,xvar="Time",by="int_ord",overlay=TRUE,ylab="Response Time (age-regressed)",partial=FALSE,rug=FALSE,strip.names=TRUE) # how does practice vary by interval?
# new models built within visreg() - these are only to see the linear effects
visreg(lme(er40_cr~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid),xvar="cnbAgemonths",by="Sex",overlay=TRUE,xlab="Age (Years)",ylab="Accuracy") # linear
visreg(lme(er40_rtcr~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid),xvar="cnbAgemonths",by="Sex",overlay=TRUE,xlab="Age (Years)",ylab="Response Time") # linear
visreg(lme(ACC_r~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid),xvar="cnbAgemonths",by="Sex",overlay=TRUE,xlab="Age (Years)",ylab="Accuracy (Age-Regressed)") # linear
visreg(lme(RT_r~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid),xvar="cnbAgemonths",by="Sex",overlay=TRUE,xlab="Age (Years)",ylab="Response Time (Age-Regressed)") # linear
visreg(lme(ACC_r~(Sex+int)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude),xvar="int",by="Sex",overlay=TRUE,xlab="Time Since Last Administration",ylab="Accuracy (Age-Regressed)") # interval
visreg(lme(RT_r~(Sex+int)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude),xvar="int",by="Sex",overlay=TRUE,xlab="Time Since Last Administration",ylab="Response Time (Age-Regressed)") # interval
# below show actual mixed model results
plot.new()
title("dependent variable = er40_cr (raw Accuracy)")
grid.table(summary(lme(er40_cr~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))$tTable)
plot.new()
title("dependent variable = er40_rtcr (mean raw response time)")
grid.table(summary(lme(er40_rtcr~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))$tTable)
plot.new()
title("dependent variable = Accuracy (age-regressed)")
grid.table(summary(lme(ACC_r~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))$tTable)
plot.new()
title("dependent variable = response time (age-regressed)")
grid.table(summary(lme(RT_r~(Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid))$tTable)
# below we need people at time point 1 to have an interval score (set at highest in data set)
x$int[is.na(x$int) == TRUE] <- max_int
plot.new()
title("dependent variable = er40_cr (raw Accuracy)")
grid.table(summary(lme(er40_cr~(int+Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude))$tTable) # try including interval
plot.new()
title("dependent variable = er40_rtcr (mean raw response time)")
grid.table(summary(lme(er40_rtcr~(int+Sex+cnbAgemonths)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude))$tTable) # try including interval
# these are the main models for practice effects - how do the number of previous admins and inter-admin interval affect age-regressed scores?
plot.new()
title("dependent variable = Accuracy (age-regressed)")
grid.table(summary(lme(ACC_r~(int+timepoint)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude))$tTable) # big main model: age plus interval plus prev admins
plot.new()
title("dependent variable = response time (age-regressed)")
grid.table(summary(lme(RT_r~(int+timepoint)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude))$tTable) # big main model: age plus interval plus prev admins
# building these models here because if we built them up top with the other models, no one would have an interval score for time point 1
mod11 <- lme(ACC_r~(int+timepoint)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude)
mod12 <- lme(RT_r~(int+timepoint)^2,data=x,random = ~ 1 | bblid,na.action=na.exclude)
# mod16 through mod18 are without the time point 1 interval
x$timepoint <- as.factor(x$timepoint)
mod13 <- lme(ACC_r~(int+timepoint)^2,data=x[which(x$timepoint != "1"),],random = ~ 1 | bblid,na.action=na.exclude)
mod14 <- lme(RT_r~(int+timepoint)^2,data=x[which(x$timepoint != "1"),],random = ~ 1 | bblid,na.action=na.exclude)
# plot the above six models in various ways.
visreg(mod11,xvar="int",by="timepoint",overlay=TRUE,xlab="Time Since Last Administration",ylab="Accuracy (Age-Regressed)",strip.names=TRUE)
visreg(mod12,xvar="int",by="timepoint",overlay=TRUE,xlab="Time Since Last Administration",ylab="Response Time (Age-Regressed)",strip.names=TRUE)
visreg(mod11,xvar="timepoint",by="int",overlay=TRUE,xlab="Timepoint (administration number)",ylab="Accuracy (Age-Regressed)",strip.names=TRUE)
visreg(mod12,xvar="timepoint",by="int",overlay=TRUE,xlab="Timepoint (administration number)",ylab="Response Time (Age-Regressed)",strip.names=TRUE)
visreg(mod13,xvar="int",by="timepoint",overlay=TRUE,xlab="Time Since Last Administration",ylab="Accuracy (Age-Regressed)",strip.names=TRUE)
visreg(mod14,xvar="int",by="timepoint",overlay=TRUE,xlab="Time Since Last Administration",ylab="Response Time (Age-Regressed)",strip.names=TRUE)
visreg(mod13,xvar="timepoint",by="int",overlay=TRUE,xlab="Timepoint (administration number)",ylab="Accuracy (Age-Regressed)",strip.names=TRUE)
visreg(mod14,xvar="timepoint",by="int",overlay=TRUE,xlab="Timepoint (administration number)",ylab="Response Time (Age-Regressed)",strip.names=TRUE)
dev.off()
# make wide and run test-retest stats
x <- read.csv("CNB Longitudinal/er40_20191113.csv")
x[,4] <- as.numeric(ymd(x[,4])) # date of test
colnames(x)[4] <- "Time"
colnames(x)[3] <- "bblid"
colnames(x)[11] <- "Sex"
ids <- unique(x$bblid)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
if (is.element("N",temp[,6])) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])}
try(if (min(temp$er40_cr,na.rm=TRUE) < 10) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])})
try(if (max(temp$er40_rtcr,na.rm=TRUE) > 6000) {temp <- matrix(NA,dim(temp)[1],dim(temp)[2])})
x[which(x$bblid == ids[i]),] <- temp}
x <- x[which(x$ntimepoints > 1),] # best is > 1
x <- x[which(x$timepoint < 6),] # best is < 6
x <- x[which(is.na(x$cnbAgemonths) == FALSE),]
Age <- scale(x$cnbAgemonths)
Age_Squared <- Age^2
Age_Cubed <- Age^3
TP <- scale(x$timepoint)
TP_Squared <- scale(x$timepoint)^2
TP_Cubed <- scale(x$timepoint)^3
#set.seed(2)
#temp <- amelia(x[,16:19], m=1)$imputations[[1]]
#x[,16:19] <- temp
x[,16:17] <- scale(x[,16:17])
#regressing out age
ACC_ar <- scale(winsor(lm(er40_cr~Age+Age_Squared+Age_Cubed,data=x)$residuals,trim=0.005))
RT_ar <- scale(winsor(lm(er40_rtcr~Age+Age_Squared+Age_Cubed,data=x)$residuals,trim=0.005))
x <- data.frame(x,ACC_ar,RT_ar)
# arranges times to all start at 0
ids <- unique(x$bblid)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
temp$Time <- temp$Time - min(temp$Time,na.rm=TRUE)
x[which(x$bblid == ids[i]),] <- temp}
int <- matrix(NA,dim(x)[1],1)
x <- data.frame(x,int)
for (i in 1:length(ids)) {
temp <- x[which(x$bblid == ids[i]),]
for (j in 1:dim(temp)[1]) {
if (temp[j,18] == 9) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 8),4])}
if (temp[j,18] == 8) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 7),4])}
if (temp[j,18] == 7) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 6),4])}
if (temp[j,18] == 6) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 5),4])}
if (temp[j,18] == 5) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 4),4])}
if (temp[j,18] == 4) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 3),4])}
if (temp[j,18] == 3) {try(temp[j,22] <- temp[j,4] - temp[which(temp$timepoint == 2),4])}
if (temp[j,18] == 2) {try(temp[j,22] <- temp[j,4])}
if (temp[j,18] == 1) {try(temp[j,22] <- max_int)}
x[which(x$bblid == ids[i]),] <- temp}}
int_sq <- scale(x[,22])^2
int_cub <- scale(x[,22])^3
#regressing out age and practice
ACC_apr <- scale(winsor(lm(er40_cr~Age+Age_Squared+Age_Cubed+TP+TP_Squared+TP_Cubed+int+int_sq+int_cub,data=x)$residuals,trim=0.005))
RT_apr <- scale(winsor(lm(er40_rtcr~Age+Age_Squared+Age_Cubed+TP+TP_Squared+TP_Cubed+int+int_sq+int_cub,data=x)$residuals,trim=0.005))
x <- data.frame(x,ACC_apr,RT_apr)
x1 <- x[which(x$timepoint == 1),]
x2 <- x[which(x$timepoint == 2),]
x3 <- x[which(x$timepoint == 3),]
x4 <- x[which(x$timepoint == 4),]
x5 <- x[which(x$timepoint == 5),]
x <- merge(x1,x2,by="bblid",all=TRUE)
x <- merge(x,x3,by="bblid",all=TRUE)
x <- merge(x,x4,by="bblid",all=TRUE)
x <- merge(x,x5,by="bblid",all=TRUE)
#create columns names because there are duplicate columns in current frame
colnames(x) <- c(
"bblid",
"datasetid_TIME_1",
"siteid_TIME_1",
"Time_TIME_1",
"battery_TIME_1",
"valid_code_TIME_1",
"age_TIME_1",
"education_TIME_1",
"feducation_TIME_1",
"meducation_TIME_1",
"Sex_TIME_1",
"handedness_TIME_1",
"cnbAgemonths_TIME_1",
"er40_genus_TIME_1",
"er40_valid_TIME_1",
"er40_cr_TIME_1",
"er40_rtcr_TIME_1",
"timepoint_TIME_1",
"ntimepoints_TIME_1",
"ACC_ar_TIME_1",
"RT_ar_TIME_1",
"int_TIME_1",
"ACC_apr_TIME_1",
"RT_apr_TIME_1",
"datasetid_TIME_2",
"siteid_TIME_2",
"Time_TIME_2",
"battery_TIME_2",
"valid_code_TIME_2",
"age_TIME_2",
"education_TIME_2",
"feducation_TIME_2",
"meducation_TIME_2",
"Sex_TIME_2",
"handedness_TIME_2",
"cnbAgemonths_TIME_2",
"er40_genus_TIME_2",
"er40_valid_TIME_2",
"er40_cr_TIME_2",
"er40_rtcr_TIME_2",
"timepoint_TIME_2",
"ntimepoints_TIME_2",
"ACC_ar_TIME_2",
"RT_ar_TIME_2",
"int_TIME_2",
"ACC_apr_TIME_2",
"RT_apr_TIME_2",
"datasetid_TIME_3",
"siteid_TIME_3",
"Time_TIME_3",
"battery_TIME_3",
"valid_code_TIME_3",
"age_TIME_3",
"education_TIME_3",
"feducation_TIME_3",
"meducation_TIME_3",
"Sex_TIME_3",
"handedness_TIME_3",
"cnbAgemonths_TIME_3",
"er40_genus_TIME_3",
"er40_valid_TIME_3",
"er40_cr_TIME_3",
"er40_rtcr_TIME_3",
"timepoint_TIME_3",
"ntimepoints_TIME_3",
"ACC_ar_TIME_3",
"RT_ar_TIME_3",
"int_TIME_3",
"ACC_apr_TIME_3",
"RT_apr_TIME_3",
"datasetid_TIME_4",
"siteid_TIME_4",
"Time_TIME_4",
"battery_TIME_4",
"valid_code_TIME_4",
"age_TIME_4",
"education_TIME_4",
"feducation_TIME_4",
"meducation_TIME_4",
"Sex_TIME_4",
"handedness_TIME_4",
"cnbAgemonths_TIME_4",
"er40_genus_TIME_4",
"er40_valid_TIME_4",
"er40_cr_TIME_4",
"er40_rtcr_TIME_4",
"timepoint_TIME_4",
"ntimepoints_TIME_4",
"ACC_ar_TIME_4",
"RT_ar_TIME_4",
"int_TIME_4",
"ACC_apr_TIME_4",
"RT_apr_TIME_4",
"datasetid_TIME_5",
"siteid_TIME_5",
"Time_TIME_5",
"battery_TIME_5",
"valid_code_TIME_5",
"age_TIME_5",
"education_TIME_5",
"feducation_TIME_5",
"meducation_TIME_5",
"Sex_TIME_5",
"handedness_TIME_5",
"cnbAgemonths_TIME_5",
"er40_genus_TIME_5",
"er40_valid_TIME_5",
"er40_cr_TIME_5",
"er40_rtcr_TIME_5",
"timepoint_TIME_5",
"ntimepoints_TIME_5",
"ACC_ar_TIME_5",
"RT_ar_TIME_5",
"int_TIME_5",
"ACC_apr_TIME_5",
"RT_apr_TIME_5")
ACC <- data.frame(
x$er40_cr_TIME_1,
x$er40_cr_TIME_2,
x$er40_cr_TIME_3,
x$er40_cr_TIME_4,
x$er40_cr_TIME_5)
RT <- data.frame(
x$er40_rtcr_TIME_1,
x$er40_rtcr_TIME_2,
x$er40_rtcr_TIME_3,
x$er40_rtcr_TIME_4,
x$er40_rtcr_TIME_5)
ACC_ar <- data.frame(
x$ACC_ar_TIME_1,
x$ACC_ar_TIME_2,
x$ACC_ar_TIME_3,
x$ACC_ar_TIME_4,
x$ACC_ar_TIME_5)
RT_ar <- data.frame(
x$RT_ar_TIME_1,
x$RT_ar_TIME_2,
x$RT_ar_TIME_3,
x$RT_ar_TIME_4,
x$RT_ar_TIME_5)
ACC_apr <- data.frame(
x$ACC_apr_TIME_1,
x$ACC_apr_TIME_2,
x$ACC_apr_TIME_3,
x$ACC_apr_TIME_4,
x$ACC_apr_TIME_5)
RT_apr <- data.frame(
x$RT_apr_TIME_1,
x$RT_apr_TIME_2,
x$RT_apr_TIME_3,
x$RT_apr_TIME_4,
x$RT_apr_TIME_5)
# Test-retest reliability: two-way agreement ICCs computed over the first
# 2, 3, 4 and 5 timepoints (columns of res). Rows hold the four score
# variants labelled below at the `TestRetest_for_ER40` table: accuracy and
# response time, each age-&-practice-regressed (rows 1-2) and
# age-regressed only (rows 3-4).
res <- matrix(NA,4,4)
for (i in 2:5) {
res[1,(i-1)] <- icc(ACC_apr[,1:i],type="agreement",model="twoway")$value
res[2,(i-1)] <- icc(RT_apr[,1:i],type="agreement",model="twoway")$value
res[3,(i-1)] <- icc(ACC_ar[,1:i],type="agreement",model="twoway")$value
res[4,(i-1)] <- icc(RT_ar[,1:i],type="agreement",model="twoway")$value
}
header <- c("2 Timepoints","3 Timepoints","4 Timepoints","5 Timepoints")
TestRetest_for_ER40 <- c("","Accuracy (age- & practice-regressed)","Response Time (age- & practice-regressed)","Accuracy (age-regressed)","Response Time (age-regressed)")
pdf("TestRetest_ER40.pdf",width=15,height=15)
grid.table(cbind(TestRetest_for_ER40,rbind(header,round(res,3))))
pairs.panels(ACC_apr,lm=TRUE)
pairs.panels(RT_apr,lm=TRUE)
dev.off()
|
# Exercise: create data objects of numeric, string and real-number type,
# assign each to a named variable, collect them in a list, and inspect it.
n1 <- c(10, 20, 30)                 # numeric vector
n1                                  # auto-print the vector
s1 <- c("abc", "def", "ghi")        # character (string) vector
s1
r1 <- c(1.5, 2.3, 7.7, 4.9, -3.6)   # real numbers, including a negative
r1
l1 <- list(n1, s1, r1)              # bundle all three into one list
l1
str(l1)                             # show the structure of the list
| /first.R | no_license | TonyJM/DSR-Lab | R | false | false | 222 | r | #Write an R code to create a list of data objects of numeric, string, real number and name them
n1=c(10,20,30)
n1
s1=c("abc","def","ghi")
s1
r1=c(1.5,2.3,7.7,4.9,-3.6)
r1
l1=list(n1,s1,r1)
l1
str(l1)
|
# testthat tests for the has_examples assessment on source packages.
# The fixtures (assess_source_good, assess_source_bad, pkg_ref_source_bad2,
# assess_source_bad2, score_source_bad2) come from the package's shared
# test setup; they are not defined in this file.
test_that("assess_has_examples returns expected result for source packages", {
  # Both fixture packages are expected to yield TRUE for their first
  # has_examples entry; unclass() strips the assessment-result class so
  # expect_equal compares the bare logical value.
  expect_equal(unclass(assess_source_good$has_examples[[1]]), TRUE)
  expect_equal(unclass(assess_source_bad$has_examples[[1]]), TRUE)
})
test_that("check behavior of package that does not export any objects", {
  # With no exported objects there is nothing to assess: examples and
  # has_examples are empty, and the score falls back to NA.
  expect_length(pkg_ref_source_bad2$examples, 0)
  expect_length(assess_source_bad2$has_examples, 0)
  expect_equal(unclass(score_source_bad2$has_examples[[1]]), NA)
})
| /tests/testthat/test_assess_has_examples.R | permissive | pharmaR/riskmetric | R | false | false | 461 | r | test_that("assess_has_examples returns expected result for source packages", {
expect_equal(unclass(assess_source_good$has_examples[[1]]), TRUE)
expect_equal(unclass(assess_source_bad$has_examples[[1]]), TRUE)
})
test_that("check behavior of package that does not export any objects", {
expect_length(pkg_ref_source_bad2$examples, 0)
expect_length(assess_source_bad2$has_examples, 0)
expect_equal(unclass(score_source_bad2$has_examples[[1]]), NA)
})
|
## Put comments here that give an overall description of what your
## functions do
## This function cache the the inverse of matrix during the first step and store the inverse matrix in the enviroment for later pulling
## The code is directly modified from the makeVector example, this step stores the inverted matrix in the enviroment
## Build a matrix wrapper that can memoise its inverse.
##
## Returns a list of four closures sharing one environment:
##   set(y)          - replace the stored matrix and drop any cached inverse
##   get()           - return the stored matrix
##   setInverse(inv) - store a computed inverse in the cache
##   getInverse()    - return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  invert <- NULL
  list(
    set = function(y) {
      x <<- y
      invert <<- NULL  # stored matrix changed: invalidate the cache
    },
    get = function() {
      x
    },
    setInverse = function(inverse) {
      invert <<- inverse
    },
    getInverse = function() {
      invert
    }
  )
}
# The second function can use matrix cached from the first step.
## Return the inverse of the matrix held by a makeCacheMatrix() object.
##
## On a cache hit the memoised inverse is returned immediately (after a
## message); otherwise the inverse is computed with solve(), stored back
## into the cache, and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  hit <- x$getInverse()
  if (is.null(hit)) {
    result <- solve(x$get(), ...)
    x$setInverse(result)
    result
  } else {
    message("getting cached data")
    hit
  }
}
| /cachematrix.R | no_license | jjlljj234/ProgrammingAssignment2 | R | false | false | 966 | r | ## Put comments here that give an overall description of what your
## functions do
## This function cache the the inverse of matrix during the first step and store the inverse matrix in the enviroment for later pulling
## The code is directly modified from the makeVector example, this step stores the inverted matrix in the enviroment
makeCacheMatrix<-function(x=matrix()){
invert<-NULL
set<-function(y){
x<<-y
invert<<-NULL
}
get<-function()x
setInverse<-function(inverse)invert<<-inverse
getInverse<-function() invert
list(set=set,
get=get,
setInverse=setInverse,
getInverse=getInverse)
}
# The second function can use matrix cached from the first step.
cacheSolve<-function(x,...){
invert<-x$getInverse()
if(!is.null(invert)){
message("getting cached data")
return(invert)
}
mat<-x$get()
invert<-solve(mat,...)
x$setInverse(invert)
invert
}
|
## ----------------------- Chapter 7: Correspondence Analysis
## ----- simple correspondence analysis
# 4x3 contingency table: bands (rows) by fans (columns); the values are
# the cell entries fed to chisq.test() below.
superfan <- as.table(matrix(c(9, 12, 8, 1, 13, 1, 6, 20, 15, 4, 23, 18), ncol = 3))
attr(superfan, "dimnames") <- list(Band = c("Slayer", "Iron Maiden", "Metallica", "Judas Priest"),
Fan = c("Horst", "Helga", "Klaus"))
superfan
# Chi-square test of independence between Band and Fan.
fit_chisq <- chisq.test(superfan)
fit_chisq
# Pearson residuals from the chi-square fit.
S <- fit_chisq$residuals
round(S, 3)
library("vcd")
# Mosaic plot of the table, shaded by the residuals.
mosaic(superfan, shade = TRUE)
P <- prop.table(superfan)
round(P, 3) ## table with relative frequencies
r_mass <- margin.table(P, 1)
round(r_mass, 3) ## row masses
c_mass <- margin.table(P, 2)
round(c_mass, 3) ## column masses
r_profile <- prop.table(P, 1)
round(r_profile, 3) ## conditional relative frequencies rows
c_profile <- prop.table(P, 2)
round(c_profile, 3) ## conditional relative frequencies columns
ar_profile <- t(r_profile) %*% r_mass ## average row profile
round(as.vector(ar_profile), 3)
round(as.vector(c_mass), 3) ## column masses
ac_profile <- c_profile %*% c_mass ## average column profile
round(as.vector(ac_profile), 3)
round(as.vector(r_mass), 3) ## row masses
library("plot3D")
tc <- r_profile
scatter3D(x = tc[,1], y = tc[,2], z = tc[,3], xlab = "Horst", ylab = "Helga", zlab = "Klaus", colkey = FALSE,
col = 1, pch = 20, xlim = c(0,1), ylim = c(0,1), zlim = c(0,1), ticktype = "simple", type = "h",
phi = 40, theta = 50, main = "Row Profiles", bty = "g")
points3D(x = c(0,0,1), y = c(0,1,0), z = c(1,0,0), col = "red", add = TRUE, pch = 20)
lines3D(x = c(0,0,1,0), y = c(0,1,0,0), z = c(1,0,0,1), col = "red", add = TRUE)
text3D(x = tc[,1], y = tc[,2], z = tc[,3], labels = rownames(tc), pos = 3, add = TRUE, cex = 0.8, adj = -0.1)
library("ggtern")
tf <- as.data.frame.matrix(superfan)
c_mass <- as.vector(c_mass)
lines <- data.frame(x = c(c_mass[1], 1-c_mass[3], 0),
y = c(1-c_mass[1], 0, c_mass[2]),
z = c(0, c_mass[3], 1-c_mass[2]),
xend = c(c_mass[1], c_mass[1], c_mass[1]),
yend = c(c_mass[2], c_mass[2], c_mass[2]),
zend = c(c_mass[3], c_mass[3], c_mass[3]), row.names = NULL)
gt <- ggtern(data = tf, aes(Horst, Helga, Klaus))
gt + geom_point() + theme_rgbw() + geom_text(label = rownames(tf), vjust = -0.5) +
geom_point(aes(x = c_mass[1], y = c_mass[2], z = c_mass[3]), colour = "red", size = 4) +
geom_segment(data = lines, aes(x = x, y = y, z = z, xend = xend, yend = yend, zend = zend),
color = 'red', size = 0.5) +
labs(title = "Ternary Plot")
## ------------------------------------------------------------------------
sqrt(sum((r_profile["Slayer",] - r_profile["Iron Maiden",])^2/ar_profile))
sqrt(sum((r_profile["Slayer",] - r_profile["Judas Priest",])^2/ar_profile))
sqrt(sum((r_profile["Slayer",] - ar_profile)^2/ar_profile))
sqrt(sum((r_profile["Iron Maiden",] - ar_profile)^2/ar_profile))
library("anacor")
ca_fans <- anacor(superfan, ellipse = TRUE)
ca_fans
plot(ca_fans, main = "Symmetric CA Map")
library("MPsychoR")
data("HarvardPsych")
dim(HarvardPsych) ## researchers in rows, words in columns
fit_HP <- anacor(HarvardPsych)
plot(fit_HP, main = "Harvard Psychology Faculty", asp = NULL, xlim = c(-4, 1))
plot(fit_HP, main = "Harvard Psychology Faculty (Zoom)", asp = NULL, xlim = c(0, 1), ylim = c(-1, 1))
## ------ multiple correspondence analysis
library("MPsychoR")
data("YouthDep")
cdisub <- YouthDep[, c("CDI15r", "CDI21r", "race")]
B <- burtTable(cdisub)
dim(B)
library("ca")
fit_mca <- mjca(cdisub, lambda = "Burt")
plot(fit_mca, xlim = c(-0.5, 0.5))
## ----- configural frequency analysis
library("cfa")
data("HarvardPsych")
configs <- expand.grid(dimnames(HarvardPsych))
counts <- as.vector(HarvardPsych)
fit.cfa <- cfa(configs, counts, binom.test = TRUE, sorton = "n")
types <- fit.cfa$table[fit.cfa$table$sig.bin == TRUE, 1:3]
head(types, 10)
countdf <- as.data.frame(table(cdisub))
fit.cdi <- cfa(countdf[,1:3], countdf[,4])
fit.cdi$table[fit.cdi$table$sig.chisq == TRUE, 1:3] #chi2-test
fit.cdi$table[fit.cdi$table$sig.z == TRUE, 1:3] ## z-test | /chapter07_CA.R | no_license | amaolong/MPsychoR | R | false | false | 4,196 | r | ## ----------------------- Chapter 7: Correspondence Analysis
## ----- simple correspondence analysis
superfan <- as.table(matrix(c(9, 12, 8, 1, 13, 1, 6, 20, 15, 4, 23, 18), ncol = 3))
attr(superfan, "dimnames") <- list(Band = c("Slayer", "Iron Maiden", "Metallica", "Judas Priest"),
Fan = c("Horst", "Helga", "Klaus"))
superfan
fit_chisq <- chisq.test(superfan)
fit_chisq
S <- fit_chisq$residuals
round(S, 3)
library("vcd")
mosaic(superfan, shade = TRUE)
P <- prop.table(superfan)
round(P, 3) ## table with relative frequencies
r_mass <- margin.table(P, 1)
round(r_mass, 3) ## row masses
c_mass <- margin.table(P, 2)
round(c_mass, 3) ## column masses
r_profile <- prop.table(P, 1)
round(r_profile, 3) ## conditional relative frequencies rows
c_profile <- prop.table(P, 2)
round(c_profile, 3) ## conditional relative frequencies columns
ar_profile <- t(r_profile) %*% r_mass ## average row profile
round(as.vector(ar_profile), 3)
round(as.vector(c_mass), 3) ## column masses
ac_profile <- c_profile %*% c_mass ## average column profile
round(as.vector(ac_profile), 3)
round(as.vector(r_mass), 3) ## row masses
library("plot3D")
tc <- r_profile
scatter3D(x = tc[,1], y = tc[,2], z = tc[,3], xlab = "Horst", ylab = "Helga", zlab = "Klaus", colkey = FALSE,
col = 1, pch = 20, xlim = c(0,1), ylim = c(0,1), zlim = c(0,1), ticktype = "simple", type = "h",
phi = 40, theta = 50, main = "Row Profiles", bty = "g")
points3D(x = c(0,0,1), y = c(0,1,0), z = c(1,0,0), col = "red", add = TRUE, pch = 20)
lines3D(x = c(0,0,1,0), y = c(0,1,0,0), z = c(1,0,0,1), col = "red", add = TRUE)
text3D(x = tc[,1], y = tc[,2], z = tc[,3], labels = rownames(tc), pos = 3, add = TRUE, cex = 0.8, adj = -0.1)
library("ggtern")
tf <- as.data.frame.matrix(superfan)
c_mass <- as.vector(c_mass)
lines <- data.frame(x = c(c_mass[1], 1-c_mass[3], 0),
y = c(1-c_mass[1], 0, c_mass[2]),
z = c(0, c_mass[3], 1-c_mass[2]),
xend = c(c_mass[1], c_mass[1], c_mass[1]),
yend = c(c_mass[2], c_mass[2], c_mass[2]),
zend = c(c_mass[3], c_mass[3], c_mass[3]), row.names = NULL)
gt <- ggtern(data = tf, aes(Horst, Helga, Klaus))
gt + geom_point() + theme_rgbw() + geom_text(label = rownames(tf), vjust = -0.5) +
geom_point(aes(x = c_mass[1], y = c_mass[2], z = c_mass[3]), colour = "red", size = 4) +
geom_segment(data = lines, aes(x = x, y = y, z = z, xend = xend, yend = yend, zend = zend),
color = 'red', size = 0.5) +
labs(title = "Ternary Plot")
## ------------------------------------------------------------------------
sqrt(sum((r_profile["Slayer",] - r_profile["Iron Maiden",])^2/ar_profile))
sqrt(sum((r_profile["Slayer",] - r_profile["Judas Priest",])^2/ar_profile))
sqrt(sum((r_profile["Slayer",] - ar_profile)^2/ar_profile))
sqrt(sum((r_profile["Iron Maiden",] - ar_profile)^2/ar_profile))
library("anacor")
ca_fans <- anacor(superfan, ellipse = TRUE)
ca_fans
plot(ca_fans, main = "Symmetric CA Map")
library("MPsychoR")
data("HarvardPsych")
dim(HarvardPsych) ## researchers in rows, words in columns
fit_HP <- anacor(HarvardPsych)
plot(fit_HP, main = "Harvard Psychology Faculty", asp = NULL, xlim = c(-4, 1))
plot(fit_HP, main = "Harvard Psychology Faculty (Zoom)", asp = NULL, xlim = c(0, 1), ylim = c(-1, 1))
## ------ multiple correspondence analysis
library("MPsychoR")
data("YouthDep")
cdisub <- YouthDep[, c("CDI15r", "CDI21r", "race")]
B <- burtTable(cdisub)
dim(B)
library("ca")
fit_mca <- mjca(cdisub, lambda = "Burt")
plot(fit_mca, xlim = c(-0.5, 0.5))
## ----- configural frequency analysis
library("cfa")
data("HarvardPsych")
configs <- expand.grid(dimnames(HarvardPsych))
counts <- as.vector(HarvardPsych)
fit.cfa <- cfa(configs, counts, binom.test = TRUE, sorton = "n")
types <- fit.cfa$table[fit.cfa$table$sig.bin == TRUE, 1:3]
head(types, 10)
countdf <- as.data.frame(table(cdisub))
fit.cdi <- cfa(countdf[,1:3], countdf[,4])
fit.cdi$table[fit.cdi$table$sig.chisq == TRUE, 1:3] #chi2-test
fit.cdi$table[fit.cdi$table$sig.z == TRUE, 1:3] ## z-test |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sm-algorithm.R
\name{r2b}
\alias{r2b}
\title{computing a norm value for the segment from point k1 to k2-1}
\usage{
r2b(k1, k2, x, y)
}
\arguments{
\item{k1}{[INTEGER] start point}
\item{k2}{[INTEGER] end point+1}
\item{x}{[REAL(?)] input x-axis array (predictor)}
\item{y}{[REAL(?)] input y-axis array (response)}
}
\value{
A -[REAL] coefficient of linear regression (will zero if K1=0) (y=Ax+B), B -[REAL] coefficient of linear regression, R2B -[REAL] norm of the segment (maximum "distance" measured perpendicular to the regression line)
}
\description{
A -[REAL] coefficient of linear regression (will zero if K1=0) (y=Ax+B), B -[REAL] coefficient of linear regression, R2B -[REAL] norm of the segment (maximum "distance" measured perpendicular to the regression line)
}
\examples{
ni <- c( 1, 201, 402 )
i <- 1
k1 <- ni[i]
k2 <- ni[i+1]
r2b(k1, k2, y=t11$temper, x=t11$depth)
}
\keyword{internal}
| /man/r2b.Rd | no_license | boshek/limnotools | R | false | true | 984 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sm-algorithm.R
\name{r2b}
\alias{r2b}
\title{computing a norm value for the segment from point k1 to k2-1}
\usage{
r2b(k1, k2, x, y)
}
\arguments{
\item{k1}{[INTEGER] start point}
\item{k2}{[INTEGER] end point+1}
\item{x}{[REAL(?)] input x-axis array (predictor)}
\item{y}{[REAL(?)] input y-axis array (response)}
}
\value{
A -[REAL] coefficient of linear regression (will zero if K1=0) (y=Ax+B), B -[REAL] coefficient of linear regression, R2B -[REAL] norm of the segment (maximum "distance" measured perpendicular to the regression line)
}
\description{
A -[REAL] coefficient of linear regression (will zero if K1=0) (y=Ax+B), B -[REAL] coefficient of linear regression, R2B -[REAL] norm of the segment (maximum "distance" measured perpendicular to the regression line)
}
\examples{
ni <- c( 1, 201, 402 )
i <- 1
k1 <- ni[i]
k2 <- ni[i+1]
r2b(k1, k2, y=t11$temper, x=t11$depth)
}
\keyword{internal}
|
# apagando dados ####
a <- list.files(path = "config", pattern = ".rds", full.names = T)
file.remove(a)
# configuracao #### (configuration: coerce inputs, normalise month profile)
# Coerce all configuration inputs to plain data.frames (they may arrive
# as other tabular classes from the reading step).
metadata <- as.data.frame(metadata)
mileage <- as.data.frame(mileage)
# add_lkm() attaches km units to the per-vehicle mileage columns (VEIN helper).
mileage[, metadata$vehicles] <- add_lkm(mileage[, metadata$vehicles])
tfs <- as.data.frame(tfs)
veh <- as.data.frame(veh)
fuel <- as.data.frame(fuel)
met <- as.data.frame(met)
pmonth <- as.data.frame(pmonth)
# Rescale every monthly-profile column except the first (assumed to be a
# label/identifier column -- TODO confirm against the input file) so each
# sums to 100 (%). seq_len(...)[-1] instead of 2:ncol(...) so a
# single-column pmonth yields an empty loop rather than the bogus index
# sequence c(2, 1).
for (i in seq_len(ncol(pmonth))[-1]) {
  pmonth[[i]] <- 100 * pmonth[[i]] / sum(pmonth[[i]])
}
# checkar metadata$vehicles ####
switch (language,
"portuguese" = cat( "Metadata$Vehicles é:\n"),
"english" = cat( "Metadata$Vehicles is:\n"),
"spanish" = cat( "Metadata$Vehicles es:\n"))
# cat( "Metadata$Vehicles é:\n")
print(metadata$vehicles)
# checar nomes mileage ####
# Check that `mileage` has one column per vehicle category listed in
# metadata$vehicles; if any is missing, stop with a message (in the
# configured language) naming the missing column(s).
if(!length(intersect(metadata$vehicles, names(mileage))) == length(metadata$vehicles)) {
  switch (language,
          "portuguese" = stop("Precisa adicionar coluna ",
                              setdiff(metadata$vehicles, names(mileage)),
                              " em `mileage`"),
          "english" = stop("You need to add column ",
                           setdiff(metadata$vehicles, names(mileage)),
                           " in `mileage`"),
          "spanish" = stop("Necesitas agregar la columna ",
                           setdiff(metadata$vehicles, names(mileage)),
                           " en `mileage`"))
}
# checar nomes tfs #### (check tfs column names)
# Every vehicle category in metadata$vehicles must be a column of `tfs`;
# otherwise stop with a localized message naming the missing column(s).
# FIX: the messages previously reported setdiff(..., names(mileage)) --
# a copy-paste error -- even though this block validates `tfs`, so the
# wrong missing-column name could be shown. Now diffed against names(tfs).
if(!length(intersect(metadata$vehicles, names(tfs))) == length(metadata$vehicles)) {
  switch (language,
          "portuguese" = stop("Precisa adicionar coluna ",
                              setdiff(metadata$vehicles, names(tfs)),
                              " em `tfs`"),
          "english" = stop("You need to add column ",
                           setdiff(metadata$vehicles, names(tfs)),
                           " in `tfs`"),
          "spanish" = stop("Necesitas agregar la columna ",
                           setdiff(metadata$vehicles, names(tfs)),
                           " en `tfs`"))
}
# checar nomes veh #### (check veh column names)
# Every vehicle category in metadata$vehicles must be a column of `veh`;
# otherwise stop with a localized message naming the missing column(s).
# FIX: the messages previously reported setdiff(..., names(mileage)) --
# a copy-paste error -- even though this block validates `veh`, so the
# wrong missing-column name could be shown. Now diffed against names(veh).
if(!length(intersect(metadata$vehicles, names(veh))) == length(metadata$vehicles)) {
  switch (language,
          "portuguese" = stop("Precisa adicionar coluna ",
                              setdiff(metadata$vehicles, names(veh)),
                              " em `veh`"),
          "english" = stop("You need to add column ",
                           setdiff(metadata$vehicles, names(veh)),
                           " in `veh`"),
          "spanish" = stop("Necesitas agregar la columna ",
                           setdiff(metadata$vehicles, names(veh)),
                           " en `veh`"))
}
#checar Year ####
if(!"Year" %in% names(veh)){
switch (language,
"portuguese" = stop("Não estou enxergando a coluna 'Year' em `veh`"),
"english" = stop("I'm not seeing column 'Year' in `veh`"),
"spanish" = stop("No estoy viendo la columna 'Year' in `veh`"))
}
if(!"Year" %in% names(mileage)) {
switch (language,
"portuguese" = stop("Não estou enxergando a coluna 'Year' em `mileage`"),
"english" = stop("I'm not seeing column 'Year' in `mileage`"),
"spanish" = stop("No estoy viendo la columna 'Year' in `mileage`"))
}
# checar ano base
if(veh$Year[1] != year) {
switch (language,
"portuguese" = stop(paste0("O ano base é ", year, " mas o primeiro ano em `veh` é ", veh$Year[1])),
"english" = stop(paste0("The base year is ", year, " but the first year in `veh` is ", veh$Year[1])),
"spanish" = stop(paste0("El año base es ", year, " pero el primer año de `veh` es ", veh$Year[1])))
}
if(mileage$Year[1] != year) {
switch (language,
"portuguese" = stop(paste0("O ano base é ", year, " mas o primeiro ano em `mileage` é ", mileage$Year[1])),
"english" = stop(paste0("The base year is ", year, " but the first year in `mileage` is ", veh$Year[1])),
"spanish" = stop(paste0("El año base es ", year, " pero el primer año de `mileage` es ", mileage$Year[1])))
}
switch (language,
"portuguese" = message("Arquivos em: ", getwd(), "/config/*\n"),
"english" = message("Files in: ", getwd(), "/config/*\n"),
"spanish" = message("Archivos en: ", getwd(), "/config/*\n"))
saveRDS(metadata, "config/metadata.rds")
saveRDS(mileage, "config/mileage.rds")
saveRDS(tfs, "config/tfs.rds")
saveRDS(veh, "config/fleet_age.rds")
saveRDS(fuel, "config/fuel.rds")
saveRDS(met, "config/met.rds")
saveRDS(pmonth, "config/pmonth.rds")
# pastas
if(delete_directories){
choice <- 1
if(language == "portuguese") {
# choice <- utils::menu(c("Sim", "Não"), title="Apagar pastas csv, emi, images, notes, post e veh??")
if(choice == 1){
message("Apagando pastas `emi`, `images`, `notes`, `post` e `veh`")
unlink("csv", recursive = T)
unlink("emi", recursive = T)
unlink("images", recursive = T)
unlink("notes", recursive = T)
unlink("post", recursive = T)
unlink("veh", recursive = T)
}
} else if(language == "english"){
# choice <- utils::menu(c("Yes", "No"), title="Delete folders `csv`, `emi`, `images`, `notes`, `post` e `veh`??")
if(choice == 1){
message("Deleting folders `emi`, `images`, `notes`, `post` and `veh`")
unlink("csv", recursive = T)
unlink("emi", recursive = T)
unlink("images", recursive = T)
unlink("notes", recursive = T)
unlink("post", recursive = T)
unlink("veh", recursive = T)
}
} else if (language == "chinese") {
# choice <- utils::menu(c("是的", "没有"), title="删除文件夹 `csv`, `emi`, `images`, `notes`, `post`, `veh`??")
if(choice == 1){
message("删除文件夹 `emi`, `images`, `notes`, `post`, `veh`")
unlink("csv", recursive = T)
unlink("emi", recursive = T)
unlink("images", recursive = T)
unlink("notes", recursive = T)
unlink("post", recursive = T)
unlink("veh", recursive = T)
}
} else if(language == "spanish"){
# choice <- utils::menu(c("Si", "No"), title="Borrar carpetas `csv`, `emi`, `images`, `notes`, `post` y `veh`??")
if(choice == 1){
message("Borrando carpetas `emi`, `images`, `notes`, `post` y `veh`")
unlink("csv", recursive = T)
unlink("emi", recursive = T)
unlink("notes", recursive = T)
unlink("images", recursive = T)
unlink("post", recursive = T)
unlink("veh", recursive = T)
}
}
}
dir.create(path = "csv", showWarnings = FALSE)
dir.create(path = "emi", showWarnings = FALSE)
dir.create(path = "images", showWarnings = FALSE)
dir.create(path = "notes", showWarnings = FALSE)
dir.create(path = "post", showWarnings = FALSE)
dir.create(path = "post/datatable", showWarnings = FALSE)
dir.create(path = "post/streets", showWarnings = FALSE)
dir.create(path = "post/grids", showWarnings = FALSE)
dir.create(path = "veh", showWarnings = FALSE)
for(i in seq_along(metadata$vehicles)) dir.create(path = paste0("emi/", metadata$vehicles[i]))
pa <- list.dirs(path = "emi", full.names = T, recursive = T)
po <- list.dirs("post", full.names = T, recursive = T)
switch (language,
"portuguese" = message("Novas pastas:"),
"english" = message("New folders:"),
"chinese" = message("文件位于: ", getwd(), "/config/*\n"),
"spanish" = message("Nuevas carpetas"))
message("csv\n")
message("images\n")
message(paste0(po,"\n"))
message(paste0(pa,"\n"))
message("veh\n")
# names groups ####
# Partition metadata$vehicles into vehicle families by substring match on
# the category name (PC, LCV, TRUCKS, BUS, MC). Note grep("PC") would
# also match any name merely containing "PC", and grep("MC") any name
# containing "MC" -- assumed unambiguous for this fleet's naming scheme.
n_PC <- metadata$vehicles[grep(pattern = "PC", x = metadata$vehicles)]
n_LCV <- metadata$vehicles[grep(pattern = "LCV", x = metadata$vehicles)]
n_TRUCKS <- metadata$vehicles[grep(pattern = "TRUCKS", x = metadata$vehicles)]
n_BUS <- metadata$vehicles[grep(pattern = "BUS", x = metadata$vehicles)]
n_MC <- metadata$vehicles[grep(pattern = "MC", x = metadata$vehicles)]
# Named list iterated by the plotting loops below (one figure per family).
n_veh <- list(PC = n_PC,
              LCV = n_LCV,
              TRUCKS = n_TRUCKS,
              BUS = n_BUS,
              MC = n_MC)
# Fleet ####
switch (language,
"portuguese" = cat("Plotando frota \n"),
"english" = cat("Plotting fleet \n"),
"chinese" = cat("密谋舰队 \n"),
"spanish" = cat("Plotando flota \n"))
for(i in seq_along(n_veh)) {
df_x <- veh[, n_veh[[i]]]
png(
paste0("images/FLEET_",
names(n_veh)[i],
".png"),
2000, 1500, "px",res = 300)
colplot(df = df_x,
cols = n_veh[[i]],
xlab = "Age",
ylab = "veh/h",
main = names(n_veh)[i],
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
}
#TFS ####
switch (language,
"portuguese" = cat("Plotando perfis `tfs`\n"),
"english" = cat("Plotting profiles `tfs`\n"),
"chinese" = cat("绘制轮廓 `tfs`\n"),
"spanish" = cat("Plotando perfiles `tfs`\n"))
for(i in seq_along(n_veh)) {
df_x <- tfs[, n_veh[[i]]]
png(
paste0("images/TFS_",
names(n_veh)[i],
".png"),
2000, 1500, "px",res = 300)
colplot(df = df_x,
cols = n_veh[[i]],
xlab = "Hour",
ylab = "",
main = paste0("TFS ", names(n_veh)[i]),
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
}
# Mileage ####
# Status message before plotting annual mileage profiles.
# fix: the english/chinese/spanish messages carried a stray `tfs` copied
# from the TFS section above.
switch (language,
        "portuguese" = cat("Plotando quilometragem \n"),
        "english" = cat("Plotting mileage \n"),
        "chinese" = cat("绘制里程 \n"),
        "spanish" = cat("Plotando kilometraje \n"))
for(i in seq_along(n_veh)) {
df_x <- mileage[, n_veh[[i]]]
png(
paste0("images/MILEAGE_",
names(n_veh)[i],
".png"),
2000, 1500, "px",res = 300)
colplot(df = df_x,
cols = n_veh[[i]],
xlab = "Age of use",
ylab = "[km/year]",
main = paste0("Mileage ", names(n_veh)[i]),
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
}
# Temperature ####
units(celsius(1))$numerator
png("images/Temperature.png",
2000, 1500, "px",res = 300)
colplot(df = met,
cols = "Temperature",
xlab = "Months",
ylab = units(celsius(1))$numerator,
main = "Temperature",
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
# month
for(i in seq_along(n_veh)) {
df_x <- pmonth[, n_veh[[i]]]
png(
paste0("images/PMONTH_",
names(n_veh)[i],
".png"),
2000, 1500, "px",res = 300)
colplot(df = df_x,
cols = n_veh[[i]],
xlab = "Month",
ylab = "%",
main = names(n_veh)[i],
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
}
# Notes ####
switch (language,
"portuguese" = cat("\nFazendo anotações\n"),
"english" = cat("\nTaking some notes\n"),
"chinese" = cat("\n做笔记\n"),
"spanish" = cat("\nEscribiendo notas\n"))
vein_notes(notes = c("Default notes for vein::get_project"),
file = "notes/README",
title = paste0("Metropolitan Area of São Paulo (MASP) ", year),
approach = 'Bottom-up',
traffic = "Samples of travel demand models for MASP",
composition = "CETESB",
ef = paste0("CETESB ", scale),
cold_start = "Not Applicable",
evaporative = "Running Losses, Diurnal and Hot Soak",
standards = "PROCONVE, PROMOT",
mileage = "Bruni and Bales 2013")
# saveRDS
switch (language,
"portuguese" = message("\nArquivos em:"),
"english" = message("\nFiles in:"),
"chinese" = message("\n文件位于:"),
"spanish" = message("\nArchivos en:"))
message("config/metadata.rds\n",
"config/mileage.rds\n",
"config/tfs.rds\n",
"config/fleet_age.rds\n",
"config/fuel.rds\n")
switch (language,
"portuguese" = message("\nFiguras em \n"),
"english" = message("\nFigures in \n"),
"chinese" = message("\n在数字\n"),
"spanish" = message("\nFiguras en \n"))
message("/images")
switch (language,
"portuguese" = message("Limpando..."),
"english" = message("Cleaning..."),
"chinese" = message("清洁用品..."),
"spanish" = message("Limpiando..."))
# Drop intermediate objects so only the saved config/*.rds files remain;
# suppressWarnings() covers objects that were never created in this run.
# fix: `ef` was listed twice in the rm() call.
suppressWarnings(
  rm(i, choice, pa, metadata, po, tfs, veh, mileage, fuel, theme,
     n_PC, n_LCV, n_TRUCKS, n_BUS, n_MC, df_x, ef, cores, vkm, a, rota)
)
| /projects/brazil_td_country/config/config.R | no_license | gitter-badger/vein | R | false | false | 12,714 | r | # apagando dados ####
a <- list.files(path = "config", pattern = ".rds", full.names = T)
file.remove(a)
# configuracao ####
metadata <- as.data.frame(metadata)
mileage <- as.data.frame(mileage)
mileage[, metadata$vehicles] <- add_lkm(mileage[, metadata$vehicles])
tfs <- as.data.frame(tfs)
veh <- as.data.frame(veh)
fuel <- as.data.frame(fuel)
met <- as.data.frame(met)
pmonth <- as.data.frame(pmonth)
for(i in 2:ncol(pmonth)) {
pmonth[[i]] <- 100*pmonth[[i]] /sum(pmonth[[i]] )
}
# checkar metadata$vehicles ####
switch (language,
"portuguese" = cat( "Metadata$Vehicles é:\n"),
"english" = cat( "Metadata$Vehicles is:\n"),
"spanish" = cat( "Metadata$Vehicles es:\n"))
# cat( "Metadata$Vehicles é:\n")
print(metadata$vehicles)
# checar nomes mileage ####
if(!length(intersect(metadata$vehicles, names(mileage))) == length(metadata$vehicles)) {
switch (language,
"portuguese" = stop("Precisa adicionar coluna ",
setdiff(metadata$vehicles, names(mileage)),
" em `mileage`"),
"english" = stop("You need to add column ",
setdiff(metadata$vehicles, names(mileage)),
" in `mileage`"),
"spanish" = stop("Necesitas agregar la columna ",
setdiff(metadata$vehicles, names(mileage)),
" en `mileage`"))
}
# checar nomes tfs ####
# Abort if any vehicle category listed in metadata$vehicles is missing
# from `tfs` (temporal factors), naming the absent columns in the error.
# fix: the messages diffed against names(mileage) (copy-paste from the
# mileage check); they now report the columns actually missing from `tfs`.
if(!length(intersect(metadata$vehicles, names(tfs))) == length(metadata$vehicles)) {
  switch (language,
          "portuguese" = stop("Precisa adicionar coluna ",
                              setdiff(metadata$vehicles, names(tfs)),
                              " em `tfs`"),
          "english" = stop("You need to add column ",
                           setdiff(metadata$vehicles, names(tfs)),
                           " in `tfs`"),
          "spanish" = stop("Necesitas agregar la columna ",
                           setdiff(metadata$vehicles, names(tfs)),
                           " en `tfs`"))
}
# checar nomes veh ####
# Abort if any vehicle category listed in metadata$vehicles is missing
# from `veh` (fleet composition), naming the absent columns in the error.
# fix: the messages diffed against names(mileage) (copy-paste from the
# mileage check); they now report the columns actually missing from `veh`.
if(!length(intersect(metadata$vehicles, names(veh))) == length(metadata$vehicles)) {
  switch (language,
          "portuguese" = stop("Precisa adicionar coluna ",
                              setdiff(metadata$vehicles, names(veh)),
                              " em `veh`"),
          "english" = stop("You need to add column ",
                           setdiff(metadata$vehicles, names(veh)),
                           " in `veh`"),
          "spanish" = stop("Necesitas agregar la columna ",
                           setdiff(metadata$vehicles, names(veh)),
                           " en `veh`"))
}
#checar Year ####
if(!"Year" %in% names(veh)){
switch (language,
"portuguese" = stop("Não estou enxergando a coluna 'Year' em `veh`"),
"english" = stop("I'm not seeing column 'Year' in `veh`"),
"spanish" = stop("No estoy viendo la columna 'Year' in `veh`"))
}
if(!"Year" %in% names(mileage)) {
switch (language,
"portuguese" = stop("Não estou enxergando a coluna 'Year' em `mileage`"),
"english" = stop("I'm not seeing column 'Year' in `mileage`"),
"spanish" = stop("No estoy viendo la columna 'Year' in `mileage`"))
}
# checar ano base
if(veh$Year[1] != year) {
switch (language,
"portuguese" = stop(paste0("O ano base é ", year, " mas o primeiro ano em `veh` é ", veh$Year[1])),
"english" = stop(paste0("The base year is ", year, " but the first year in `veh` is ", veh$Year[1])),
"spanish" = stop(paste0("El año base es ", year, " pero el primer año de `veh` es ", veh$Year[1])))
}
# Abort when the first year in `mileage` does not match the base year.
# fix: the English message printed veh$Year[1] instead of mileage$Year[1],
# so it reported the wrong offending value.
if(mileage$Year[1] != year) {
  switch (language,
          "portuguese" = stop(paste0("O ano base é ", year, " mas o primeiro ano em `mileage` é ", mileage$Year[1])),
          "english" = stop(paste0("The base year is ", year, " but the first year in `mileage` is ", mileage$Year[1])),
          "spanish" = stop(paste0("El año base es ", year, " pero el primer año de `mileage` es ", mileage$Year[1])))
}
switch (language,
"portuguese" = message("Arquivos em: ", getwd(), "/config/*\n"),
"english" = message("Files in: ", getwd(), "/config/*\n"),
"spanish" = message("Archivos en: ", getwd(), "/config/*\n"))
saveRDS(metadata, "config/metadata.rds")
saveRDS(mileage, "config/mileage.rds")
saveRDS(tfs, "config/tfs.rds")
saveRDS(veh, "config/fleet_age.rds")
saveRDS(fuel, "config/fuel.rds")
saveRDS(met, "config/met.rds")
saveRDS(pmonth, "config/pmonth.rds")
# pastas
if(delete_directories){
choice <- 1
if(language == "portuguese") {
# choice <- utils::menu(c("Sim", "Não"), title="Apagar pastas csv, emi, images, notes, post e veh??")
if(choice == 1){
message("Apagando pastas `emi`, `images`, `notes`, `post` e `veh`")
unlink("csv", recursive = T)
unlink("emi", recursive = T)
unlink("images", recursive = T)
unlink("notes", recursive = T)
unlink("post", recursive = T)
unlink("veh", recursive = T)
}
} else if(language == "english"){
# choice <- utils::menu(c("Yes", "No"), title="Delete folders `csv`, `emi`, `images`, `notes`, `post` e `veh`??")
if(choice == 1){
message("Deleting folders `emi`, `images`, `notes`, `post` and `veh`")
unlink("csv", recursive = T)
unlink("emi", recursive = T)
unlink("images", recursive = T)
unlink("notes", recursive = T)
unlink("post", recursive = T)
unlink("veh", recursive = T)
}
} else if (language == "chinese") {
# choice <- utils::menu(c("是的", "没有"), title="删除文件夹 `csv`, `emi`, `images`, `notes`, `post`, `veh`??")
if(choice == 1){
message("删除文件夹 `emi`, `images`, `notes`, `post`, `veh`")
unlink("csv", recursive = T)
unlink("emi", recursive = T)
unlink("images", recursive = T)
unlink("notes", recursive = T)
unlink("post", recursive = T)
unlink("veh", recursive = T)
}
} else if(language == "spanish"){
# choice <- utils::menu(c("Si", "No"), title="Borrar carpetas `csv`, `emi`, `images`, `notes`, `post` y `veh`??")
if(choice == 1){
message("Borrando carpetas `emi`, `images`, `notes`, `post` y `veh`")
unlink("csv", recursive = T)
unlink("emi", recursive = T)
unlink("notes", recursive = T)
unlink("images", recursive = T)
unlink("post", recursive = T)
unlink("veh", recursive = T)
}
}
}
dir.create(path = "csv", showWarnings = FALSE)
dir.create(path = "emi", showWarnings = FALSE)
dir.create(path = "images", showWarnings = FALSE)
dir.create(path = "notes", showWarnings = FALSE)
dir.create(path = "post", showWarnings = FALSE)
dir.create(path = "post/datatable", showWarnings = FALSE)
dir.create(path = "post/streets", showWarnings = FALSE)
dir.create(path = "post/grids", showWarnings = FALSE)
dir.create(path = "veh", showWarnings = FALSE)
for(i in seq_along(metadata$vehicles)) dir.create(path = paste0("emi/", metadata$vehicles[i]))
pa <- list.dirs(path = "emi", full.names = T, recursive = T)
po <- list.dirs("post", full.names = T, recursive = T)
switch (language,
"portuguese" = message("Novas pastas:"),
"english" = message("New folders:"),
"chinese" = message("文件位于: ", getwd(), "/config/*\n"),
"spanish" = message("Nuevas carpetas"))
message("csv\n")
message("images\n")
message(paste0(po,"\n"))
message(paste0(pa,"\n"))
message("veh\n")
# names groups ####
n_PC <- metadata$vehicles[grep(pattern = "PC", x = metadata$vehicles)]
n_LCV <- metadata$vehicles[grep(pattern = "LCV", x = metadata$vehicles)]
n_TRUCKS <- metadata$vehicles[grep(pattern = "TRUCKS", x = metadata$vehicles)]
n_BUS <- metadata$vehicles[grep(pattern = "BUS", x = metadata$vehicles)]
n_MC <- metadata$vehicles[grep(pattern = "MC", x = metadata$vehicles)]
n_veh <- list(PC = n_PC,
LCV = n_LCV,
TRUCKS = n_TRUCKS,
BUS = n_BUS,
MC = n_MC)
# Fleet ####
switch (language,
"portuguese" = cat("Plotando frota \n"),
"english" = cat("Plotting fleet \n"),
"chinese" = cat("密谋舰队 \n"),
"spanish" = cat("Plotando flota \n"))
for(i in seq_along(n_veh)) {
df_x <- veh[, n_veh[[i]]]
png(
paste0("images/FLEET_",
names(n_veh)[i],
".png"),
2000, 1500, "px",res = 300)
colplot(df = df_x,
cols = n_veh[[i]],
xlab = "Age",
ylab = "veh/h",
main = names(n_veh)[i],
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
}
#TFS ####
switch (language,
"portuguese" = cat("Plotando perfis `tfs`\n"),
"english" = cat("Plotting profiles `tfs`\n"),
"chinese" = cat("绘制轮廓 `tfs`\n"),
"spanish" = cat("Plotando perfiles `tfs`\n"))
for(i in seq_along(n_veh)) {
df_x <- tfs[, n_veh[[i]]]
png(
paste0("images/TFS_",
names(n_veh)[i],
".png"),
2000, 1500, "px",res = 300)
colplot(df = df_x,
cols = n_veh[[i]],
xlab = "Hour",
ylab = "",
main = paste0("TFS ", names(n_veh)[i]),
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
}
# Mileage ####
# Status message before plotting annual mileage profiles.
# fix: the english/chinese/spanish messages carried a stray `tfs` copied
# from the TFS section above.
switch (language,
        "portuguese" = cat("Plotando quilometragem \n"),
        "english" = cat("Plotting mileage \n"),
        "chinese" = cat("绘制里程 \n"),
        "spanish" = cat("Plotando kilometraje \n"))
for(i in seq_along(n_veh)) {
df_x <- mileage[, n_veh[[i]]]
png(
paste0("images/MILEAGE_",
names(n_veh)[i],
".png"),
2000, 1500, "px",res = 300)
colplot(df = df_x,
cols = n_veh[[i]],
xlab = "Age of use",
ylab = "[km/year]",
main = paste0("Mileage ", names(n_veh)[i]),
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
}
# Temperature ####
units(celsius(1))$numerator
png("images/Temperature.png",
2000, 1500, "px",res = 300)
colplot(df = met,
cols = "Temperature",
xlab = "Months",
ylab = units(celsius(1))$numerator,
main = "Temperature",
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
# month
for(i in seq_along(n_veh)) {
df_x <- pmonth[, n_veh[[i]]]
png(
paste0("images/PMONTH_",
names(n_veh)[i],
".png"),
2000, 1500, "px",res = 300)
colplot(df = df_x,
cols = n_veh[[i]],
xlab = "Month",
ylab = "%",
main = names(n_veh)[i],
type = "l",
pch = NULL,
lwd =1,
theme = theme,
spl = 8)
dev.off()
}
# Notes ####
switch (language,
"portuguese" = cat("\nFazendo anotações\n"),
"english" = cat("\nTaking some notes\n"),
"chinese" = cat("\n做笔记\n"),
"spanish" = cat("\nEscribiendo notas\n"))
vein_notes(notes = c("Default notes for vein::get_project"),
file = "notes/README",
title = paste0("Metropolitan Area of São Paulo (MASP) ", year),
approach = 'Bottom-up',
traffic = "Samples of travel demand models for MASP",
composition = "CETESB",
ef = paste0("CETESB ", scale),
cold_start = "Not Applicable",
evaporative = "Running Losses, Diurnal and Hot Soak",
standards = "PROCONVE, PROMOT",
mileage = "Bruni and Bales 2013")
# saveRDS
switch (language,
"portuguese" = message("\nArquivos em:"),
"english" = message("\nFiles in:"),
"chinese" = message("\n文件位于:"),
"spanish" = message("\nArchivos en:"))
message("config/metadata.rds\n",
"config/mileage.rds\n",
"config/tfs.rds\n",
"config/fleet_age.rds\n",
"config/fuel.rds\n")
switch (language,
"portuguese" = message("\nFiguras em \n"),
"english" = message("\nFigures in \n"),
"chinese" = message("\n在数字\n"),
"spanish" = message("\nFiguras en \n"))
message("/images")
switch (language,
"portuguese" = message("Limpando..."),
"english" = message("Cleaning..."),
"chinese" = message("清洁用品..."),
"spanish" = message("Limpiando..."))
# Drop intermediate objects so only the saved config/*.rds files remain;
# suppressWarnings() covers objects that were never created in this run.
# fix: `ef` was listed twice in the rm() call.
suppressWarnings(
  rm(i, choice, pa, metadata, po, tfs, veh, mileage, fuel, theme,
     n_PC, n_LCV, n_TRUCKS, n_BUS, n_MC, df_x, ef, cores, vkm, a, rota)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shape.R
\name{quad}
\alias{quad}
\title{Draw a Quadrilateral}
\usage{
quad(sketch, x1 = x1, y1 = y1, x2 = x2, y2 = y2, x3 = x3, y3 = y3,
x4 = x4, y4 = y4)
}
\arguments{
\item{sketch}{A p5 sketch.}
\item{x1}{The x coordinate for the first point.}
\item{y1}{The y coordinate for the first point.}
\item{x2}{The x coordinate for the second point.}
\item{y2}{The y coordinate for the second point.}
\item{x3}{The x coordinate for the third point.}
\item{y3}{The y coordinate for the third point.}
\item{x4}{The x coordinate for the fourth point.}
\item{y4}{The y coordinate for the fourth point.}
}
\description{
Draw a Quadrilateral
}
\examples{
\dontrun{
p5() \%>\%
quad(40, 30, 85, 25, 70, 65, 35, 75)
}
}
\seealso{
Other Shape 2D Primitives: \code{\link{arc}},
\code{\link{ellipse}}, \code{\link{line}},
\code{\link{point}}, \code{\link{rect}},
\code{\link{triangle}}
}
| /man/quad.Rd | no_license | bedantaguru/p5 | R | false | true | 968 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shape.R
\name{quad}
\alias{quad}
\title{Draw a Quadrilateral}
\usage{
quad(sketch, x1 = x1, y1 = y1, x2 = x2, y2 = y2, x3 = x3, y3 = y3,
x4 = x4, y4 = y4)
}
\arguments{
\item{sketch}{A p5 sketch.}
\item{x1}{The x coordinate for the first point.}
\item{y1}{The y coordinate for the first point.}
\item{x2}{The x coordinate for the second point.}
\item{y2}{The y coordinate for the second point.}
\item{x3}{The x coordinate for the third point.}
\item{y3}{The y coordinate for the third point.}
\item{x4}{The x coordinate for the fourth point.}
\item{y4}{The y coordinate for the fourth point.}
}
\description{
Draw a Quadrilateral
}
\examples{
\dontrun{
p5() \%>\%
quad(40, 30, 85, 25, 70, 65, 35, 75)
}
}
\seealso{
Other Shape 2D Primitives: \code{\link{arc}},
\code{\link{ellipse}}, \code{\link{line}},
\code{\link{point}}, \code{\link{rect}},
\code{\link{triangle}}
}
|
# https://community.watsonanalytics.com/watson-analytics-blog/predictive-insights-in-the-telco-customer-churn-data-set/
# https://community.watsonanalytics.com/wp-content/uploads/2015/03/WA_Fn-UseC_-Telco-Customer-Churn.csv
# Fit and inspect a Kaplan-Meier survival curve for the IBM Telco churn data.
library(readr)     # read_csv
library(dplyr)     # tbl_df, mutate
library(survival)  # survfit, Surv
library(magrittr)  # %>% and the tee pipe %T>%
# Churn == "Yes" marks the event (customer left); tenure is the observed time.
read_csv("ibm/WA_Fn-UseC_-Telco-Customer-Churn.csv") %>%
  tbl_df %>%
  mutate(censor = Churn == "Yes") %>%
  survfit(Surv(tenure, censor) ~ 1, data = .) %T>%
  # %T>% plots the fit as a side effect, then forwards the fit object on
  # too smooth to be real subscription data
  plot(mark.time = F) %>%
  summary()
summary() | /ibm/ibm-churn.R | no_license | statwonk/survival-problems | R | false | false | 531 | r | # https://community.watsonanalytics.com/watson-analytics-blog/predictive-insights-in-the-telco-customer-churn-data-set/
# https://community.watsonanalytics.com/wp-content/uploads/2015/03/WA_Fn-UseC_-Telco-Customer-Churn.csv
library(readr)
library(dplyr)
library(survival)
library(magrittr)
read_csv("ibm/WA_Fn-UseC_-Telco-Customer-Churn.csv") %>%
tbl_df %>%
mutate(censor = Churn == "Yes") %>%
survfit(Surv(tenure, censor) ~ 1, data = .) %T>%
# too smooth to be real subscription data
plot(mark.time = F) %>%
summary() |
library(santaR)
### Name: get_eigen_spline
### Title: Compute eigenSplines across a dataset
### Aliases: get_eigen_spline
### ** Examples
## 7 measurements, 3 subjects, 4 unique time-points, 2 variables
inputData <- matrix(c(1,2,3,4,5,6,7,8,9 ,10,11,12,13,14,15,16,17,18), ncol=2)
ind <- c('ind_1','ind_1','ind_1','ind_2','ind_2','ind_2','ind_3','ind_3','ind_3')
time <- c(0,5,10,0,10,15,5,10,15)
get_eigen_spline(inputData, ind, time, nPC=NA, scaling="scaling_UV", method="nipals",
verbose=TRUE, centering=TRUE, ncores=0)
# nipals calculated PCA
# Importance of component(s):
# PC1 PC2 PC3
# R2 0.7113 0.2190 0.05261
# Cumulative R2 0.7113 0.9303 0.98287
# total time: 0.12 secs
# $matrix
# 0 5 10 15
# PC1 -1.7075707 -0.7066426 0.7075708 1.7066425
# PC2 -0.3415271 0.9669724 1.0944005 -0.4297013
# PC3 -0.1764657 -0.5129981 0.5110671 0.1987611
#
# $variance
# [1] 0.71126702 0.21899068 0.05260949
#
# $model
# nipals calculated PCA
# Importance of component(s):
# PC1 PC2 PC3
# R2 0.7113 0.2190 0.05261
# Cumulative R2 0.7113 0.9303 0.98287
# 6 Variables
# 4 Samples
# 6 NAs ( 25 %)
# 3 Calculated component(s)
# Data was mean centered before running PCA
# Data was NOT scaled before running PCA
# Scores structure:
# [1] 4 3
# Loadings structure:
# [1] 6 3
#
# $countTP
# [,1]
# 3 6
| /data/genthat_extracted_code/santaR/examples/get_eigen_spline.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,436 | r | library(santaR)
### Name: get_eigen_spline
### Title: Compute eigenSplines across a dataset
### Aliases: get_eigen_spline
### ** Examples
## 7 measurements, 3 subjects, 4 unique time-points, 2 variables
inputData <- matrix(c(1,2,3,4,5,6,7,8,9 ,10,11,12,13,14,15,16,17,18), ncol=2)
ind <- c('ind_1','ind_1','ind_1','ind_2','ind_2','ind_2','ind_3','ind_3','ind_3')
time <- c(0,5,10,0,10,15,5,10,15)
get_eigen_spline(inputData, ind, time, nPC=NA, scaling="scaling_UV", method="nipals",
verbose=TRUE, centering=TRUE, ncores=0)
# nipals calculated PCA
# Importance of component(s):
# PC1 PC2 PC3
# R2 0.7113 0.2190 0.05261
# Cumulative R2 0.7113 0.9303 0.98287
# total time: 0.12 secs
# $matrix
# 0 5 10 15
# PC1 -1.7075707 -0.7066426 0.7075708 1.7066425
# PC2 -0.3415271 0.9669724 1.0944005 -0.4297013
# PC3 -0.1764657 -0.5129981 0.5110671 0.1987611
#
# $variance
# [1] 0.71126702 0.21899068 0.05260949
#
# $model
# nipals calculated PCA
# Importance of component(s):
# PC1 PC2 PC3
# R2 0.7113 0.2190 0.05261
# Cumulative R2 0.7113 0.9303 0.98287
# 6 Variables
# 4 Samples
# 6 NAs ( 25 %)
# 3 Calculated component(s)
# Data was mean centered before running PCA
# Data was NOT scaled before running PCA
# Scores structure:
# [1] 4 3
# Loadings structure:
# [1] 6 3
#
# $countTP
# [,1]
# 3 6
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# fix: the raw file marks missing values with "?"; na.strings = "?" parses
# them to NA up front so the as.numeric() conversion below no longer has to
# coerce a "?" entry (which would produce NAs with warnings).
data <- read.table(file = "C:/Users/Asus/Documents/R/coursera course 4 week 1/household_power_consumption.txt" , header = TRUE , sep = ";" , na.strings = "?")
# keep only the two target days (dates are d/m/Y strings in the raw file)
data <- subset(data , Date %in% c("1/2/2007" , "2/2/2007"))
png("C:/Users/Asus/Documents/R/coursera course 4 week 1/Plot1.png")
# harmless no-op once the column is already numeric; kept for compatibility
data$Global_active_power <- as.numeric(data$Global_active_power)
hist(data$Global_active_power , col = "red" , main = "Global Active Power" , xlab = "Global Active Power (Kilowatts)" , ylab = "Frequency")
dev.off() | /Plot1.R | no_license | dakshchordiya/ExData_Plotting1 | R | false | false | 491 | r | data <- read.table(file = "C:/Users/Asus/Documents/R/coursera course 4 week 1/household_power_consumption.txt" , header = TRUE , sep = ";")
data <- subset(data , Date %in% c("1/2/2007" , "2/2/2007"))
png("C:/Users/Asus/Documents/R/coursera course 4 week 1/Plot1.png")
data$Global_active_power <- as.numeric(data$Global_active_power)
hist(data$Global_active_power , col = "red" , main = "Global Active Power" , xlab = "Global Active Power (Kilowatts)" , ylab = "Frequency")
dev.off() |
# Load the 2013-2020 police killings table and group it by victim race.
library(tidyverse)
library(ggplot2)
library(dplyr)

# Read the published CSV straight from the repository's gh-pages branch.
df <- read.csv("https://raw.githubusercontent.com/info201b-au20/info201finalGEKK/gh-pages/2013-2020%20Police%20Killings-Table%201.csv")
View(df)

# Attach a grouping by victim race for downstream summaries.
grouped <- group_by(df, Victim.s.race)
View(grouped)
# pull(grouped)
| /scripts/table.R | permissive | info201b-au20/info201finalGEKK | R | false | false | 270 | r | df <- read.csv("https://raw.githubusercontent.com/info201b-au20/info201finalGEKK/gh-pages/2013-2020%20Police%20Killings-Table%201.csv")
View(df)
library(tidyverse)
library(ggplot2)
library(dplyr)
grouped <- df %>%
group_by(Victim.s.race)
View(grouped)
#pull(grouped)
|
#### Coursera Exploratory Data Analysis Project 1 - Plot 2
#### Line plot of Global Active Power over 2007-02-01 and 2007-02-02.
data <- read.csv("household_power_consumption.txt", sep=";", na.strings="?") # "?" marks missing values in the raw file
sub <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007") # keep only the two target days
sub$Date <- strptime(paste(sub$Date, sub$Time), "%d/%m/%Y %H:%M:%S") # combine Date + Time into a timestamp (POSIXlt)
with(sub, plot(Date, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.copy(png, file = "plot2.png", height=480, width=480) # copy the screen device to a PNG; dev.off() follows
dev.off() | /plot2.R | no_license | peakgeek/ExData_Plotting1 | R | false | false | 537 | r | #### Coursera Exploratory Data Analysis Project 1 - Plot 2
data <- read.csv("household_power_consumption.txt", sep=";", na.strings="?") #loads dataset
sub <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007") #subset data
sub$Date <- strptime(paste(sub$Date, sub$Time), "%d/%m/%Y %H:%M:%S") #converts/combines date and time
with(sub, plot(Date, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.copy(png, file = "plot2.png", height=480, width=480) # Copy plot to a PNG file
dev.off() |
# block --- convert characters on STDIN to block letters on STDOUT
#
# Each printable character is rendered as a ROWS_PER_CHAR x COLS_PER_CHAR
# bitmap taken from the masks table (brought in by MASK_DECL); set bits
# are painted with fill_char. Output lines are clipped to out_width
# columns; an input line wider than that is emitted in successive banks.
include "block_def.r.i"
character line (MAXLINE), outbuf (MAX_OUT), c, fill_char
integer in, out, row, col, mask, out_width, next_in
integer getlin
include MASK_DECL
call get_options (fill_char, out_width)
# process one input line at a time until end of file
while (getlin (line, STDIN) ~= EOF) {
next_in = 1
# a long input line may need several banks of block-letter output
repeat {
# emit the current bank one pixel row at a time
for (row = 0; row < ROWS_PER_CHAR; row += 1) {
# blank the output buffer for this row
do col = 1, out_width
outbuf (col) = ' 'c
out = 1
# walk the input from where the previous bank stopped,
# extracting this glyph row from the packed mask words
for (in = next_in; line (in) ~= EOS; in += 1) {
c = line (in)
if (c == BS) { # handle overstrikes
out -= COLS_PER_CHAR + COLS_BETWEEN_CHARS
next
}
elif (c < ' 'c) # ignore other control characters
next
else
mask = rs (masks (row / ROWS_PER_WORD + 1, c - ' 'c + 1),
mod (row, ROWS_PER_WORD) * COLS_PER_CHAR)
if (0 < out && out <= out_width - COLS_PER_CHAR) {
# paint one pixel row of the glyph, low-order bit first
do col = 1, COLS_PER_CHAR; {
if (and (mask, 1) ~= 0)
outbuf (out) = fill_char
out += 1
mask = rs (mask, 1)
}
out += COLS_BETWEEN_CHARS
}
else
break # row is full; remaining characters go in the next bank
}
outbuf (out_width + 1) = EOS
call strim (outbuf) # trim trailing blanks
call print (STDOUT, "*s*n"s, outbuf)
}
next_in = in # resume the input scan here for the next bank
# blank rows between banks of output
do row = 1, ROWS_BETWEEN_LINES
call putch (NEWLINE, STDOUT)
} until (line (next_in) == EOS)
}
stop
end
# get_options --- parse command line arguments for block
#
# Recognized options:
#   -c <char>   character used to draw the block letters (default '*')
#   -w <width>  maximum output line width in columns (default 75)
# NOTE(review): ARG_DECL, PARSE_COMMAND_LINE and the ARG_* forms appear to
# be macros supplied by an include file not visible here -- confirm.
subroutine get_options (fill_char, out_width)
character fill_char
integer out_width
ARG_DECL
PARSE_COMMAND_LINE ("c<rs>w<ri>"s, _
"Usage: block { -c <char> | -w <width> }"s)
if (ARG_PRESENT (c))
fill_char = ARG_TEXT (c)
else
fill_char = '*'c # default fill character
if (ARG_PRESENT (w))
out_width = ARG_VALUE (w)
else
out_width = 75 # default output width
return
end
| /swt/src/lcl/std.r/block.r | no_license | arnoldrobbins/gt-swt | R | false | false | 2,189 | r | # block --- convert characters on STDIN to block letters on STDOUT
include "block_def.r.i"
character line (MAXLINE), outbuf (MAX_OUT), c, fill_char
integer in, out, row, col, mask, out_width, next_in
integer getlin
include MASK_DECL
call get_options (fill_char, out_width)
while (getlin (line, STDIN) ~= EOF) {
next_in = 1
repeat {
for (row = 0; row < ROWS_PER_CHAR; row += 1) {
do col = 1, out_width
outbuf (col) = ' 'c
out = 1
for (in = next_in; line (in) ~= EOS; in += 1) {
c = line (in)
if (c == BS) { # handle overstrikes
out -= COLS_PER_CHAR + COLS_BETWEEN_CHARS
next
}
elif (c < ' 'c) # ignore other control characters
next
else
mask = rs (masks (row / ROWS_PER_WORD + 1, c - ' 'c + 1),
mod (row, ROWS_PER_WORD) * COLS_PER_CHAR)
if (0 < out && out <= out_width - COLS_PER_CHAR) {
do col = 1, COLS_PER_CHAR; {
if (and (mask, 1) ~= 0)
outbuf (out) = fill_char
out += 1
mask = rs (mask, 1)
}
out += COLS_BETWEEN_CHARS
}
else
break
}
outbuf (out_width + 1) = EOS
call strim (outbuf)
call print (STDOUT, "*s*n"s, outbuf)
}
next_in = in
do row = 1, ROWS_BETWEEN_LINES
call putch (NEWLINE, STDOUT)
} until (line (next_in) == EOS)
}
stop
end
# get_options --- parse command line arguments for block
subroutine get_options (fill_char, out_width)
character fill_char
integer out_width
ARG_DECL
PARSE_COMMAND_LINE ("c<rs>w<ri>"s, _
"Usage: block { -c <char> | -w <width> }"s)
if (ARG_PRESENT (c))
fill_char = ARG_TEXT (c)
else
fill_char = '*'c
if (ARG_PRESENT (w))
out_width = ARG_VALUE (w)
else
out_width = 75
return
end
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weibull.R
\name{weibullHaz}
\alias{weibullHaz}
\title{weibullHaz function}
\usage{
weibullHaz()
}
\value{
an object inheriting class 'basehazardspec'
}
\description{
A function to define a parametric proportional hazards model where the baseline hazard is taken from the Weibull model.
This function returns an object inheriting class 'basehazardspec', list of functions 'distinfo', 'basehazard', 'gradbasehazard', 'hessbasehazard',
'cumbasehazard', 'gradcumbasehazard', 'hesscumbasehazard' and 'densityquantile'
}
\details{
The \code{distinfo} function is used to provide basic distribution specific information to other \code{spatsurv} functions. The user is required
to provide the following information in the returned list: \code{npars}, the number of parameters in this distribution; \code{parnames},
the names of the parameters; \code{trans}, the transformation scale on which the priors will be provided; \code{itrans}, the inverse
transformation function that will be applied to the parameters before the hazard, and other functions are evaluated; \code{jacobian},
the derivative of the inverse transformation function with respect to each of the parameters; and \code{hessian}, the second derivatives
of the inverse transformation function with respect to each of the parameters -- note that currently the package \code{spatsurv}
only allows the use of functions where the parameters are transformed independently.
The \code{basehazard} function is used to evaluate the baseline hazard function for the distribution of interest. It returns a
function that accepts as input a vector of times, \code{t} and returns a vector.
The \code{gradbasehazard} function is used to evaluate the gradient of the baseline hazard function with respect to the parameters,
this typically returns a vector. It returns a function that accepts as input a vector of times, \code{t}, and returns a matrix.
The \code{hessbasehazard} function is used to evaluate the Hessian of the baseline hazard function. It returns a function that accepts
as input a vector of times, \code{t} and returns a list of hessian matrices corresponding to each \code{t}.
The \code{cumbasehazard} function is used to evaluate the cumulative baseline hazard function for the distribution of interest.
It returns a function that accepts as input a vector of times, \code{t} and returns a vector.
The \code{gradcumbasehazard} function is used to evaluate the gradient of the cumulative baseline hazard function with respect
to the parameters, this typically returns a vector. It returns a function that accepts as input a vector of times, \code{t}, and returns a matrix.
The \code{hesscumbasehazard} function is used to evaluate the Hessian of the cumulative baseline hazard function. It returns a
function that accepts as input a vector of times, \code{t} and returns a list of hessian matrices corresponding to each \code{t}.
The \code{densityquantile} function is used to return quantiles of the density function. This is NOT REQUIRED for running the MCMC,
merely for use in post-processing with the \code{predict} function where \code{type} is 'densityquantile'. In the case of the Weibull
model for the baseline hazard, it can be shown that the q-th quantile is:
}
\seealso{
\link{tpowHaz}, \link{exponentialHaz}, \link{gompertzHaz}, \link{makehamHaz}
}
| /man/weibullHaz.Rd | no_license | bentaylor1/spatsurv | R | false | true | 3,420 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weibull.R
\name{weibullHaz}
\alias{weibullHaz}
\title{weibullHaz function}
\usage{
weibullHaz()
}
\value{
an object inheriting class 'basehazardspec'
}
\description{
A function to define a parametric proportional hazards model where the baseline hazard is taken from the Weibull model.
This function returns an object inheriting class 'basehazardspec', list of functions 'distinfo', 'basehazard', 'gradbasehazard', 'hessbasehazard',
'cumbasehazard', 'gradcumbasehazard', 'hesscumbasehazard' and 'densityquantile'
}
\details{
The \code{distinfo} function is used to provide basic distribution specific information to other \code{spatsurv} functions. The user is required
to provide the following information in the returned list: \code{npars}, the number of parameters in this distribution; \code{parnames},
the names of the parameters; \code{trans}, the transformation scale on which the priors will be provided; \code{itrans}, the inverse
transformation function that will be applied to the parameters before the hazard, and other functions are evaluated; \code{jacobian},
the derivative of the inverse transformation function with respect to each of the parameters; and \code{hessian}, the second derivatives
of the inverse transformation function with respect to each of the parameters -- note that currently the package \code{spatsurv}
only allows the use of functions where the parameters are transformed independently.
The \code{basehazard} function is used to evaluate the baseline hazard function for the distribution of interest. It returns a
function that accepts as input a vector of times, \code{t} and returns a vector.
The \code{gradbasehazard} function is used to evaluate the gradient of the baseline hazard function with respect to the parameters,
this typically returns a vector. It returns a function that accepts as input a vector of times, \code{t}, and returns a matrix.
The \code{hessbasehazard} function is used to evaluate the Hessian of the baseline hazard function. It returns a function that accepts
as input a vector of times, \code{t} and returns a list of hessian matrices corresponding to each \code{t}.
The \code{cumbasehazard} function is used to evaluate the cumulative baseline hazard function for the distribution of interest.
It returns a function that accepts as input a vector of times, \code{t} and returns a vector.
The \code{gradcumbasehazard} function is used to evaluate the gradient of the cumulative baseline hazard function with respect
to the parameters, this typically returns a vector. It returns a function that accepts as input a vector of times, \code{t}, and returns a matrix.
The \code{hesscumbasehazard} function is used to evaluate the Hessian of the cumulative baseline hazard function. It returns a
function that accepts as input a vector of times, \code{t} and returns a list of hessian matrices corresponding to each \code{t}.
The \code{densityquantile} function is used to return quantiles of the density function. This is NOT REQUIRED for running the MCMC,
merely for use in post-processing with the \code{predict} function where \code{type} is 'densityquantile'. In the case of the Weibull
model for the baseline hazard, it can be shown that the q-th quantile is:
}
\seealso{
\link{tpowHaz}, \link{exponentialHaz}, \link{gompertzHaz}, \link{makehamHaz}
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{class}
\name{mzIDdatabase-class}
\alias{mzIDdatabase-class}
\title{A class to store database information from an mzIdentML file}
\description{
This class handles parsing and storage of database information from mzIDentML files, residing at the
/MzIdentML/SequenceCollection/DBSequence node.
}
\details{
The content of the class is stored in a data.frame with columns depending on the content of the mzIdentML
file. Required information for files conforming to the mzIdentML standard are: 'accession', 'searchDatabase_ref'
and 'id', while additional information can e.g. be 'length' (number of residues), 'description' (from the fasta file)
and 'sequence' (the actual sequence).
}
\section{Objects from the class}{
Objects of mzIDdatabase are not meant to be created explicitly but as part of the \code{\link{mzID-class}}. Still,
an object can be created with the constructor \code{\link{mzIDdatabase}} (not exported).
}
\section{Slots}{
\describe{
\item{\code{database}:}{A data.frame containing references to all the database sequences from the mzIdentML file}
}
}
\section{Methods}{
\describe{
\item{\code{length}:}{Reports the number of entries in the database}
}
}
\seealso{
\code{\link{mzID-class}} \code{\link{mzIDdatabase}}
}
| /man/mzIDdatabase-class.Rd | no_license | vladpetyuk/mzID | R | false | false | 1,297 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\docType{class}
\name{mzIDdatabase-class}
\alias{mzIDdatabase-class}
\title{A class to store database information from an mzIdentML file}
\description{
This class handles parsing and storage of database information from mzIDentML files, residing at the
/MzIdentML/SequenceCollection/DBSequence node.
}
\details{
The content of the class is stored in a data.frame with columns depending on the content of the mzIdentML
file. Required information for files conforming to the mzIdentML standard are: 'accession', 'searchDatabase_ref'
and 'id', while additional information can e.g. be 'length' (number of residues), 'description' (from the fasta file)
and 'sequence' (the actual sequence).
}
\section{Objects from the class}{
Objects of mzIDdatabase are not meant to be created explicitly but as part of the \code{\link{mzID-class}}. Still,
an object can be created with the constructor \code{\link{mzIDdatabase}} (not exported).
}
\section{Slots}{
\describe{
\item{\code{database}:}{A data.frame containing references to all the database sequences from the mzIdentML file}
}
}
\section{Methods}{
\describe{
\item{\code{length}:}{Reports the number of entries in the database}
}
}
\seealso{
\code{\link{mzID-class}} \code{\link{mzIDdatabase}}
}
|
#' An S4 class to represent a WhiBo Cluster model
#'
#' @slot whibo_cluster WhiBo Clustering object - list of objects for White-Box Clustering
#' @rdname whibo_cluster
#' @name whibo_cluster-class
#' @exportClass whibo_cluster
#' @author Sandro Radovanovic
methods::setClass(Class = 'whibo_cluster', representation = 'list')
# NOTE(review): source()-ing sibling files at package top level is unusual for
# package code - the Collate field in DESCRIPTION normally handles file order.
# Confirm these are intentional (e.g. for interactive/script use).
source(file = 'R/wc_normalization.R')
source(file = 'R/wc_initialization.R')
source(file = 'R/wc_assignment.R')
source(file = 'R/wc_recalculate.R')
source(file = 'R/wc_cluster_performance.R')
#' Find Cluster model using White-Box Cluster Algorithm Design.
#'
#' @param data Data on which clustering should be performed.
#' @param k Number of Cluster Representatives.
#' @param normalization_type Which normalization should be used (look at \code{wc_norm_types} for possible values). Default value is \code{No}.
#' @param cluster_initialization_type Which initialization of Cluster Representatives should be used (look at \code{wc_init_types} for possible values). Default value is \code{Random}.
#' @param assignment_type Which assignment function should be used (look at \code{wc_assign_types} for possible values). Default value is \code{Euclidean}.
#' @param recalculation_type Which function for updating Cluster Representatives should be used (look at \code{wc_recalculate_types} for possible values). Default value is \code{Mean}.
#' @param max_iteration Number of iterations. Default value is 20.
#' @param no_of_restarts Number of restarts of whole clustering procedure. Default value is 1.
#' @return Object of type \code{whibo_cluster} which include Cluster Representatives (\code{centroids}), number of elements per cluster (\code{elements_per_cluster}), assignments (\code{assignments}), measures of cluster quality (\code{within_sum_of_squares}, \code{between_ss_div_total_ss} and \code{internal_measures_of_quality}), cluster models per iterations (\code{model_history}), iterations (\code{iterations}) and parameters used (\code{params})
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @seealso \code{plot.whibo_cluster}, \code{predict.whibo_cluster}
#' @importFrom clusterCrit intCriteria
#' @examples
#' data <- iris[, 1:4] #Take only numerical columns
#'
#' #Perform k-means clustering
#' model <- whibo_clustering(data = data, k = 3)
#' model
#'
#' #Perform some unorthodox clustering
#' model <- whibo_clustering(data = data, k = 3,
#'                           normalization_type = 'Z', cluster_initialization_type = 'Ward',
#'                           assignment_type = 'Correlation', recalculation_type = 'Trimean')
#'
#' @export
whibo_clustering <- function(data, k = 3,
                             normalization_type = 'No', cluster_initialization_type = 'Random',
                             assignment_type = 'Euclidean', recalculation_type = 'Mean',
                             max_iteration = 20, no_of_restarts = 1)
{
  # DATA NORMALIZATION - fit the normalization once; the fitted model is kept
  # in params so predict()/plot_pairs() can re-apply it to new data.
  model <- wc_normalize(data = data, normalization_type = normalization_type)
  data <- model$data

  # PARAMETERS stored alongside the final result
  params <- list('normalization_type' = normalization_type, 'cluster_initialization_type' = cluster_initialization_type,
                 'assignment_type' = assignment_type, 'recalculation_type' = recalculation_type, 'normalization_model' = model$model)

  # BOOKKEEPING across restarts (the original initialized these twice; once is enough).
  # -Inf (instead of 0) guarantees the first restart with a non-NA score is kept,
  # so best_model cannot remain NA when every restart scores exactly 0.
  history_of_cluster_models <- list()
  best_model <- NA
  best_performance <- -Inf

  # TESTING MULTIPLE TIMES, OR ONCE IF SAID SO
  for (m in seq_len(no_of_restarts))
  {
    # HELP VARIABLES
    iteration <- 1
    assignment_hist <- list()
    centroids_hist <- list()

    # GENERATE INITIAL REPRESENTATIVES
    centroids <- wc_initialize(data = data, k = k, initialization_type = cluster_initialization_type)
    centroids_hist[[iteration]] <- centroids

    # ASSIGNING EXAMPLES TO CENTROIDS
    assignments <- wc_assignment(data = data, centroids = centroids, assignment_type = assignment_type)
    assignment_hist[[iteration]] <- assignments

    last_centroids <- centroids
    stopping <- FALSE
    while (!stopping)
    {
      iteration <- iteration + 1

      # RECALCULATE CLUSTER REPRESENTATIVES
      centroids <- wc_recalculate(data = data, assignment = assignments, recalculate_type = recalculation_type, assignment_type = assignment_type)
      centroids_hist[[iteration]] <- centroids

      # ASSIGNING EXAMPLES TO CENTROIDS
      assignments <- wc_assignment(data = data, centroids = centroids, assignment_type = assignment_type)
      assignment_hist[[iteration]] <- assignments

      # IF A CLUSTER LOST ALL OF ITS ELEMENTS, KEEP ITS PREVIOUS REPRESENTATIVE
      for (cent in seq_len(k))
      {
        if (nrow(centroids[centroids$WCCluster == cent, ]) == 0)
        {
          centroids <- rbind.data.frame(centroids, last_centroids[last_centroids$WCCluster == cent, ])
        }
      }
      # Reorder once, after all missing representatives have been re-added
      # (the original reordered inside the loop on every pass).
      centroids <- centroids[order(centroids$WCCluster, decreasing = FALSE), ]

      # CENTROIDS STOPPING - representatives did not move
      if (sum(centroids != last_centroids) == 0)
      {
        stopping <- TRUE
      }
      # ITERATION STOPPING - iteration budget exhausted
      if (iteration == max_iteration)
      {
        stopping <- TRUE
      }
      last_centroids <- centroids
    }
    # END WHILE

    # CLUSTER QUALITY OF THIS RESTART
    elements_per_cluster <- table(assignments)
    within_ss <- wc_eval_within_sum_of_squares(data = data, centroids = last_centroids, assignment = assignments)
    between_ss <- wc_eval_between_sum_of_squares(data = data, centroids = last_centroids, assignment = assignments)
    total_ss <- wc_eval_total_sum_of_squares(data = data)
    evaluation <- clusterCrit::intCriteria(traj = as.matrix(data), part = as.integer(assignments), crit = 'all')

    output <- list('centroids' = centroids, 'elements_per_cluster' = elements_per_cluster, 'assignments' = assignments,
                   'within_sum_of_squares' = within_ss, 'between_ss_total_ss' = sum(between_ss)/total_ss,
                   'internal_measures_of_quality' = evaluation,
                   'history' = list('centroids_history' = centroids_hist, 'assignment_history' = assignment_hist, 'num_of_iterations' = iteration))
    history_of_cluster_models[[m]] <- output

    # Keep the restart with the largest between-SS / total-SS ratio.
    restart_performance <- sum(between_ss) / total_ss
    if (!is.na(restart_performance) && restart_performance > best_performance)
    {
      best_performance <- restart_performance
      best_model <- output
    }
  }
  # END FOR

  model_output <- list('centroids' = best_model$centroids, 'elements_per_cluster' = best_model$elements_per_cluster, 'assignments' = best_model$assignments,
                       'within_sum_of_squares' = best_model$within_sum_of_squares, 'between_ss_div_total_ss' = best_model$between_ss_total_ss,
                       'internal_measures_of_quality' = best_model$internal_measures_of_quality,
                       'model_history' = best_model$history, 'iterations' = history_of_cluster_models, 'params' = params)
  class(model_output) <- 'whibo_cluster'
  return(model_output)
}
#' Show White-Box Cluster Algorithm model
#'
#' @param x WhiBo Cluster model.
#' @param ... None of those will be used.
#' @return Summary text about Cluster model (printed; returns \code{NULL} invisibly via \code{cat}).
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @seealso \code{summary.whibo_cluster}
#' @examples
#' data <- iris[, 1:4] #Numerical data only
#'
#' model <- whibo_clustering(data = data, k = 3)
#' print(model)
#'
#' @rdname print
#' @export
print.whibo_cluster <- function(x, ...)
{
  model <- x
  # Header: which WhiBo components produced this model
  cat('----------WhiBo cluster model----------')
  cat('\n')
  cat(sprintf('Normalization type:\t\t%s\n', model$params$normalization_type))
  cat(sprintf('Initialization type:\t\t%s\n', model$params$cluster_initialization_type))
  cat(sprintf('Assignment type:\t\t%s\n', model$params$assignment_type))
  cat(sprintf('Update repr. type:\t\t%s\n', model$params$recalculation_type))
  cat('---------------------------------------')
  cat('\n')
  cat('\n')
  # Cluster representatives without the bookkeeping WCCluster column
  cat(sprintf('Centroids:\n'))
  print(model$centroids[, !grepl('WCCluster', colnames(model$centroids))])
  cat('\n')
  cat('\n')
  cat(sprintf('Assignments:\n'))
  print(model$assignments)
  cat('\n')
  cat(sprintf('Number of elements per cluster:\n'))
  print(as.data.frame(t(as.matrix(model$elements_per_cluster))), row.names = FALSE)
  cat('\n')
  cat('\n')
  cat(sprintf('Finished in %s iterations', model$model_history$num_of_iterations))
  cat('\n')
  cat('---------------------------------------')
  cat('\n')
  # Quality metrics of the winning restart
  cat('within sum of squares per cluster\n')
  cat(model$within_sum_of_squares)
  cat('\n')
  cat('\n')
  cat(sprintf('Between SS / Total SS: \t%2.2f%%\n', round(model$between_ss_div_total_ss * 100, digits = 2)))
  cat(sprintf('Davies-Bouldin index: \t%1.3f\n', model$internal_measures_of_quality$davies_bouldin))
  cat(sprintf('Silhouette index: \t%1.3f\n', model$internal_measures_of_quality$silhouette))
  cat(sprintf('Dunn index: \t\t%1.3f\n', model$internal_measures_of_quality$dunn))
  cat(sprintf('C index: \t\t%1.3f\n', model$internal_measures_of_quality$c_index))
  cat('Many more internal cluster evaluation metrics can be found in internal_measures_of_quality list...')
  cat('\n\n')
  cat('You can access history of cluster model (each iteration) and each restart phase')
}
#' Show White-Box Cluster Algorithm model
#'
#' @param object WhiBo Cluster model.
#' @param ... None of those will be used.
#' @return Summary text about Cluster model.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @seealso \code{print.whibo_cluster}
#' @examples
#' data <- iris[, 1:4] #Numerical data only
#'
#' model <- whibo_clustering(data = data, k = 3)
#' summary(model)
#'
#' @rdname summary
#' @export
summary.whibo_cluster <- function(object, ...)
{
  # summary() and print() intentionally show the exact same report for this
  # class; the original duplicated the whole body line for line. Delegating to
  # print.whibo_cluster() keeps the two methods from drifting apart.
  print.whibo_cluster(object, ...)
}
#' Plot WhiBo Cluster Representatives
#'
#' @param x WhiBo Cluster model.
#' @param ... None of those will be used.
#' @return Line plot with Cluster representatives (called for its side effect).
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @examples
#' data <- iris[, 1:4] #Numerical data only
#'
#' model <- whibo_clustering(data = data, k = 3)
#' plot(model)
#'
#' @rdname plot
#' @export
plot.whibo_cluster <- function(x, ...)
{
  model <- x
  # Select feature columns by name, consistent with the other methods of this
  # class (the original dropped column 1 positionally, which silently breaks
  # if WCCluster is not the first column). drop = FALSE keeps a data.frame
  # even when there is a single feature.
  feature_cols <- !grepl('WCCluster', colnames(model$centroids))
  centers <- model$centroids[, feature_cols, drop = FALSE]
  # One line per cluster representative, without axes
  graphics::matplot(t(centers), type = 'l', lty = rep(1, nrow(centers)), xlab = 'Cluster Representatives', ylab = 'Value', axes = FALSE)
  # Add axes: values on the left, feature names along the bottom
  graphics::axis(2)
  graphics::axis(1, at = seq_len(ncol(centers)), labels = colnames(centers))
  # Add box around plot
  graphics::box()
}
# registerS3method("plot","whibo_cluster","plot.whibo_cluster", envir = getNamespace("whiboclustering"))
#' Plot WhiBo Cluster Representatives
#'
#' @param model WhiBo Cluster model.
#' @param data Data used for clustering (optional).
#' @return Plots a pairs plot where Cluster representatives are presented with data (if provided).
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @examples
#' data <- iris[, 1:4] #Numerical data only
#'
#' model <- whibo_clustering(data = data, k = 3)
#' plot_pairs(model) #Plotting Cluster Representatives only
#'
#' plot_pairs(model, data) #Plotting Cluster Representatives and Data
#'
#' @rdname plot_pairs
#' @export
plot_pairs <- function(model, data)
{
  if (missing(data))
  {
    # Representatives only: crosses, one color per cluster
    graphics::plot(model$centroids[ , !grepl('WCCluster', colnames(model$centroids))], cex = 2, pch = 3, col = seq_len(nrow(model$centroids)))
    print('Data points are omitted from plot')
  }
  else
  {
    # The supplied data must match the data the model was fitted on
    if (nrow(data) != length(model$assignments))
    {
      stop('There is discrepancy between data and model')
    }
    # Re-apply the stored normalization before overlaying the data points
    new_data <- eval(call(name = as.character(wc_norm_types$Method[tolower(wc_norm_types$Type) == tolower(model$params$normalization_type)]), data, model$params$normalization_model))
    new_data <- rbind.data.frame(new_data$data, model$centroids[, !grepl('WCCluster', colnames(model$centroids))])
    # Data points small circles colored by assignment; representatives large crosses
    graphics::plot(new_data, cex = c(rep(0.6, nrow(new_data) - nrow(model$centroids)), rep(1.4, nrow(model$centroids))), pch = c(rep(1, nrow(new_data) - nrow(model$centroids)), rep(3, nrow(model$centroids))), col = c(model$assignments, seq_len(nrow(model$centroids))))
  }
}
#' Predict to which Cluster new data belongs
#'
#' @param object WhiBo Cluster model.
#' @param data Data for which Cluster should be obtained.
#' @param ... None of those will be used.
#' @return Vector of assignments.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @examples
#' data <- iris[1:100, 1:4] #Numerical data only and first 100 rows
#'
#' model <- whibo_clustering(data = data, k = 3)
#' predict(object = model, data = iris[101:150, 1:4])
#'
#' @rdname predict
#' @export
predict.whibo_cluster <- function(object, data, ...)
{
  fitted_model <- object
  # Without new data, simply return the training assignments.
  if (missing(data)) {
    return(fitted_model$assignments)
  }
  # Re-apply the normalization that was fitted during training, then assign
  # each normalized row to its nearest stored representative.
  norm_fun <- as.character(wc_norm_types$Method[tolower(wc_norm_types$Type) == tolower(fitted_model$params$normalization_type)])
  normalized <- eval(call(name = norm_fun, data, fitted_model$params$normalization_model))
  wc_assignment(data = normalized$data,
                centroids = fitted_model$centroids,
                assignment_type = fitted_model$params$assignment_type)
}
#Generate Manual file - Commented, but not forgoten
#system("R CMD Rd2pdf . --title=WhiBoClustering yourpackagename --output=././manual.pdf --force --no-clean --internals")
| /R/whibo_clustering.R | no_license | cran/whiboclustering | R | false | false | 15,390 | r | #' As S4 class to represent WhiBo Cluster model
#' An S4 class to represent a WhiBo Cluster model
#' @slot whibo_cluster WhiBo Clustering object - list of objects for White-Box Clustering
#' @rdname whibo_cluster
#' @name whibo_cluster-class
#' @exportClass whibo_cluster
#' @author Sandro Radovanovic
methods::setClass(Class = 'whibo_cluster', representation = 'list')
# NOTE(review): source()-ing sibling files at package top level is unusual for
# package code - the Collate field in DESCRIPTION normally handles file order.
# Confirm these are intentional (e.g. for interactive/script use).
source(file = 'R/wc_normalization.R')
source(file = 'R/wc_initialization.R')
source(file = 'R/wc_assignment.R')
source(file = 'R/wc_recalculate.R')
source(file = 'R/wc_cluster_performance.R')
#' Find Cluster model using White-Box Cluster Algorithm Design.
#'
#' @param data Data on which clustering should be performed.
#' @param k Number of Cluster Representatives.
#' @param normalization_type Which normalization should be used (look at \code{wc_norm_types} for possible values). Default value is \code{No}.
#' @param cluster_initialization_type Which initialization of Cluster Representatives should be used (look at \code{wc_init_types} for possible values). Default value is \code{Random}.
#' @param assignment_type Which assignment function should be used (look at \code{wc_assign_types} for possible values). Default value is \code{Euclidean}.
#' @param recalculation_type Which function for updating Cluster Representatives should be used (look at \code{wc_recalculate_types} for possible values). Default value is \code{Mean}.
#' @param max_iteration Number of iterations. Default value is 20.
#' @param no_of_restarts Number of restarts of whole clustering procedure. Default value is 1.
#' @return Object of type \code{whibo_cluster} which include Cluster Representatives (\code{centroids}), number of elements per cluster (\code{elements_per_cluster}), assignments (\code{assignments}), measures of cluster quality (\code{within_sum_of_squares}, \code{between_ss_div_total_ss} and \code{internal_measures_of_quality}), cluster models per iterations (\code{model_history}), iterations (\code{iterations}) and parameters used (\code{params})
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @seealso \code{plot.whibo_cluster}, \code{predict.whibo_cluster}
#' @importFrom clusterCrit intCriteria
#' @examples
#' data <- iris[, 1:4] #Take only numerical columns
#'
#' #Perform k-means clustering
#' model <- whibo_clustering(data = data, k = 3)
#' model
#'
#' #Perform some unorthodox clustering
#' model <- whibo_clustering(data = data, k = 3,
#'                           normalization_type = 'Z', cluster_initialization_type = 'Ward',
#'                           assignment_type = 'Correlation', recalculation_type = 'Trimean')
#'
#' @export
whibo_clustering <- function(data, k = 3,
                             normalization_type = 'No', cluster_initialization_type = 'Random',
                             assignment_type = 'Euclidean', recalculation_type = 'Mean',
                             max_iteration = 20, no_of_restarts = 1)
{
  # DATA NORMALIZATION - fit the normalization once; the fitted model is kept
  # in params so predict()/plot_pairs() can re-apply it to new data.
  model <- wc_normalize(data = data, normalization_type = normalization_type)
  data <- model$data

  # PARAMETERS stored alongside the final result
  params <- list('normalization_type' = normalization_type, 'cluster_initialization_type' = cluster_initialization_type,
                 'assignment_type' = assignment_type, 'recalculation_type' = recalculation_type, 'normalization_model' = model$model)

  # BOOKKEEPING across restarts (the original initialized these twice; once is enough).
  # -Inf (instead of 0) guarantees the first restart with a non-NA score is kept,
  # so best_model cannot remain NA when every restart scores exactly 0.
  history_of_cluster_models <- list()
  best_model <- NA
  best_performance <- -Inf

  # TESTING MULTIPLE TIMES, OR ONCE IF SAID SO
  for (m in seq_len(no_of_restarts))
  {
    # HELP VARIABLES
    iteration <- 1
    assignment_hist <- list()
    centroids_hist <- list()

    # GENERATE INITIAL REPRESENTATIVES
    centroids <- wc_initialize(data = data, k = k, initialization_type = cluster_initialization_type)
    centroids_hist[[iteration]] <- centroids

    # ASSIGNING EXAMPLES TO CENTROIDS
    assignments <- wc_assignment(data = data, centroids = centroids, assignment_type = assignment_type)
    assignment_hist[[iteration]] <- assignments

    last_centroids <- centroids
    stopping <- FALSE
    while (!stopping)
    {
      iteration <- iteration + 1

      # RECALCULATE CLUSTER REPRESENTATIVES
      centroids <- wc_recalculate(data = data, assignment = assignments, recalculate_type = recalculation_type, assignment_type = assignment_type)
      centroids_hist[[iteration]] <- centroids

      # ASSIGNING EXAMPLES TO CENTROIDS
      assignments <- wc_assignment(data = data, centroids = centroids, assignment_type = assignment_type)
      assignment_hist[[iteration]] <- assignments

      # IF A CLUSTER LOST ALL OF ITS ELEMENTS, KEEP ITS PREVIOUS REPRESENTATIVE
      for (cent in seq_len(k))
      {
        if (nrow(centroids[centroids$WCCluster == cent, ]) == 0)
        {
          centroids <- rbind.data.frame(centroids, last_centroids[last_centroids$WCCluster == cent, ])
        }
      }
      # Reorder once, after all missing representatives have been re-added
      # (the original reordered inside the loop on every pass).
      centroids <- centroids[order(centroids$WCCluster, decreasing = FALSE), ]

      # CENTROIDS STOPPING - representatives did not move
      if (sum(centroids != last_centroids) == 0)
      {
        stopping <- TRUE
      }
      # ITERATION STOPPING - iteration budget exhausted
      if (iteration == max_iteration)
      {
        stopping <- TRUE
      }
      last_centroids <- centroids
    }
    # END WHILE

    # CLUSTER QUALITY OF THIS RESTART
    elements_per_cluster <- table(assignments)
    within_ss <- wc_eval_within_sum_of_squares(data = data, centroids = last_centroids, assignment = assignments)
    between_ss <- wc_eval_between_sum_of_squares(data = data, centroids = last_centroids, assignment = assignments)
    total_ss <- wc_eval_total_sum_of_squares(data = data)
    evaluation <- clusterCrit::intCriteria(traj = as.matrix(data), part = as.integer(assignments), crit = 'all')

    output <- list('centroids' = centroids, 'elements_per_cluster' = elements_per_cluster, 'assignments' = assignments,
                   'within_sum_of_squares' = within_ss, 'between_ss_total_ss' = sum(between_ss)/total_ss,
                   'internal_measures_of_quality' = evaluation,
                   'history' = list('centroids_history' = centroids_hist, 'assignment_history' = assignment_hist, 'num_of_iterations' = iteration))
    history_of_cluster_models[[m]] <- output

    # Keep the restart with the largest between-SS / total-SS ratio.
    restart_performance <- sum(between_ss) / total_ss
    if (!is.na(restart_performance) && restart_performance > best_performance)
    {
      best_performance <- restart_performance
      best_model <- output
    }
  }
  # END FOR

  model_output <- list('centroids' = best_model$centroids, 'elements_per_cluster' = best_model$elements_per_cluster, 'assignments' = best_model$assignments,
                       'within_sum_of_squares' = best_model$within_sum_of_squares, 'between_ss_div_total_ss' = best_model$between_ss_total_ss,
                       'internal_measures_of_quality' = best_model$internal_measures_of_quality,
                       'model_history' = best_model$history, 'iterations' = history_of_cluster_models, 'params' = params)
  class(model_output) <- 'whibo_cluster'
  return(model_output)
}
#' Show White-Box Cluster Algorithm model
#'
#' @param x WhiBo Cluster model.
#' @param ... None of those will be used.
#' @return Summary text about Cluster model (printed; returns \code{NULL} invisibly via \code{cat}).
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @seealso \code{summary.whibo_cluster}
#' @examples
#' data <- iris[, 1:4] #Numerical data only
#'
#' model <- whibo_clustering(data = data, k = 3)
#' print(model)
#'
#' @rdname print
#' @export
print.whibo_cluster <- function(x, ...)
{
  model <- x
  # Header: which WhiBo components produced this model
  cat('----------WhiBo cluster model----------')
  cat('\n')
  cat(sprintf('Normalization type:\t\t%s\n', model$params$normalization_type))
  cat(sprintf('Initialization type:\t\t%s\n', model$params$cluster_initialization_type))
  cat(sprintf('Assignment type:\t\t%s\n', model$params$assignment_type))
  cat(sprintf('Update repr. type:\t\t%s\n', model$params$recalculation_type))
  cat('---------------------------------------')
  cat('\n')
  cat('\n')
  # Cluster representatives without the bookkeeping WCCluster column
  cat(sprintf('Centroids:\n'))
  print(model$centroids[, !grepl('WCCluster', colnames(model$centroids))])
  cat('\n')
  cat('\n')
  cat(sprintf('Assignments:\n'))
  print(model$assignments)
  cat('\n')
  cat(sprintf('Number of elements per cluster:\n'))
  print(as.data.frame(t(as.matrix(model$elements_per_cluster))), row.names = FALSE)
  cat('\n')
  cat('\n')
  cat(sprintf('Finished in %s iterations', model$model_history$num_of_iterations))
  cat('\n')
  cat('---------------------------------------')
  cat('\n')
  # Quality metrics of the winning restart
  cat('within sum of squares per cluster\n')
  cat(model$within_sum_of_squares)
  cat('\n')
  cat('\n')
  cat(sprintf('Between SS / Total SS: \t%2.2f%%\n', round(model$between_ss_div_total_ss * 100, digits = 2)))
  cat(sprintf('Davies-Bouldin index: \t%1.3f\n', model$internal_measures_of_quality$davies_bouldin))
  cat(sprintf('Silhouette index: \t%1.3f\n', model$internal_measures_of_quality$silhouette))
  cat(sprintf('Dunn index: \t\t%1.3f\n', model$internal_measures_of_quality$dunn))
  cat(sprintf('C index: \t\t%1.3f\n', model$internal_measures_of_quality$c_index))
  cat('Many more internal cluster evaluation metrics can be found in internal_measures_of_quality list...')
  cat('\n\n')
  cat('You can access history of cluster model (each iteration) and each restart phase')
}
#' Show White-Box Cluster Algorithm model
#'
#' @param object WhiBo Cluster model.
#' @param ... None of those will be used.
#' @return Summary text about Cluster model.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @seealso \code{print.whibo_cluster}
#' @examples
#' data <- iris[, 1:4] #Numerical data only
#'
#' model <- whibo_clustering(data = data, k = 3)
#' summary(model)
#'
#' @rdname summary
#' @export
summary.whibo_cluster <- function(object, ...)
{
  # summary() and print() intentionally show the exact same report for this
  # class; the original duplicated the whole body line for line. Delegating to
  # print.whibo_cluster() keeps the two methods from drifting apart.
  print.whibo_cluster(object, ...)
}
#' Plot WhiBo Cluster Representatives
#'
#' @param x WhiBo Cluster model.
#' @param ... None of those will be used.
#' @return Line plot with Cluster representatives (called for its side effect).
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @examples
#' data <- iris[, 1:4] #Numerical data only
#'
#' model <- whibo_clustering(data = data, k = 3)
#' plot(model)
#'
#' @rdname plot
#' @export
plot.whibo_cluster <- function(x, ...)
{
  model <- x
  # Select feature columns by name, consistent with the other methods of this
  # class (the original dropped column 1 positionally, which silently breaks
  # if WCCluster is not the first column). drop = FALSE keeps a data.frame
  # even when there is a single feature.
  feature_cols <- !grepl('WCCluster', colnames(model$centroids))
  centers <- model$centroids[, feature_cols, drop = FALSE]
  # One line per cluster representative, without axes
  graphics::matplot(t(centers), type = 'l', lty = rep(1, nrow(centers)), xlab = 'Cluster Representatives', ylab = 'Value', axes = FALSE)
  # Add axes: values on the left, feature names along the bottom
  graphics::axis(2)
  graphics::axis(1, at = seq_len(ncol(centers)), labels = colnames(centers))
  # Add box around plot
  graphics::box()
}
# registerS3method("plot","whibo_cluster","plot.whibo_cluster", envir = getNamespace("whiboclustering"))
#' Plot WhiBo Cluster Representatives
#'
#' @param model WhiBo Cluster model.
#' @param data Data used for clustering (optional).
#' @return Plots a pairs plot where Cluster representatives are presented with data (if provided).
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @examples
#' data <- iris[, 1:4] #Numerical data only
#'
#' model <- whibo_clustering(data = data, k = 3)
#' plot_pairs(model) #Plotting Cluster Representatives only
#'
#' plot_pairs(model, data) #Plotting Cluster Representatives and Data
#'
#' @rdname plot_pairs
#' @export
plot_pairs <- function(model, data)
{
  if (missing(data))
  {
    # Representatives only: crosses, one color per cluster
    graphics::plot(model$centroids[ , !grepl('WCCluster', colnames(model$centroids))], cex = 2, pch = 3, col = seq_len(nrow(model$centroids)))
    print('Data points are omitted from plot')
  }
  else
  {
    # The supplied data must match the data the model was fitted on
    if (nrow(data) != length(model$assignments))
    {
      stop('There is discrepancy between data and model')
    }
    # Re-apply the stored normalization before overlaying the data points
    new_data <- eval(call(name = as.character(wc_norm_types$Method[tolower(wc_norm_types$Type) == tolower(model$params$normalization_type)]), data, model$params$normalization_model))
    new_data <- rbind.data.frame(new_data$data, model$centroids[, !grepl('WCCluster', colnames(model$centroids))])
    # Data points small circles colored by assignment; representatives large crosses
    graphics::plot(new_data, cex = c(rep(0.6, nrow(new_data) - nrow(model$centroids)), rep(1.4, nrow(model$centroids))), pch = c(rep(1, nrow(new_data) - nrow(model$centroids)), rep(3, nrow(model$centroids))), col = c(model$assignments, seq_len(nrow(model$centroids))))
  }
}
#' Predict to which Cluster new data belongs
#'
#' @param object WhiBo Cluster model.
#' @param data Data for which Cluster should be obtained.
#' @param ... None of those will be used.
#' @return Vector of assignments.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
#' @examples
#' data <- iris[1:100, 1:4] #Numerical data only and first 100 rows
#'
#' model <- whibo_clustering(data = data, k = 3)
#' predict(object = model, data = iris[101:150, 1:4])
#'
#' @rdname predict
#' @export
predict.whibo_cluster <- function(object, data, ...)
{
  fitted_model <- object
  # Without new data, simply return the training assignments.
  if (missing(data)) {
    return(fitted_model$assignments)
  }
  # Re-apply the normalization that was fitted during training, then assign
  # each normalized row to its nearest stored representative.
  norm_fun <- as.character(wc_norm_types$Method[tolower(wc_norm_types$Type) == tolower(fitted_model$params$normalization_type)])
  normalized <- eval(call(name = norm_fun, data, fitted_model$params$normalization_model))
  wc_assignment(data = normalized$data,
                centroids = fitted_model$centroids,
                assignment_type = fitted_model$params$assignment_type)
}
#Generate Manual file - Commented, but not forgoten
#system("R CMD Rd2pdf . --title=WhiBoClustering yourpackagename --output=././manual.pdf --force --no-clean --internals")
|
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of accessor closures sharing this function's environment:
  #   set(y)          -- replace the matrix (invalidates the cached inverse)
  #   get()           -- return the matrix
  #   setinverse(inv) -- store a computed inverse in the cache
  #   getinverse()    -- return the cached inverse, or NULL if not yet set
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by makeCacheMatrix().
  # A previously cached inverse is reused (with a message); otherwise the
  # inverse is computed with solve(), cached, and returned.
  # Extra arguments in `...` are forwarded to solve().
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
# Demo: wrap a 2x2 matrix and invert it twice; the second call hits the cache
# and prints "getting cached data".
demo_matrix <- matrix(c(5, 10, 7, 10), nrow = 2, ncol = 2)
cached_demo <- makeCacheMatrix(demo_matrix)
cacheSolve(cached_demo)  # first call: computes and caches the inverse
cacheSolve(cached_demo)  # second call: returns the cached inverse
| /Coursera 3 week_assignment.R | no_license | artursunov/datasciencecoursera | R | false | false | 631 | r | makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# Duplicate copy of cacheSolve (dataset-dump artifact; code kept byte-identical).
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by makeCacheMatrix(),
  # reusing the cached value when one exists. `...` is forwarded to solve().
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)  # compute the inverse
  x$setinverse(m)        # cache it for subsequent calls
  m
}
B <- matrix(c(5,10,7, 10),2,2)  # demo 2x2 matrix
B1 <- makeCacheMatrix(B)        # wrap it with an inverse cache
cacheSolve(B1)                  # first call: computes and caches the inverse
cacheSolve(B1)                  # second call: returns the cached inverse
|
#####################################################################################################################
# login_screen.R - Maintenance_Metrics Source file for Server Module.
#
# Author: Aravind
# Created: 02/06/2020.
#####################################################################################################################
# Start of the observe's'
# 1. Observe to load the columns from DB into below reactive values.
observe({
  # Load the maintenance-metric columns for the selected package from the DB
  # into reactive values. Each DB column stores a "value,detail" pair as a
  # single comma-separated string.
  req(input$select_pack)
  if(input$select_pack != "Select"){
    # NOTE(review): the query is built by string concatenation; if
    # input$select_pack can contain user-controlled text this is vulnerable to
    # SQL injection -- prefer a parameterized query in db_fun if supported.
    values$riskmetrics_mm <-
      db_fun(
        paste0(
          "SELECT * FROM MaintenanceMetrics WHERE MaintenanceMetrics.mm_id ='",
          input$select_pack,
          "'"
        )
      )
    # Split a "value,detail" string into c(value, detail); detail is NA when
    # the stored string has no comma (same behavior as the original
    # repeated strsplit pattern).
    splitMetric <- function(metric) {
      parts <- strsplit(metric, ",")[[1]]
      c(parts[1], parts[2])
    }
    values$package_has_vignettes <- splitMetric(values$riskmetrics_mm$package_has_vignettes)
    values$package_has_website <- splitMetric(values$riskmetrics_mm$package_has_website)
    values$package_has_news <- splitMetric(values$riskmetrics_mm$package_has_news)
    values$news_is_current <- splitMetric(values$riskmetrics_mm$news_is_current)
    values$has_bug_reports <- splitMetric(values$riskmetrics_mm$has_bug_reports)
    values$status_of_last_30_reported_bugs <- splitMetric(values$riskmetrics_mm$status_of_last_30_reported_bugs)
    values$exported_objects_with_documentation <- splitMetric(values$riskmetrics_mm$exported_objects_with_documentation)
    values$source_code_is_public <- splitMetric(values$riskmetrics_mm$source_code_is_public)
    values$has_a_package_maintainer <- splitMetric(values$riskmetrics_mm$has_a_package_maintainer)
  }
}) # End of the observe.
# 2. Observe to disable and enable the submit and comment box when the decision column is empty.
observe({
  # On the Maintenance Metrics tab: gray out info boxes whose metric detail is
  # -1 ("not applicable") via client-side JS, and disable the comment box and
  # submit button once a decision has been recorded for the package.
  req(input$tabs)
  # Recompute info-box sizes on the client after the DOM settles (500 ms delay).
  runjs("setTimeout(function(){ capturingSizeOfInfoBoxes(); }, 500);")
  if (input$tabs == "mm_tab_value") {
    # input$mm_comment only exists once the tab's UI has rendered.
    if (!is.null(input$mm_comment)) {
      # metric[2] == -1 marks "not applicable"; adjust each box's styling.
      if(values$package_has_vignettes[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('vignette');}, 500);" ) }
      if(values$package_has_website[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('website');}, 500);" ) }
      if(values$package_has_news[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('hasnews');}, 500);" ) }
      if(values$news_is_current[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('newscurrent');}, 500);" ) }
      if(values$has_bug_reports[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('bugtrack');}, 500);" ) }
      # The two percentage boxes use a color-only JS helper.
      if(values$status_of_last_30_reported_bugs[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesColorWhenNA('bugstatus');}, 500);" ) }
      if(values$exported_objects_with_documentation[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesColorWhenNA('exporthelp');}, 500);" ) }
      if(values$source_code_is_public[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('source_pub');}, 500);" ) }
      if(values$has_a_package_maintainer[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('pack_maint');}, 500);" ) }
      # A non-empty decision locks further commenting for this package.
      req(values$selected_pkg$decision)
      if (values$selected_pkg$decision != "") {
        runjs("setTimeout(function(){disableUI('mm_comment')}, 500);")
        runjs("setTimeout(function(){disableUI('submit_mm_comment')}, 500);")
      }
    }}
}) # End of the Observe.
# End of the observe's'
# Start of the render Output's'
# 1. Render Output Info box to show the information on VIGNETTE Content.
output$vignette <- renderInfoBox({
  # Info box: does the package ship vignettes?
  req(values$package_has_vignettes)
  metric <- values$package_has_vignettes  # c(flag, detail); detail == -1 => not applicable
  has_vignettes <- metric[1] == 1
  not_applicable <- metric[2] == -1
  status <- if (has_vignettes) "YES" else if (not_applicable) "NA" else "NO"
  subtitle <- if (not_applicable) {
    "Metric is not applicable for this source of package"
  } else {
    paste("The package has", metric[2], "Vignettes")
  }
  infoBox(
    title = "Presence of vignettes?",
    status,
    width = 3,
    subtitle,
    icon = icon(ifelse(has_vignettes, "thumbs-up", "thumbs-down"), lib = "glyphicon"),
    color = ifelse(has_vignettes, "green", "red"),
    fill = TRUE
  )
}) # End of the render Output.
# 2. Render Output Info box to show the information on Package Has Website.
output$website <- renderInfoBox({
  # Info box: does the package have an associated website URL?
  req(values$package_has_website)
  metric <- values$package_has_website  # c(flag, URL); URL slot == -1 => not applicable
  has_website <- metric[1] == 1
  not_applicable <- metric[2] == -1
  status <- if (has_website) "YES" else if (not_applicable) "NA" else "NO"
  subtitle <- if (not_applicable) {
    "Metric is not applicable for this source of package"
  } else {
    ifelse(has_website, paste("Website:", metric[2]), "The package does not have an associated website URL")
  }
  infoBox(
    title = "Associated website URL?",
    status,
    width = 3,
    subtitle,
    icon = icon(ifelse(has_website, "thumbs-up", "thumbs-down"), lib = "glyphicon"),
    color = ifelse(has_website, "green", "red"),
    fill = TRUE
  )
}) # End of the render Output.
# 3. Render Output Info box to show the Package Has News? Content.
output$hasnews <- renderInfoBox({
  # Info box: does the package have a NEWS file?
  req(values$package_has_news)
  metric <- values$package_has_news  # c(flag, detail); detail == -1 => not applicable
  has_news <- metric[1] == 1
  not_applicable <- metric[2] == -1
  status <- if (has_news) "YES" else if (not_applicable) "NA" else "NO"
  subtitle <- if (not_applicable) {
    "Metric is not applicable for this source of package"
  } else {
    ifelse(has_news, "The package has a NEWS file.", "The package does not have a NEWS file")
  }
  infoBox(
    title = "NEWS?",
    status,
    width = 3,
    subtitle,
    icon = icon(ifelse(has_news, "thumbs-up", "thumbs-down"), lib = "glyphicon"),
    color = ifelse(has_news, "green", "red"),
    fill = TRUE
  )
}) # End of the render Output.
# 4. Render Output Info box to show the information for News is Current?
output$newscurrent <- renderInfoBox({
  # Info box: does the NEWS file mention the current version number?
  req(values$news_is_current)
  metric <- values$news_is_current  # c(flag, detail); detail == -1 => not applicable
  is_current <- metric[1] == 1
  not_applicable <- metric[2] == -1
  infoBox(
    title = "News is current?",
    if (is_current) "YES" else if (not_applicable) "NA" else "NO",
    width = 3,
    # Grammar fix in the user-facing message: "does not contains" -> "does not contain".
    if (not_applicable) {
      "Metric is not applicable for this source of package"
    } else {
      ifelse(is_current, "NEWS file contains entry for current version number", "NEWS file does not contain entry for current version number")
    },
    icon = icon(ifelse(is_current, "thumbs-up", "thumbs-down"), lib = "glyphicon"),
    color = ifelse(is_current, "green", "red"),
    fill = TRUE
  )
}) # End of the render Output.
# 5. Render Output Info box to show the information for Does the package have Bug Report?
output$bugtrack <- renderInfoBox({
  # Info box: are bugs publicly documented (bug-tracker URL available)?
  req(values$has_bug_reports)
  metric <- values$has_bug_reports  # c(flag, URL); URL slot == -1 => not applicable
  has_reports <- metric[1] == 1
  not_applicable <- metric[2] == -1
  status <- if (has_reports) "YES" else if (not_applicable) "NA" else "NO"
  subtitle <- if (not_applicable) {
    "Metric is not applicable for this source of package"
  } else {
    ifelse(has_reports, paste("Bug reports URL:", metric[2]), "The Bugs are not publicly documented")
  }
  infoBox(
    title = "Bugs publicly documented?",
    status,
    width = 3,
    subtitle,
    icon = icon(ifelse(has_reports, "thumbs-up", "thumbs-down"), lib = "glyphicon"),
    color = ifelse(has_reports, "green", "red"),
    fill = TRUE
  )
}) # End of the render Output.
# 6. Render Output Info box to show the information on Bugs Status.
output$bugstatus <- renderInfoBox({
  # Info box: percentage of the last 30 reported bugs that were closed.
  req(values$status_of_last_30_reported_bugs)
  metric <- values$status_of_last_30_reported_bugs  # c(percentage, detail); detail == -1 => not applicable
  not_applicable <- metric[2] == -1
  infoBox(
    title = "Bug closure",
    if (not_applicable) "NA" else paste0(metric[1], "%"),
    subtitle = if (not_applicable) {
      "Metric is not applicable for this source of package"
    } else {
      "Percentage of last 30 bugs closed"
    },
    width = 3,
    fill = TRUE
  )
}) # End of the render Output.
# 7. Render Output Info box to show the information on Export help.
output$exporthelp <- renderInfoBox({
  # Info box: proportion of exported objects that are documented.
  req(values$exported_objects_with_documentation)
  metric <- values$exported_objects_with_documentation  # c(percentage, detail); detail == -1 => not applicable
  not_applicable <- metric[2] == -1
  infoBox(
    title = "Documentation",
    if (not_applicable) "NA" else paste0(metric[1], "%"),
    subtitle = if (not_applicable) {
      "Metric is not applicable for this source of package"
    } else {
      "Proportion of exported objects documented"
    },
    width = 3,
    fill = TRUE
  )
}) # End of the render Output.
# 8. Render Output Info box to show the information on source code is public?.
output$source_pub <- renderInfoBox({
  # Info box: is the package's source code publicly available?
  req(values$source_code_is_public)
  metric <- values$source_code_is_public  # c(flag, URL); URL slot == -1 => not applicable
  is_public <- metric[1] == 1
  not_applicable <- metric[2] == -1
  infoBox(
    title = "Source code public?",
    if (is_public) "YES" else if (not_applicable) "NA" else "NO",
    width = 3,
    # Typo fix in the user-facing message: "soucre" -> "source"
    # (now consistent with the other info boxes).
    if (not_applicable) {
      "Metric is not applicable for this source of package"
    } else {
      ifelse(is_public, paste("Source code URL:", metric[2]), "Package does not have a Source code URL")
    },
    icon = icon(ifelse(is_public, "thumbs-up", "thumbs-down"), lib = "glyphicon"),
    color = ifelse(is_public, "green", "red"),
    fill = TRUE
  )
}) # End of the Render Output.
# 9. Render Output Info box to show the information on Has a package maintainer?.
output$pack_maint <- renderInfoBox({
  # Info box: does the package declare a maintainer?
  req(values$has_a_package_maintainer)
  metric <- values$has_a_package_maintainer  # c(flag, maintainer); detail == -1 => not applicable
  has_maintainer <- metric[1] == 1
  not_applicable <- metric[2] == -1
  infoBox(
    title = "Has a maintainer?",
    if (has_maintainer) "YES" else if (not_applicable) "NA" else "NO",
    width = 3,
    # Typo fix in the user-facing message: "soucre" -> "source"
    # (now consistent with the other info boxes).
    if (not_applicable) {
      "Metric is not applicable for this source of package"
    } else {
      ifelse(has_maintainer, metric[2], "Package does not have a Maintainer")
    },
    icon = icon(ifelse(has_maintainer, "thumbs-up", "thumbs-down"), lib = "glyphicon"),
    color = ifelse(has_maintainer, "green", "red"),
    fill = TRUE
  )
}) # End of the render Output.
# 10. Render Output to show the comments on the application.
# Render the comment thread for the Maintenance Metrics tab as HTML.
# The values$mm_comment_submitted flag ("yes" after a new submission) forces
# this renderText to re-query the Comments table.
output$mm_commented <- renderText({
  if (values$mm_comment_submitted == "yes" ||
      values$mm_comment_submitted == "no") {
    # NOTE(review): query is built by string concatenation from
    # input$select_pack -- verify db_fun sanitizes it or parameterize.
    values$comment_mm1 <-
      db_fun(
        paste0(
          "SELECT user_name, user_role, comment, added_on FROM Comments WHERE comm_id = '",
          input$select_pack,
          "' AND comment_type = 'mm'"
        )
      )
    # Reverse each column so the most recent comment is rendered first.
    values$comment_mm2 <- data.frame(values$comment_mm1 %>% map(rev))
    req(values$comment_mm2$comment)
    # Reset the flag so a later submission triggers another refresh.
    values$mm_comment_submitted <- "no"
    # paste() is vectorized over the comment columns, producing one HTML
    # block per comment.
    paste(
      "<div class='col-sm-12 comment-border-bottom single-comment-div'><i class='fa fa-user-tie fa-4x'></i><h3 class='ml-3'><b class='user-name-color'>",
      values$comment_mm2$user_name,
      "(",
      values$comment_mm2$user_role,
      ")",
      "</b><sub>",
      values$comment_mm2$added_on,
      "</sub></h3><h4 class='ml-3 lh-4'>",
      values$comment_mm2$comment,
      "</h4></div>"
    )
  }
}) # End of the render Output.
# End of the Render Output's'.
# Flag toggled to "yes" after each submission to refresh the comment thread.
values$mm_comment_submitted <- "no"
# Observe event for submit button: persist a non-empty comment to the DB.
observeEvent(input$submit_mm_comment, {
  if (trimws(input$mm_comment) != "") {
    # Escape embedded single quotes so a comment containing an apostrophe
    # does not break the INSERT statement (standard SQL '' escaping).
    # NOTE(review): string-built SQL from user input is injection-prone --
    # prefer a parameterized query in db_fun if the backend supports it.
    safe_comment <- gsub("'", "''", input$mm_comment, fixed = TRUE)
    db_fun(
      paste0(
        "INSERT INTO Comments values('",
        input$select_pack,
        "',",
        "'",
        values$name,
        "'," ,
        "'",
        values$role,
        "',",
        "'",
        safe_comment,
        "',",
        "'mm'," ,
        "'",
        TimeStamp(),
        "'" ,
        ")"
      )
    )
    # Trigger the comment-thread re-render and clear the input box.
    values$mm_comment_submitted <- "yes"
    updateTextAreaInput(session, "mm_comment", value = "")
  }
}) # End of the Observe Event.
# End of the Maintenance_Metrics Source file for Server Module.
| /Server/maintenance_metrics.R | permissive | jmanitz/risk_assessment | R | false | false | 13,003 | r | #####################################################################################################################
# login_screen.R - Maintenance_Metrics Source file for Server Module.
#
# Author: Aravind
# Created: 02/06/2020.
#####################################################################################################################
# Start of the observe's'
# 1. Observe to load the columns from DB into below reactive values.
observe({
req(input$select_pack)
if(input$select_pack != "Select"){
values$riskmetrics_mm <-
db_fun(
paste0(
"SELECT * FROM MaintenanceMetrics WHERE MaintenanceMetrics.mm_id ='",
input$select_pack,
"'"
)
)
values$package_has_vignettes <- c(strsplit(values$riskmetrics_mm$package_has_vignettes,",")[[1]][1], strsplit(values$riskmetrics_mm$package_has_vignettes,",")[[1]][2])
values$package_has_website <- c(strsplit(values$riskmetrics_mm$package_has_website,",")[[1]][1], strsplit(values$riskmetrics_mm$package_has_website,",")[[1]][2])
values$package_has_news <- c(strsplit(values$riskmetrics_mm$package_has_news,",")[[1]][1], strsplit(values$riskmetrics_mm$package_has_news,",")[[1]][2])
values$news_is_current <- c(strsplit(values$riskmetrics_mm$news_is_current,",")[[1]][1], strsplit(values$riskmetrics_mm$news_is_current,",")[[1]][2])
values$has_bug_reports <- c(strsplit(values$riskmetrics_mm$has_bug_reports,",")[[1]][1], strsplit(values$riskmetrics_mm$has_bug_reports,",")[[1]][2])
values$status_of_last_30_reported_bugs <- c(strsplit(values$riskmetrics_mm$status_of_last_30_reported_bugs,",")[[1]][1], strsplit(values$riskmetrics_mm$status_of_last_30_reported_bugs,",")[[1]][2])
values$exported_objects_with_documentation <- c(strsplit(values$riskmetrics_mm$exported_objects_with_documentation,",")[[1]][1], strsplit(values$riskmetrics_mm$exported_objects_with_documentation,",")[[1]][2])
values$source_code_is_public <- c(strsplit(values$riskmetrics_mm$source_code_is_public,",")[[1]][1], strsplit(values$riskmetrics_mm$source_code_is_public,",")[[1]][2])
values$has_a_package_maintainer <- c(strsplit(values$riskmetrics_mm$has_a_package_maintainer,",")[[1]][1], strsplit(values$riskmetrics_mm$has_a_package_maintainer,",")[[1]][2])
}
}) # End of the observe.
# 2. Observe to disable and enable the submit and comment box when the decision column is empty.
observe({
req(input$tabs)
runjs("setTimeout(function(){ capturingSizeOfInfoBoxes(); }, 500);")
if (input$tabs == "mm_tab_value") {
if (!is.null(input$mm_comment)) {
if(values$package_has_vignettes[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('vignette');}, 500);" ) }
if(values$package_has_website[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('website');}, 500);" ) }
if(values$package_has_news[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('hasnews');}, 500);" ) }
if(values$news_is_current[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('newscurrent');}, 500);" ) }
if(values$has_bug_reports[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('bugtrack');}, 500);" ) }
if(values$status_of_last_30_reported_bugs[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesColorWhenNA('bugstatus');}, 500);" ) }
if(values$exported_objects_with_documentation[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesColorWhenNA('exporthelp');}, 500);" ) }
if(values$source_code_is_public[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('source_pub');}, 500);" ) }
if(values$has_a_package_maintainer[2] == -1){ runjs( "setTimeout(function(){ updateInfoBoxesWhenNA('pack_maint');}, 500);" ) }
req(values$selected_pkg$decision)
if (values$selected_pkg$decision != "") {
runjs("setTimeout(function(){disableUI('mm_comment')}, 500);")
runjs("setTimeout(function(){disableUI('submit_mm_comment')}, 500);")
}
}}
}) # End of the Observe.
# End of the observe's'
# Start of the render Output's'
# 1. Render Output Info box to show the information on VIGNETTE Content.
output$vignette <- renderInfoBox({
req(values$package_has_vignettes)
infoBox(
title = "Presence of vignettes?",
if(values$package_has_vignettes[1] == 1){"YES"}
else if(values$package_has_vignettes[2] == -1){"NA"}
else{"NO"},
width = 3,
if(values$package_has_vignettes[2] == -1){"Metric is not applicable for this source of package"}
else{paste("The package has", values$package_has_vignettes[2], "Vignettes")},
icon = icon(
ifelse(values$package_has_vignettes[1] == 1, "thumbs-up", "thumbs-down"),
lib = "glyphicon"
),
color = ifelse(values$package_has_vignettes[1] == 1, "green", "red"),
fill = TRUE
)
}) # End of the render Output.
# 2. Render Output Info box to show the information on Package Has Website.
output$website <- renderInfoBox({
req(values$package_has_website)
infoBox(
title = "Associated website URL?",
if(values$package_has_website[1] == 1){"YES"}
else if(values$package_has_website[2] == -1){"NA"}
else{"NO"},
width = 3,
if(values$package_has_website[2] == -1){"Metric is not applicable for this source of package"}
else{ ifelse(values$package_has_website[1] == 1, paste("Website:",values$package_has_website[2]), "The package does not have an associated website URL")},
icon = icon(
ifelse(values$package_has_website[1] == 1, "thumbs-up", "thumbs-down"),
lib = "glyphicon"
),
color = ifelse(values$package_has_website[1] == 1, "green", "red"),
fill = TRUE
)
}) # End of the render Output.
# 3. Render Output Info box to show the Package Has News? Content.
output$hasnews <- renderInfoBox({
req(values$package_has_news)
infoBox(
title = "NEWS?",
if(values$package_has_news[1] == 1){"YES"}
else if(values$package_has_news[2] == -1){"NA"}
else{"NO"},
width = 3,
if(values$package_has_news[2] == -1){"Metric is not applicable for this source of package"}
else{ ifelse(values$package_has_news[1] == 1, "The package has a NEWS file.", "The package does not have a NEWS file")},
icon = icon(
ifelse(values$package_has_news[1] == 1, "thumbs-up", "thumbs-down"),
lib = "glyphicon"
),
color = ifelse(values$package_has_news[1] == 1, "green", "red"),
fill = TRUE
)
}) # End of the render Output.
# 4. Render Output Info box to show the information for News is Current?
output$newscurrent <- renderInfoBox({
req(values$news_is_current)
infoBox(
title = "News is current?",
if(values$news_is_current[1] == 1){"YES"}
else if(values$news_is_current[2] == -1){"NA"}
else{"NO"},
width = 3,
if(values$news_is_current[2] == -1){"Metric is not applicable for this source of package"}
else{ ifelse(values$news_is_current[1] == 1, "NEWS file contains entry for current version number", "NEWS file does not contains entry for current version number")},
icon = icon(
ifelse(values$news_is_current[1] == 1, "thumbs-up", "thumbs-down"),
lib = "glyphicon"
),
color = ifelse(values$news_is_current[1] == 1, "green", "red"),
fill = TRUE
)
}) # End of the render Output.
# 5. Render Output Info box to show the information for Does the package have Bug Report?
output$bugtrack <- renderInfoBox({
req(values$has_bug_reports)
infoBox(
title = "Bugs publicly documented?",
if(values$has_bug_reports[1] == 1){"YES"}
else if(values$has_bug_reports[2] == -1){"NA"}
else{"NO"},
width = 3,
if(values$has_bug_reports[2] == -1){"Metric is not applicable for this source of package"}
else{ ifelse(values$has_bug_reports[1] == 1, paste("Bug reports URL:", values$has_bug_reports[2]), "The Bugs are not publicly documented")},
icon = icon(
ifelse(values$has_bug_reports[1] == 1, "thumbs-up", "thumbs-down"),
lib = "glyphicon"
),
color = ifelse(values$has_bug_reports[1] == 1, "green", "red"),
fill = TRUE
)
}) # End of the render Output.
# 6. Render Output Info box to show the information on Bugs Status.
output$bugstatus <- renderInfoBox({
req(values$status_of_last_30_reported_bugs)
infoBox(
title = "Bug closure",
if(values$status_of_last_30_reported_bugs[2] == -1){"NA"}
else{paste0(values$status_of_last_30_reported_bugs[1],"%")},
subtitle = if(values$status_of_last_30_reported_bugs[2] == -1){"Metric is not applicable for this source of package"}
else{"Percentage of last 30 bugs closed"},
width = 3,
fill = TRUE
)
}) # End of the render Output.
# 7. Render Output Info box to show the information on Export help.
output$exporthelp <- renderInfoBox({
req(values$exported_objects_with_documentation)
infoBox(
title = "Documentation",
if(values$exported_objects_with_documentation[2] == -1){"NA"}
else{paste0(values$exported_objects_with_documentation[1],"%")},
subtitle = if(values$exported_objects_with_documentation[2] == -1){"Metric is not applicable for this source of package"}
else{"Proportion of exported objects documented"},
width = 3,
fill = TRUE
)
}) # End of the render Output.
# 8. Render Output Info box to show the information on source code is public?.
output$source_pub <- renderInfoBox({
req(values$source_code_is_public)
infoBox(
title = "Source code public?",
if(values$source_code_is_public[1] == 1){"YES"}
else if(values$source_code_is_public[2] == -1){"NA"}
else{"NO"},
width = 3,
if(values$source_code_is_public[2] == -1){"Metric is not applicable for this soucre of package"}
else{ ifelse(values$source_code_is_public[1] == 1, paste("Source code URL:", values$source_code_is_public[2]), "Package does not have a Source code URL")},
icon = icon(
ifelse(values$source_code_is_public[1] == 1, "thumbs-up", "thumbs-down"),
lib = "glyphicon"
),
color = ifelse(values$source_code_is_public[1] == 1, "green", "red"),
fill = TRUE
)
}) # End of the Render Output.
# 9. Render Output Info box to show the information on Has a package maintainer?.
output$pack_maint <- renderInfoBox({
req(values$has_a_package_maintainer)
infoBox(
title = "Has a maintainer?",
if(values$has_a_package_maintainer[1] == 1){"YES"}
else if(values$has_a_package_maintainer[2] == -1){"NA"}
else{"NO"},
width = 3,
if(values$has_a_package_maintainer[2] == -1){"Metric is not applicable for this soucre of package"}
else{ ifelse(values$has_a_package_maintainer[1] == 1, values$has_a_package_maintainer[2], "Package does not have a Maintainer")},
icon = icon(
ifelse(
values$has_a_package_maintainer[1] == 1, "thumbs-up", "thumbs-down"),
lib = "glyphicon"
),
color = ifelse(values$has_a_package_maintainer[1] == 1, "green", "red"),
fill = TRUE
)
}) # End of the render Output.
# 10. Render Output to show the comments on the application.
output$mm_commented <- renderText({
if (values$mm_comment_submitted == "yes" ||
values$mm_comment_submitted == "no") {
values$comment_mm1 <-
db_fun(
paste0(
"SELECT user_name, user_role, comment, added_on FROM Comments WHERE comm_id = '",
input$select_pack,
"' AND comment_type = 'mm'"
)
)
values$comment_mm2 <- data.frame(values$comment_mm1 %>% map(rev))
req(values$comment_mm2$comment)
values$mm_comment_submitted <- "no"
paste(
"<div class='col-sm-12 comment-border-bottom single-comment-div'><i class='fa fa-user-tie fa-4x'></i><h3 class='ml-3'><b class='user-name-color'>",
values$comment_mm2$user_name,
"(",
values$comment_mm2$user_role,
")",
"</b><sub>",
values$comment_mm2$added_on,
"</sub></h3><h4 class='ml-3 lh-4'>",
values$comment_mm2$comment,
"</h4></div>"
)
}
}) # End of the render Output.
# End of the Render Output's'.
values$mm_comment_submitted <- "no"
# Observe event for submit button.
observeEvent(input$submit_mm_comment, {
if (trimws(input$mm_comment) != "") {
db_fun(
paste0(
"INSERT INTO Comments values('",
input$select_pack,
"',",
"'",
values$name,
"'," ,
"'",
values$role,
"',",
"'",
input$mm_comment,
"',",
"'mm'," ,
"'",
TimeStamp(),
"'" ,
")"
)
)
values$mm_comment_submitted <- "yes"
updateTextAreaInput(session, "mm_comment", value = "")
}
}) # End of the Observe Event.
# End of the Maintenance_Metrics Source file for Server Module.
|
#' DsATACsc
#'
#' A class for storing single-cell ATAC-seq accessibility data
#' inherits from \code{\linkS4class{DsATAC}}. Provides a few additional methods
#' but is otherwise identical to \code{\linkS4class{DsATAC}}.
#'
#' @name DsATACsc-class
#' @rdname DsATACsc-class
#' @author Fabian Mueller
#' @exportClass DsATACsc
# Formal S4 class definition: DsATACsc extends DsATAC without adding slots;
# it only contributes single-cell-specific methods.
setClass("DsATACsc",
	contains = "DsATAC",
	package = "ChrAccR"
)
# S4 initializer: populate all slots of a DsATACsc object.
# Called via new("DsATACsc", ...) from the DsATACsc() constructor below.
setMethod("initialize","DsATACsc",
	function(
		.Object,
		fragments,
		coord,
		counts,
		sampleAnnot,
		genome,
		diskDump,
		diskDump.fragments,
		diskDump.fragments.nSamplesPerFile,
		sparseCounts
	) {
		# Per-region-type data containers
		.Object@fragments <- fragments
		.Object@coord <- coord
		.Object@counts <- counts
		# One (initially empty) count-transformation log per count matrix,
		# named to match the count entries.
		.Object@countTransform <- rep(list(character(0)), length(.Object@counts))
		names(.Object@countTransform) <- names(.Object@counts)
		.Object@sampleAnnot <- sampleAnnot
		.Object@genome <- genome
		# Disk-dumping / sparse-storage configuration
		.Object@diskDump <- diskDump
		.Object@diskDump.fragments <- diskDump.fragments
		.Object@diskDump.fragments.nSamplesPerFile <- diskDump.fragments.nSamplesPerFile
		.Object@sparseCounts <- sparseCounts
		# Record the ChrAccR version the object was created with
		.Object@pkgVersion <- packageVersion("ChrAccR")
		.Object
	}
)
#' @noRd
DsATACsc <- function(sampleAnnot, genome, diskDump=FALSE, diskDump.fragments=TRUE, sparseCounts=TRUE){
	# Construct an empty DsATACsc dataset (no fragments, coordinates or counts yet).
	# Arguments are passed to the S4 initialize method by NAME rather than by
	# position, so the constructor stays correct even if the initializer's
	# argument order ever changes.
	obj <- new("DsATACsc",
		fragments=list(),
		coord=list(),
		counts=list(),
		sampleAnnot=sampleAnnot,
		genome=genome,
		diskDump=diskDump,
		diskDump.fragments=diskDump.fragments,
		diskDump.fragments.nSamplesPerFile=500L,
		sparseCounts=sparseCounts
	)
	return(obj)
}
################################################################################
# Single-cell methods
################################################################################
#-------------------------------------------------------------------------------
# Declare the getScQcStatsTab S4 generic (dispatching on .object) unless an
# attached package already defined it.
if (!isGeneric("getScQcStatsTab")) {
	setGeneric(
		"getScQcStatsTab",
		function(.object, ...) standardGeneric("getScQcStatsTab"),
		signature=c(".object")
	)
}
#' getScQcStatsTab-methods
#'
#' Retrieve a table of QC statistics for single cells
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @return a \code{data.frame} containing QC statistics for each cell
#'
#' @rdname getScQcStatsTab-DsATACsc-method
#' @docType methods
#' @aliases getScQcStatsTab
#' @aliases getScQcStatsTab,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("getScQcStatsTab",
	signature(
		.object="DsATACsc"
	),
	function(
		.object
	) {
		# Build a per-cell QC summary table from the cell annotation, based on
		# CellRanger QC columns when present.
		cellAnnot <- getSampleAnnot(.object)
		# First matching sample-id column; NA when none of the candidates exists.
		sampleIdCn <- findOrderedNames(colnames(cellAnnot), c(".sampleid", "sampleid", ".CR.cellQC.barcode"), ignore.case=TRUE)
		# Candidate CellRanger QC column names; individual entries may be NA
		# when the corresponding column is missing.
		nFragCns <- c(
			total=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.total"),
			pass=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.passed_filters"),
			tss=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.TSS_fragments"),
			peak=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.peak_region_fragments"),
			duplicate=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.duplicate"),
			mito=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.mitochondrial")
		)
		# Default every cell to the placeholder sample "sample" unless a
		# sample-id column was found.
		summaryDf <- data.frame(cell=getSamples(.object), sample=rep("sample", nrow(cellAnnot)))
		if (!is.na(sampleIdCn)) summaryDf[,"sample"] <- cellAnnot[,sampleIdCn]
		# Absolute fragment counts (nTotal, nPass).
		for (cn in c("total", "pass")){
			summaryDf[,muRtools::normalize.str(paste("n", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]
		}
		# Fractions relative to total reads.
		for (cn in c("mito", "duplicate")){
			if (!is.na(nFragCns[cn])) summaryDf[,muRtools::normalize.str(paste("frac", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]/summaryDf[,"nTotal"]
		}
		# Fractions relative to reads passing filters.
		for (cn in c("tss", "peak")){
			if (!is.na(nFragCns[cn])) summaryDf[,muRtools::normalize.str(paste("frac", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]/summaryDf[,"nPass"]
		}
		return(summaryDf)
	}
)
# Declare the unsupervisedAnalysisSc S4 generic (dispatching on .object)
# unless an attached package already defined it.
if (!isGeneric("unsupervisedAnalysisSc")) {
	setGeneric(
		"unsupervisedAnalysisSc",
		function(.object, ...) standardGeneric("unsupervisedAnalysisSc"),
		signature=c(".object")
	)
}
#' unsupervisedAnalysisSc-methods
#'
#' Perform unsupervised analysis on single-cell data. Performs dimensionality reduction
#' and clustering.
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param regionType character string specifying the region type
#' @param regionIdx indices of regions to be used (logical or integer vector). If \code{NULL} (default) all regions of the specified regionType will be used.
#' @param dimRedMethod character string specifying the dimensionality reduction method. Currently only \code{"tf-idf_irlba"} is supported
#' @param usePcs integer vector specifying the principal components to use for UMAP and clustering
#' @param clusteringMethod character string specifying the clustering method. Currently only \code{"seurat_louvain"} is supported
#' @return an \code{S3} object containing dimensionality reduction results and clustering
#'
#' @rdname unsupervisedAnalysisSc-DsATACsc-method
#' @docType methods
#' @aliases unsupervisedAnalysisSc
#' @aliases unsupervisedAnalysisSc,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("unsupervisedAnalysisSc",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		regionType,
		regionIdx=NULL,
		dimRedMethod="tf-idf_irlba",
		usePcs=1:50,
		clusteringMethod="seurat_louvain"
	) {
		# --- argument validation --------------------------------------------
		if (!is.element(regionType, getRegionTypes(.object))) logger.error(c("Unsupported region type:", regionType))
		if (!is.element(dimRedMethod, c("tf-idf_irlba"))) logger.error(c("Unsupported dimRedMethod:", dimRedMethod))
		if (!is.integer(usePcs)) logger.error(c("usePcs must be an integer vector"))
		if (!is.element(clusteringMethod, c("seurat_louvain"))) logger.error(c("Unsupported clusteringMethod:", clusteringMethod))
		if (!is.null(regionIdx)){
			# Normalize a logical mask to integer indices, then range-check.
			if (is.logical(regionIdx)) regionIdx <- which(regionIdx)
			if (!is.integer(regionIdx) || any(regionIdx < 1) || any(regionIdx > getNRegions(.object, regionType))) logger.error("Invalid regionIdx")
		}
		# --- optional region subsetting -------------------------------------
		dsn <- .object
		if (!is.null(regionIdx)){
			nRegs <- getNRegions(.object, regionType)
			logger.info(c("Retaining", length(regionIdx), "regions for dimensionality reduction"))
			# removeRegions() drops regions flagged TRUE, so invert the selection.
			idx <- rep(TRUE, nRegs)
			idx[regionIdx] <- FALSE
			dsn <- removeRegions(.object, idx, regionType)
		}
		# --- dimensionality reduction: TF-IDF followed by irlba-based PCA ---
		# (dimRedMethod was validated above, so this branch always runs and
		# cm/pcaCoord are guaranteed to be defined afterwards.)
		if (dimRedMethod=="tf-idf_irlba"){
			logger.start(c("Performing dimensionality reduction using", dimRedMethod))
			if (length(dsn@countTransform[[regionType]]) > 0) logger.warning("Counts have been pre-normalized. dimRedMethod 'tf-idf_irlba' might not be applicable.")
			# Apply TF-IDF only once, even if called repeatedly on the same object.
			if (!is.element("tf-idf", dsn@countTransform[[regionType]])){
				dsn <- transformCounts(dsn, method="tf-idf", regionTypes=regionType)
			}
			cm <- ChrAccR::getCounts(dsn, regionType, asMatrix=TRUE)
			# Transpose: cells become rows for PCA; compute up to max(usePcs) PCs.
			pcaCoord <- muRtools::getDimRedCoords.pca(t(cm), components=1:max(usePcs), method="irlba_svd")
			logger.completed()
		}
		cellIds <- colnames(cm)
		# --- UMAP embedding on the selected PCs -----------------------------
		logger.start(c("Getting UMAP coordinates"))
		umapCoord <- muRtools::getDimRedCoords.umap(pcaCoord[,usePcs])
		# Detach the fitted UMAP model from the coordinate matrix attribute.
		umapRes <- attr(umapCoord, "umapRes")
		attr(umapCoord, "umapRes") <- NULL
		logger.completed()
		# --- graph-based (Louvain) clustering via Seurat --------------------
		if (clusteringMethod=="seurat_louvain"){
			logger.start(c("Performing clustering using", clusteringMethod))
			if (!requireNamespace("Seurat")) logger.error(c("Could not load dependency: Seurat"))
			# Louvain clustering using Seurat
			# Seurat requires an expression matrix to create an object; supply a
			# constant dummy matrix and inject the precomputed PCA embedding.
			dummyMat <- matrix(11.0, ncol=length(cellIds), nrow=11)
			colnames(dummyMat) <- cellIds
			rownames(dummyMat) <- paste0("df", 1:nrow(dummyMat))
			sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
			sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord, key="PC_", assay="ATAC")
			sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=usePcs, k.param=30)
			clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10)
			# Prefix cluster labels with "c" (c0, c1, ...) and keep level order.
			clustAss <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
			names(clustAss) <- names(clustRes@active.ident)
			logger.completed()
		}
		# --- assemble the S3 result object ----------------------------------
		res <- list(
			pcaCoord=pcaCoord,
			umapCoord=umapCoord,
			umapRes=umapRes,
			clustAss=clustAss,
			regionType=regionType,
			regionIdx=regionIdx
		)
		class(res) <- "unsupervisedAnalysisResultSc"
		return(res)
	}
)
#-------------------------------------------------------------------------------
# Register the S4 generic only if no other attached package has defined it yet.
if (!isGeneric("iterativeLSI")) {
  setGeneric("iterativeLSI", function(.object, ...) standardGeneric("iterativeLSI"), signature = c(".object"))
}
#' iterativeLSI-methods
#'
#' EXPERIMENTAL: Perform iterative LSI clustering as described in doi:10.1101/696328
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param it0regionType character string specifying the region type to start with
#' @param it0nMostAcc the number of the most accessible regions to consider in iteration 0
#' @param it0pcs the principal components to consider in iteration 0
#' @param it0clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in iteration 0
#' @param it0nTopPeaksPerCluster the number of best peaks to be considered for each cluster in the merged peak set (iteration 0)
#' @param it1pcs the principal components to consider in iteration 1
#' @param it1clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in iteration 1
#' @param it1mostVarPeaks the number of the most variable peaks to consider after iteration 1
#' @param it2pcs the principal components to consider in the final iteration (2)
#' @param it2clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in the final iteration (2)
#' @return an \code{S3} object containing dimensionality reduction results and clustering
#'
#' @rdname iterativeLSI-DsATACsc-method
#' @docType methods
#' @aliases iterativeLSI
#' @aliases iterativeLSI,DsATACsc-method
#' @author Fabian Mueller
#' @export
# Iterative LSI (TF-IDF + truncated SVD via irlba) with Louvain clustering, 3 rounds:
#   it0: coarse clustering on the most accessible tiling regions
#   it1: clustering on a cluster-derived union peak set
#   it2: final embedding and clustering on the most cluster-variable peaks
# Reference: doi:10.1101/696328
setMethod("iterativeLSI",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		it0regionType="t5k",
		it0nMostAcc=20000L,
		it0pcs=2:25,
		it0clusterResolution=0.8,
		it0nTopPeaksPerCluster=2e5,
		it1pcs=1:50,
		it1clusterResolution=0.8,
		it1mostVarPeaks=50000L,
		it2pcs=1:50,
		it2clusterResolution=0.8
	) {
		# keep the call parameters (without the dataset itself) for provenance in the result
		callParams <- as.list(match.call())
		callParams <- callParams[setdiff(names(callParams), ".object")]
		cellIds <- getSamples(.object)
		if (length(.object@fragments) != length(cellIds)) logger.error("Object does not contain fragment information for all samples")
		ph <- getSampleAnnot(.object)
		# per-cell depth column (if present) is only used for a PC1-vs-depth diagnostic below
		depthCol <- colnames(ph) %in% c("numIns", ".CR.cellQC.passed_filters", ".CR.cellQC.total")
		depthV <- NULL
		if (any(depthCol)){
			depthV <- ph[,colnames(ph)[depthCol][1]]
		}
		logger.start("Iteration 0")
		dsr <- .object
		# restrict the dataset to the starting region type
		for (rt in setdiff(getRegionTypes(dsr), it0regionType)){
			dsr <- removeRegionType(dsr, rt)
		}
		if (!is.null(it0nMostAcc)){
			# keep only the it0nMostAcc most accessible regions (ranked by mean count)
			regAcc <- safeMatrixStats(ChrAccR::getCounts(dsr, it0regionType, allowSparseMatrix=TRUE), statFun="rowMeans", na.rm=TRUE)
			if (it0nMostAcc < length(regAcc)){
				idx2rem <- rank(-regAcc, na.last="keep", ties.method="min") > it0nMostAcc
				logger.info(c("Retaining the", sum(!idx2rem), "most accessible regions for dimensionality reduction"))
				dsr <- removeRegions(dsr, idx2rem, it0regionType)
			}
		}
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		if (length(dsr@countTransform[[it0regionType]]) > 0) logger.warning("Counts have been pre-normalized. 'tf-idf' might not be applicable.")
		dsn <- transformCounts(dsr, method="tf-idf", regionTypes=it0regionType)
		cm <- ChrAccR::getCounts(dsn, it0regionType, allowSparseMatrix=TRUE)
		pcaCoord_it0 <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(it0pcs), method="irlba_svd")
		if (!is.null(depthV)){
			# PC1 often captures sequencing depth; report the correlation as a diagnostic
			cc <- cor(pcaCoord_it0[,1], depthV, method="spearman")
			logger.info(c("Correlation (Spearman) of PC1 with cell fragment counts:", round(cc, 4)))
		}
		pcaCoord_it0 <- pcaCoord_it0[, it0pcs, drop=FALSE]
		logger.completed()
		logger.start(c("Clustering"))
		if (!requireNamespace("Seurat")) logger.error(c("Could not load dependency: Seurat"))
		# Louvain clustering using Seurat; the dummy count matrix only serves to construct
		# a valid Seurat object -- neighbors/clusters are computed on the PCA embedding
		dummyMat <- matrix(11.0, ncol=length(cellIds), nrow=11)
		colnames(dummyMat) <- cellIds
		rownames(dummyMat) <- paste0("df", 1:nrow(dummyMat))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord_it0, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcaCoord_it0), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it0clusterResolution)
		# prefix numeric cluster ids with "c" to obtain readable labels
		clustAss_it0 <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss_it0) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss_it0)))
		logger.completed()
		logger.start(c("Peak calling"))
		logger.start("Creating cluster pseudo-bulk samples")
		dsr <- addSampleAnnotCol(dsr, "clustAss_it0", as.character(clustAss_it0[cellIds]))
		dsrClust <- mergeSamples(dsr, "clustAss_it0", countAggrFun="sum")
		logger.completed()
		logger.start("Calling peaks")
		clustPeakGrl <- callPeaks(dsrClust)
		if (!is.null(it0nTopPeaksPerCluster)){
			# [fix] use c() so the logger separates tokens with spaces
			# (paste0 previously glued the number to the surrounding text)
			logger.info(c("Selecting the", it0nTopPeaksPerCluster, "peaks with highest score for each cluster"))
			clustPeakGrl <- GRangesList(lapply(clustPeakGrl, FUN=function(x){
				idx <- rank(-elementMetadata(x)[,"score_norm"], na.last="keep", ties.method="min") <= it0nTopPeaksPerCluster
				x[idx]
			}))
		}
		# merge per-cluster peak sets into one non-overlapping, sorted union set
		peakUnionGr <- getNonOverlappingByScore(unlist(clustPeakGrl), scoreCol="score_norm")
		peakUnionGr <- sortGr(peakUnionGr)
		logger.completed()
		logger.start("Aggregating counts for union peak set")
		# dsrClust <- regionAggregation(dsrClust, peakUnionGr, "clusterPeaks", signal="insertions", dropEmpty=FALSE)
		dsr <- regionAggregation(dsr, peakUnionGr, "clusterPeaks", signal="insertions", dropEmpty=FALSE, bySample=FALSE)
		logger.completed()
		logger.completed()
		logger.completed()
		logger.start("Iteration 1")
		it1regionType <- "clusterPeaks"
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		dsr <- removeRegionType(dsr, it0regionType)
		dsn <- transformCounts(dsr, method="tf-idf", regionTypes=it1regionType) #TODO: renormalize based on sequencing depth rather than aggregated counts across peaks only?
		cm <- ChrAccR::getCounts(dsn, it1regionType, allowSparseMatrix=TRUE)
		pcaCoord_it1 <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(it1pcs), method="irlba_svd")[, it1pcs, drop=FALSE]
		logger.completed()
		logger.start(c("Clustering"))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord_it1, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcaCoord_it1), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it1clusterResolution)
		clustAss_it1 <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss_it1) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss_it1)))
		logger.completed()
		if (!is.null(it1mostVarPeaks) && it1mostVarPeaks < nrow(cm)){
			logger.start(c("Identifying cluster-variable peaks"))
			logger.start("Creating cluster pseudo-bulk samples")
			dsr <- addSampleAnnotCol(dsr, "clustAss_it1", as.character(clustAss_it1[cellIds]))
			dsrClust <- mergeSamples(dsr, "clustAss_it1", countAggrFun="sum")
			logger.completed()
			logger.start("Identifying target peaks")
			dsnClust <- transformCounts(dsrClust, method="RPKM", regionTypes=it1regionType)
			l2cpm <- log2(ChrAccR::getCounts(dsnClust, it1regionType) / 1e3 + 1) # compute log2(CPM) from RPKM
			peakVar <- matrixStats::rowVars(l2cpm, na.rm=TRUE)
			if (it1mostVarPeaks < length(peakVar)){
				idx2rem <- rank(-peakVar, na.last="keep", ties.method="min") > it1mostVarPeaks
				logger.info(c("Retaining the", sum(!idx2rem), "most variable peaks"))
				dsr <- removeRegions(dsr, idx2rem, it1regionType)
			}
			logger.completed()
			logger.completed()
		}
		# [fix] retrieve the retained peak coordinates unconditionally: previously this
		# was only assigned inside the branch above, and assembling the result failed
		# when it1mostVarPeaks was NULL or not smaller than the number of peaks
		peakCoords <- ChrAccR::getCoord(dsr, it1regionType)
		logger.completed()
		logger.start("Iteration 2")
		it2regionType <- it1regionType
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		bcm_unnorm <- ChrAccR::getCounts(dsr, it2regionType, allowSparseMatrix=TRUE) > 0 # unnormalized binary count matrix
		# per-region IDF base, kept in the result for projecting new data later
		idfBase <- log(1 + ncol(bcm_unnorm) / safeMatrixStats(bcm_unnorm, "rowSums", na.rm=TRUE))
		dsn <- transformCounts(dsr, method="tf-idf", regionTypes=it2regionType) #TODO: renormalize based on sequencing depth rather than aggregated counts across peaks only?
		cm <- ChrAccR::getCounts(dsn, it2regionType, allowSparseMatrix=TRUE)
		pcaCoord <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(it2pcs), method="irlba_svd")
		pcaCoord_sel <- pcaCoord[, it2pcs, drop=FALSE]
		logger.completed()
		logger.start(c("Clustering"))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord_sel, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcaCoord_sel), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it2clusterResolution)
		clustAss <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss)))
		dsr <- addSampleAnnotCol(dsr, "clustAss_it2", as.character(clustAss[cellIds]))
		logger.completed()
		logger.start(c("UMAP coordinates"))
		umapCoord <- muRtools::getDimRedCoords.umap(pcaCoord_sel)
		umapRes <- attr(umapCoord, "umapRes")
		attr(umapCoord, "umapRes") <- NULL
		logger.completed()
		logger.completed()
		res <- list(
			pcaCoord=pcaCoord,
			pcs = it2pcs,
			idfBase=idfBase,
			umapCoord=umapCoord,
			umapRes=umapRes,
			clustAss=clustAss,
			regionGr=peakCoords,
			clusterPeaks_unfiltered=peakUnionGr,
			iterationData = list(
				iteration0 = list(
					pcaCoord=pcaCoord_it0,
					clustAss=clustAss_it0
				),
				iteration1 = list(
					pcaCoord=pcaCoord_it1,
					clustAss=clustAss_it1
				)
			),
			.params=callParams
		)
		class(res) <- "iterativeLSIResultSc"
		return(res)
	}
)
| /R/DsATACsc-class.R | no_license | Jessica-2019/ChrAccR | R | false | false | 19,076 | r | #' DsATACsc
#'
#' A class for storing single-cell ATAC-seq accessibility data
#' inherits from \code{\linkS4class{DsATAC}}. Provides a few additional methods
#' but is otherwise identical to \code{\linkS4class{DsATAC}}.
#'
#' @name DsATACsc-class
#' @rdname DsATACsc-class
#' @author Fabian Mueller
#' @exportClass DsATACsc
# S4 container for single-cell ATAC-seq data; all slots and most behavior
# are inherited unchanged from DsATAC.
setClass("DsATACsc", contains = "DsATAC", package = "ChrAccR")
# Populate all slots of a freshly allocated DsATACsc object.
# Arguments map one-to-one onto the slots of the same name; the per-region-type
# count transform log starts out empty (no normalization recorded yet).
setMethod("initialize","DsATACsc",
	function(
		.Object,
		fragments,
		coord,
		counts,
		sampleAnnot,
		genome,
		diskDump,
		diskDump.fragments,
		diskDump.fragments.nSamplesPerFile,
		sparseCounts
	) {
		slotVals <- list(
			fragments=fragments,
			coord=coord,
			counts=counts,
			sampleAnnot=sampleAnnot,
			genome=genome,
			diskDump=diskDump,
			diskDump.fragments=diskDump.fragments,
			diskDump.fragments.nSamplesPerFile=diskDump.fragments.nSamplesPerFile,
			sparseCounts=sparseCounts
		)
		for (sn in names(slotVals)){
			slot(.Object, sn) <- slotVals[[sn]]
		}
		# one empty transform record per count matrix, keyed by region type
		.Object@countTransform <- setNames(rep(list(character(0)), length(counts)), names(counts))
		# remember which package version created the object
		.Object@pkgVersion <- packageVersion("ChrAccR")
		.Object
	}
)
#' @noRd
DsATACsc <- function(sampleAnnot, genome, diskDump=FALSE, diskDump.fragments=TRUE, sparseCounts=TRUE){
	# Start with empty fragment, coordinate and count containers; arguments are
	# matched by name against the initialize() method defined above.
	new("DsATACsc",
		fragments=list(),
		coord=list(),
		counts=list(),
		sampleAnnot=sampleAnnot,
		genome=genome,
		diskDump=diskDump,
		diskDump.fragments=diskDump.fragments,
		diskDump.fragments.nSamplesPerFile=500L,
		sparseCounts=sparseCounts
	)
}
################################################################################
# Single-cell methods
################################################################################
#-------------------------------------------------------------------------------
# Register the S4 generic only if no other attached package has defined it yet.
if (!isGeneric("getScQcStatsTab")) {
  setGeneric("getScQcStatsTab", function(.object, ...) standardGeneric("getScQcStatsTab"), signature = c(".object"))
}
#' getScQcStatsTab-methods
#'
#' Retrieve a table of QC statistics for single cells
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @return an \code{data.frame} contain QC statistics for each cell
#'
#' @rdname getScQcStatsTab-DsATACsc-method
#' @docType methods
#' @aliases getScQcStatsTab
#' @aliases getScQcStatsTab,DsATACsc-method
#' @author Fabian Mueller
#' @export
setMethod("getScQcStatsTab",
	signature(
		.object="DsATACsc"
	),
	function(
		.object
	) {
		# Assemble a per-cell QC summary table from the cell (sample) annotation.
		cellAnnot <- getSampleAnnot(.object)
		# column holding the cell/sample id; falls back through common naming variants
		# (presumably CellRanger-derived annotation -- confirm against import code)
		sampleIdCn <- findOrderedNames(colnames(cellAnnot), c(".sampleid", "sampleid", ".CR.cellQC.barcode"), ignore.case=TRUE)
		# candidate fragment-count columns; findOrderedNames yields NA for absent columns
		nFragCns <- c(
			total=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.total"),
			pass=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.passed_filters"),
			tss=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.TSS_fragments"),
			peak=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.peak_region_fragments"),
			duplicate=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.duplicate"),
			mito=findOrderedNames(colnames(cellAnnot), ".CR.cellQC.mitochondrial")
		)
		# one row per cell; "sample" defaults to a constant when no id column exists
		summaryDf <- data.frame(cell=getSamples(.object), sample=rep("sample", nrow(cellAnnot)))
		if (!is.na(sampleIdCn)) summaryDf[,"sample"] <- cellAnnot[,sampleIdCn]
		# absolute counts -> camelCase columns "nTotal"/"nPass";
		# NOTE(review): assumes both columns are present (no is.na guard here) -- confirm
		for (cn in c("total", "pass")){
			summaryDf[,muRtools::normalize.str(paste("n", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]
		}
		# to be divided by total reads
		for (cn in c("mito", "duplicate")){
			if (!is.na(nFragCns[cn])) summaryDf[,muRtools::normalize.str(paste("frac", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]/summaryDf[,"nTotal"]
		}
		# to be divided by passing reads
		for (cn in c("tss", "peak")){
			if (!is.na(nFragCns[cn])) summaryDf[,muRtools::normalize.str(paste("frac", cn, sep="_"), return.camel=TRUE)] <- cellAnnot[,nFragCns[cn]]/summaryDf[,"nPass"]
		}
		return(summaryDf)
	}
)
# Register the S4 generic only if no other attached package has defined it yet.
if (!isGeneric("unsupervisedAnalysisSc")) {
  setGeneric("unsupervisedAnalysisSc", function(.object, ...) standardGeneric("unsupervisedAnalysisSc"), signature = c(".object"))
}
#' unsupervisedAnalysisSc-methods
#'
#' Perform unsupervised analysis on single-cell data. Performs dimensionality reduction
#' and clustering.
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param regionType character string specifying the region type
#' @param regionIdx indices of regions to be used (logical or integer vector). If \code{NULL} (default) all regions of the specified regionType will be used.
#' @param dimRedMethod character string specifying the dimensionality reduction method. Currently only \code{"tf-idf_irlba"} is supported
#' @param usePcs integer vector specifying the principal components to use for UMAP and clustering
#' @param clusteringMethod character string specifying the clustering method. Currently only \code{"seurat_louvain"} is supported
#' @return an \code{S3} object containing dimensionality reduction results and clustering
#'
#' @rdname unsupervisedAnalysisSc-DsATACsc-method
#' @docType methods
#' @aliases unsupervisedAnalysisSc
#' @aliases unsupervisedAnalysisSc,DsATACsc-method
#' @author Fabian Mueller
#' @export
# Unsupervised analysis for single cells: TF-IDF normalization + irlba PCA,
# UMAP embedding and Louvain clustering (via Seurat) on the selected PCs.
setMethod("unsupervisedAnalysisSc",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		regionType,
		regionIdx=NULL,
		dimRedMethod="tf-idf_irlba",
		usePcs=1:50,
		clusteringMethod="seurat_louvain"
	) {
		# argument validation (both method arguments currently accept a single option)
		if (!is.element(regionType, getRegionTypes(.object))) logger.error(c("Unsupported region type:", regionType))
		if (!is.element(dimRedMethod, c("tf-idf_irlba"))) logger.error(c("Unsupported dimRedMethod:", dimRedMethod))
		if (!is.integer(usePcs)) logger.error(c("usePcs must be an integer vector"))
		if (!is.element(clusteringMethod, c("seurat_louvain"))) logger.error(c("Unsupported clusteringMethod:", clusteringMethod))
		if (!is.null(regionIdx)){
			if (is.logical(regionIdx)) regionIdx <- which(regionIdx)
			if (!is.integer(regionIdx) || any(regionIdx < 1) || any(regionIdx > getNRegions(.object, regionType))) logger.error("Invalid regionIdx")
		}
		dsn <- .object
		if (!is.null(regionIdx)){
			# subset to the requested regions (removeRegions expects the complement mask)
			nRegs <- getNRegions(.object, regionType)
			logger.info(c("Retaining", length(regionIdx), "regions for dimensionality reduction"))
			idx <- rep(TRUE, nRegs)
			idx[regionIdx] <- FALSE
			dsn <- removeRegions(.object, idx, regionType)
		}
		if (dimRedMethod=="tf-idf_irlba"){
			logger.start(c("Performing dimensionality reduction using", dimRedMethod))
			if (length(dsn@countTransform[[regionType]]) > 0) logger.warning("Counts have been pre-normalized. dimRedMethod 'tf-idf_irlba' might not be applicable.")
			# apply TF-IDF only if it has not been applied already
			if (!is.element("tf-idf", dsn@countTransform[[regionType]])){
				dsn <- transformCounts(dsn, method="tf-idf", regionTypes=regionType)
			}
			cm <- ChrAccR::getCounts(dsn, regionType, asMatrix=TRUE)
			pcaCoord <- muRtools::getDimRedCoords.pca(t(cm), components=1:max(usePcs), method="irlba_svd")
			logger.completed()
		}
		cellIds <- colnames(cm)
		logger.start(c("Getting UMAP coordinates"))
		# [fix] drop=FALSE keeps the matrix shape even when a single PC is selected
		# (consistent with the subsetting in iterativeLSI)
		umapCoord <- muRtools::getDimRedCoords.umap(pcaCoord[, usePcs, drop=FALSE])
		umapRes <- attr(umapCoord, "umapRes")
		attr(umapCoord, "umapRes") <- NULL
		logger.completed()
		if (clusteringMethod=="seurat_louvain"){
			logger.start(c("Performing clustering using", clusteringMethod))
			if (!requireNamespace("Seurat")) logger.error(c("Could not load dependency: Seurat"))
			# Louvain clustering using Seurat; the dummy count matrix only serves to build
			# a valid Seurat object -- neighbors/clusters are computed on the PCA embedding
			dummyMat <- matrix(11.0, ncol=length(cellIds), nrow=11)
			colnames(dummyMat) <- cellIds
			rownames(dummyMat) <- paste0("df", 1:nrow(dummyMat))
			sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
			sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord, key="PC_", assay="ATAC")
			sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=usePcs, k.param=30)
			clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10)
			# prefix numeric cluster ids with "c" to obtain readable labels
			clustAss <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
			names(clustAss) <- names(clustRes@active.ident)
			logger.completed()
		}
		res <- list(
			pcaCoord=pcaCoord,
			umapCoord=umapCoord,
			umapRes=umapRes,
			clustAss=clustAss,
			regionType=regionType,
			regionIdx=regionIdx
		)
		class(res) <- "unsupervisedAnalysisResultSc"
		return(res)
	}
)
#-------------------------------------------------------------------------------
# Register the S4 generic only if no other attached package has defined it yet.
if (!isGeneric("iterativeLSI")) {
  setGeneric("iterativeLSI", function(.object, ...) standardGeneric("iterativeLSI"), signature = c(".object"))
}
#' iterativeLSI-methods
#'
#' EXPERIMENTAL: Perform iterative LSI clustering as described in doi:10.1101/696328
#'
#' @param .object \code{\linkS4class{DsATACsc}} object
#' @param it0regionType character string specifying the region type to start with
#' @param it0nMostAcc the number of the most accessible regions to consider in iteration 0
#' @param it0pcs the principal components to consider in iteration 0
#' @param it0clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in iteration 0
#' @param it0nTopPeaksPerCluster the number of best peaks to be considered for each cluster in the merged peak set (iteration 0)
#' @param it1pcs the principal components to consider in iteration 1
#' @param it1clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in iteration 1
#' @param it1mostVarPeaks the number of the most variable peaks to consider after iteration 1
#' @param it2pcs the principal components to consider in the final iteration (2)
#' @param it2clusterResolution resolution parameter for Seurat's clustering (\code{Seurat::FindClusters}) in the final iteration (2)
#' @return an \code{S3} object containing dimensionality reduction results and clustering
#'
#' @rdname iterativeLSI-DsATACsc-method
#' @docType methods
#' @aliases iterativeLSI
#' @aliases iterativeLSI,DsATACsc-method
#' @author Fabian Mueller
#' @export
# Iterative LSI (TF-IDF + truncated SVD via irlba) with Louvain clustering, 3 rounds:
#   it0: coarse clustering on the most accessible tiling regions
#   it1: clustering on a cluster-derived union peak set
#   it2: final embedding and clustering on the most cluster-variable peaks
# Reference: doi:10.1101/696328
setMethod("iterativeLSI",
	signature(
		.object="DsATACsc"
	),
	function(
		.object,
		it0regionType="t5k",
		it0nMostAcc=20000L,
		it0pcs=2:25,
		it0clusterResolution=0.8,
		it0nTopPeaksPerCluster=2e5,
		it1pcs=1:50,
		it1clusterResolution=0.8,
		it1mostVarPeaks=50000L,
		it2pcs=1:50,
		it2clusterResolution=0.8
	) {
		# keep the call parameters (without the dataset itself) for provenance in the result
		callParams <- as.list(match.call())
		callParams <- callParams[setdiff(names(callParams), ".object")]
		cellIds <- getSamples(.object)
		if (length(.object@fragments) != length(cellIds)) logger.error("Object does not contain fragment information for all samples")
		ph <- getSampleAnnot(.object)
		# per-cell depth column (if present) is only used for a PC1-vs-depth diagnostic below
		depthCol <- colnames(ph) %in% c("numIns", ".CR.cellQC.passed_filters", ".CR.cellQC.total")
		depthV <- NULL
		if (any(depthCol)){
			depthV <- ph[,colnames(ph)[depthCol][1]]
		}
		logger.start("Iteration 0")
		dsr <- .object
		# restrict the dataset to the starting region type
		for (rt in setdiff(getRegionTypes(dsr), it0regionType)){
			dsr <- removeRegionType(dsr, rt)
		}
		if (!is.null(it0nMostAcc)){
			# keep only the it0nMostAcc most accessible regions (ranked by mean count)
			regAcc <- safeMatrixStats(ChrAccR::getCounts(dsr, it0regionType, allowSparseMatrix=TRUE), statFun="rowMeans", na.rm=TRUE)
			if (it0nMostAcc < length(regAcc)){
				idx2rem <- rank(-regAcc, na.last="keep", ties.method="min") > it0nMostAcc
				logger.info(c("Retaining the", sum(!idx2rem), "most accessible regions for dimensionality reduction"))
				dsr <- removeRegions(dsr, idx2rem, it0regionType)
			}
		}
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		if (length(dsr@countTransform[[it0regionType]]) > 0) logger.warning("Counts have been pre-normalized. 'tf-idf' might not be applicable.")
		dsn <- transformCounts(dsr, method="tf-idf", regionTypes=it0regionType)
		cm <- ChrAccR::getCounts(dsn, it0regionType, allowSparseMatrix=TRUE)
		pcaCoord_it0 <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(it0pcs), method="irlba_svd")
		if (!is.null(depthV)){
			# PC1 often captures sequencing depth; report the correlation as a diagnostic
			cc <- cor(pcaCoord_it0[,1], depthV, method="spearman")
			logger.info(c("Correlation (Spearman) of PC1 with cell fragment counts:", round(cc, 4)))
		}
		pcaCoord_it0 <- pcaCoord_it0[, it0pcs, drop=FALSE]
		logger.completed()
		logger.start(c("Clustering"))
		if (!requireNamespace("Seurat")) logger.error(c("Could not load dependency: Seurat"))
		# Louvain clustering using Seurat; the dummy count matrix only serves to construct
		# a valid Seurat object -- neighbors/clusters are computed on the PCA embedding
		dummyMat <- matrix(11.0, ncol=length(cellIds), nrow=11)
		colnames(dummyMat) <- cellIds
		rownames(dummyMat) <- paste0("df", 1:nrow(dummyMat))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord_it0, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcaCoord_it0), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it0clusterResolution)
		# prefix numeric cluster ids with "c" to obtain readable labels
		clustAss_it0 <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss_it0) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss_it0)))
		logger.completed()
		logger.start(c("Peak calling"))
		logger.start("Creating cluster pseudo-bulk samples")
		dsr <- addSampleAnnotCol(dsr, "clustAss_it0", as.character(clustAss_it0[cellIds]))
		dsrClust <- mergeSamples(dsr, "clustAss_it0", countAggrFun="sum")
		logger.completed()
		logger.start("Calling peaks")
		clustPeakGrl <- callPeaks(dsrClust)
		if (!is.null(it0nTopPeaksPerCluster)){
			# [fix] use c() so the logger separates tokens with spaces
			# (paste0 previously glued the number to the surrounding text)
			logger.info(c("Selecting the", it0nTopPeaksPerCluster, "peaks with highest score for each cluster"))
			clustPeakGrl <- GRangesList(lapply(clustPeakGrl, FUN=function(x){
				idx <- rank(-elementMetadata(x)[,"score_norm"], na.last="keep", ties.method="min") <= it0nTopPeaksPerCluster
				x[idx]
			}))
		}
		# merge per-cluster peak sets into one non-overlapping, sorted union set
		peakUnionGr <- getNonOverlappingByScore(unlist(clustPeakGrl), scoreCol="score_norm")
		peakUnionGr <- sortGr(peakUnionGr)
		logger.completed()
		logger.start("Aggregating counts for union peak set")
		# dsrClust <- regionAggregation(dsrClust, peakUnionGr, "clusterPeaks", signal="insertions", dropEmpty=FALSE)
		dsr <- regionAggregation(dsr, peakUnionGr, "clusterPeaks", signal="insertions", dropEmpty=FALSE, bySample=FALSE)
		logger.completed()
		logger.completed()
		logger.completed()
		logger.start("Iteration 1")
		it1regionType <- "clusterPeaks"
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		dsr <- removeRegionType(dsr, it0regionType)
		dsn <- transformCounts(dsr, method="tf-idf", regionTypes=it1regionType) #TODO: renormalize based on sequencing depth rather than aggregated counts across peaks only?
		cm <- ChrAccR::getCounts(dsn, it1regionType, allowSparseMatrix=TRUE)
		pcaCoord_it1 <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(it1pcs), method="irlba_svd")[, it1pcs, drop=FALSE]
		logger.completed()
		logger.start(c("Clustering"))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord_it1, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcaCoord_it1), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it1clusterResolution)
		clustAss_it1 <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss_it1) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss_it1)))
		logger.completed()
		if (!is.null(it1mostVarPeaks) && it1mostVarPeaks < nrow(cm)){
			logger.start(c("Identifying cluster-variable peaks"))
			logger.start("Creating cluster pseudo-bulk samples")
			dsr <- addSampleAnnotCol(dsr, "clustAss_it1", as.character(clustAss_it1[cellIds]))
			dsrClust <- mergeSamples(dsr, "clustAss_it1", countAggrFun="sum")
			logger.completed()
			logger.start("Identifying target peaks")
			dsnClust <- transformCounts(dsrClust, method="RPKM", regionTypes=it1regionType)
			l2cpm <- log2(ChrAccR::getCounts(dsnClust, it1regionType) / 1e3 + 1) # compute log2(CPM) from RPKM
			peakVar <- matrixStats::rowVars(l2cpm, na.rm=TRUE)
			if (it1mostVarPeaks < length(peakVar)){
				idx2rem <- rank(-peakVar, na.last="keep", ties.method="min") > it1mostVarPeaks
				logger.info(c("Retaining the", sum(!idx2rem), "most variable peaks"))
				dsr <- removeRegions(dsr, idx2rem, it1regionType)
			}
			logger.completed()
			logger.completed()
		}
		# [fix] retrieve the retained peak coordinates unconditionally: previously this
		# was only assigned inside the branch above, and assembling the result failed
		# when it1mostVarPeaks was NULL or not smaller than the number of peaks
		peakCoords <- ChrAccR::getCoord(dsr, it1regionType)
		logger.completed()
		logger.start("Iteration 2")
		it2regionType <- it1regionType
		logger.start(c("Performing TF-IDF-based dimension reduction"))
		bcm_unnorm <- ChrAccR::getCounts(dsr, it2regionType, allowSparseMatrix=TRUE) > 0 # unnormalized binary count matrix
		# per-region IDF base, kept in the result for projecting new data later
		idfBase <- log(1 + ncol(bcm_unnorm) / safeMatrixStats(bcm_unnorm, "rowSums", na.rm=TRUE))
		dsn <- transformCounts(dsr, method="tf-idf", regionTypes=it2regionType) #TODO: renormalize based on sequencing depth rather than aggregated counts across peaks only?
		cm <- ChrAccR::getCounts(dsn, it2regionType, allowSparseMatrix=TRUE)
		pcaCoord <- muRtools::getDimRedCoords.pca(safeMatrixStats(cm, "t"), components=1:max(it2pcs), method="irlba_svd")
		pcaCoord_sel <- pcaCoord[, it2pcs, drop=FALSE]
		logger.completed()
		logger.start(c("Clustering"))
		sObj <- Seurat::CreateSeuratObject(dummyMat, project='scATAC', min.cells=0, min.features=0, assay="ATAC")
		sObj[["pca"]] <- Seurat::CreateDimReducObject(embeddings=pcaCoord_sel, key="PC_", assay="ATAC")
		sObj <- Seurat::FindNeighbors(sObj, reduction="pca", assay="ATAC", dims=1:ncol(pcaCoord_sel), k.param=30)
		clustRes <- Seurat::FindClusters(sObj, k.param=30, algorithm=1, n.start=100, n.iter=10, resolution=it2clusterResolution)
		clustAss <- factor(paste0("c", clustRes@active.ident), levels=paste0("c", levels(clustRes@active.ident)))
		names(clustAss) <- names(clustRes@active.ident)
		logger.info(c("Number of clusters found:", nlevels(clustAss)))
		dsr <- addSampleAnnotCol(dsr, "clustAss_it2", as.character(clustAss[cellIds]))
		logger.completed()
		logger.start(c("UMAP coordinates"))
		umapCoord <- muRtools::getDimRedCoords.umap(pcaCoord_sel)
		umapRes <- attr(umapCoord, "umapRes")
		attr(umapCoord, "umapRes") <- NULL
		logger.completed()
		logger.completed()
		res <- list(
			pcaCoord=pcaCoord,
			pcs = it2pcs,
			idfBase=idfBase,
			umapCoord=umapCoord,
			umapRes=umapRes,
			clustAss=clustAss,
			regionGr=peakCoords,
			clusterPeaks_unfiltered=peakUnionGr,
			iterationData = list(
				iteration0 = list(
					pcaCoord=pcaCoord_it0,
					clustAss=clustAss_it0
				),
				iteration1 = list(
					pcaCoord=pcaCoord_it1,
					clustAss=clustAss_it1
				)
			),
			.params=callParams
		)
		class(res) <- "iterativeLSIResultSc"
		return(res)
	}
)
|
# CSS for the full-screen overlay shown while the app initializes
# (targets the "loading-content" div defined in the UI below; injected via inlineCSS()).
appCSS <- "
#loading-content {
position: absolute;
background: #FFFFFF;
opacity: 0.95;
z-index: 100;
left: 0;
right: 0;
height: 100%;
text-align: center;
color: #808080;
}
"
# Thin wrappers around htmlOutput() so the UI definition below stays readable;
# each binds one server-rendered HTML snippet by its output id.
renderDrugName <- function() {
  htmlOutput('drugname')
}
renderLimit <- function() {
  htmlOutput('limit')
}
renderStart <- function() {
  htmlOutput('start')
}
renderStart2 <- function() {
  htmlOutput('start2')
}
# Page layout: a loading overlay, a header row (logo + title), and a
# sidebar (drug/event selection) next to a tabbed results panel.
shinyUI(fluidPage(
  useShinyjs(),
  inlineCSS(appCSS),
  # full-screen overlay shown until the server signals readiness
  div(
    id = "loading-content",
    h1("Please Wait")%>%withSpinner(proxy.height='300px')
  ),
  div(
    id = "main_content",
    fluidRow(
      column(width=4,
        a(href='https://www.canada.ca/en/health-canada/services/drugs-health-products/medeffect-canada/canada-vigilance-program.html',
          img(src='healthcanada.png',width="150",height="150",align='bottom')),
        renderDates()
      ),
      column(width=8,
        titlePanel("RR-Drug" ) )
    ),
    sidebarLayout(
      sidebarPanel(
        # tabsetPanel(
        # tabPanel('Select Inputs',
        selectInput_p("v1", 'Drug Variable' ,c('Active_Ingredient','DRUGNAME'),
          HTML( tt('drugvar1') ), tt('drugvar2'),
          placement='top'),
        # hidden inputs (condition never true) kept so the server bindings exist
        conditionalPanel(
          condition = "1 == 2",
          selectizeInput("t1", "Active Ingredient",
            choices=NULL),
          selectizeInput("t1_1", "Name of Drug",
            choices=NULL),
          numericInput('limit', 'Maximum number of event terms', 50,
            1, 100, step=1),
          numericInput('start', 'Rank of first event', 1,
            1, 999, step=1)
        ),
        wellPanel(
          bsButton("tabBut", "Select Drug and # of Events...", style='primary'),
          br(),
          renderDrugName(),
          renderLimit(),
          renderStart()
        ),
        dateRangeInput('daterange', 'Use Reports between',
          format="yyyy-mm-dd",start ="1965-01-30", end = Sys.Date() ),
        # modal dialog opened by the "tabBut" button above
        bsModal( 'modalExample', "Enter Variables", "tabBut", size = "small",
          htmlOutput('mymodal'),
          conditionalPanel(
            condition="input.v1=='Active_Ingredient'",
            selectizeInput_p("ingname", "Active Ingredient",
              choices= c("Start typing to search..."="",ing_choices),
              HTML( tt('drugname1') ), tt('drugname2'),
              placement='left')
          ),
          conditionalPanel(
            condition="input.v1=='DRUGNAME'",
            selectizeInput_p("drugname", "Name of Drug",
              choices= c("Start typing to search..."="",drug_choices),
              HTML( tt('drugname1') ), tt('drugname2'),
              placement='left')),
          numericInput_p('limit2', 'Number of most frequent events to analyze:', 50,
            1, 100, step=1,
            HTML( tt('limit1') ), tt('limit2'),
            placement='left'),
          # [fix] corrected user-facing label typo: "frquency" -> "frequency"
          numericInput_p('start2', 'Start with ranked frequency #', 1,
            1, 999, step=1,
            HTML( tt('limit1') ), tt('limit2'),
            placement='left'),
          bsButton("update", "Update Variables", style='primary')),
        bsAlert("alert"),
        HTML( (loadhelp('overviewside') ) ) ),
      # id='sidetabs', selected='Select Inputs')
      # ),
      mainPanel(
        tabsetPanel(
          tabPanel("PRR and ROR Results",
            wellPanel(
              htmlOutput( 'prrtitle' )
            ),
            maketabset( c('prr2', 'cloudprr', 'textplot'),
              types=c('datatable', "plot", 'plot'),
              names=c("Table","Word Cloud", "text Plot"),
              popheads = c(tt('prr1'), tt('word1'), tt('textplot1') ),
              poptext = c( tt('prr5'), tt('wordPRR'), tt('textplot2') ) )
          ),
          tabPanel("Analyzed Event Counts for Specified Drug" ,
            wellPanel(
              htmlOutput( 'alldrugtext' )
            ),
            wordcloudtabset('cloudquery', 'specifieddrug',
              popheads=c( tt('event1'), tt('word1') ),
              poptext=c( tt('event2'), tt('word2') )
            )
          ),
          tabPanel("Analyzed Event Counts for All Drugs",
            wellPanel(
              htmlOutput( 'alltext' )
            ),
            wordcloudtabset('cloudall', 'all2',
              popheads=c( tt('event1'), tt('word1') ),
              poptext=c( tt('event2'), tt('word2') ))
          ),
          tabPanel("Counts For Drugs In Selected Reports",
            wellPanel(
              htmlOutput( 'cotitle' )
            ),
            wordcloudtabset('cloudcoquery', 'coquery',
              popheads=c( tt('codrug1'), tt('word1') ),
              poptext=c( tt('codrug3'), tt('word2') ))
          ),
          tabPanel("Counts For Indications In Selected Reports",
            wellPanel(
              htmlOutput( 'indtitle' )
            ),
            wordcloudtabset('cloudindquery', 'indquery2',
              popheads=c( tt('indication1'), tt('word1') ),
              poptext=c( tt('indication2'), tt('word2') ) )
          ),
          #tabPanel('Data Reference',
          # HTML( renderiframe("https://www.canada.ca/en/health-canada/services/drugs-health-products/medeffect-canada/adverse-reaction-database.html"))),
          tabPanel("Other Apps",
            HTML( makeapplinks())
          ),
          tabPanel('About',
            HTML( (loadhelp('about')))),
          id='maintabs', selected = 'PRR and ROR Results'
        )
      )
    )
  )
)
)
| /prrD_test/ui.R | no_license | hres/cvopenfda | R | false | false | 7,397 | r |
appCSS <- "
#loading-content {
position: absolute;
background: #FFFFFF;
opacity: 0.95;
z-index: 100;
left: 0;
right: 0;
height: 100%;
text-align: center;
color: #808080;
}
"
# Small UI helpers surfacing server-rendered HTML snippets in the sidebar:
# the currently selected drug name and the event-count settings. Each simply
# returns the matching htmlOutput() placeholder.
renderDrugName <- function() {
  htmlOutput("drugname")
}

renderLimit <- function() {
  htmlOutput("limit")
}

renderStart <- function() {
  htmlOutput("start")
}

renderStart2 <- function() {
  htmlOutput("start2")
}
# Main UI definition: full-page layout with
#   * a shinyjs "loading" overlay (hidden by the server after startup),
#   * a sidebar holding the drug-variable selector, a modal ("Enter Variables")
#     for drug / event-count inputs, and sidebar help text,
#   * a tabbed main panel with PRR/ROR results plus supporting count tables
#     and word clouds.
# Fix: the 'start2' input label read "frquency" -> "frequency".
shinyUI(fluidPage(
  useShinyjs(),
  inlineCSS(appCSS),
  # Overlay shown until the server reveals "main_content".
  div(
    id = "loading-content",
    h1("Please Wait")%>%withSpinner(proxy.height='300px')
  ),
  div(
    id = "main_content",
  fluidRow(
    column(width=4,
           a(href='https://www.canada.ca/en/health-canada/services/drugs-health-products/medeffect-canada/canada-vigilance-program.html',
             img(src='healthcanada.png',width="150",height="150",align='bottom')),
           renderDates()
    ),
    column(width=8,
           titlePanel("RR-Drug" ) )
  ),
  sidebarLayout(
    sidebarPanel(
      # tabsetPanel(
      # tabPanel('Select Inputs',
      selectInput_p("v1", 'Drug Variable' ,c('Active_Ingredient','DRUGNAME'),
                    HTML( tt('drugvar1') ), tt('drugvar2'),
                    placement='top'),
      # Hidden panel (condition is never true): keeps these inputs registered
      # so the server can read/update them even though the user-visible
      # widgets live in the modal below.
      conditionalPanel(
        condition = "1 == 2",
        selectizeInput("t1", "Active Ingredient",
                       choices=NULL),
        selectizeInput("t1_1", "Name of Drug",
                       choices=NULL),
        numericInput('limit', 'Maximum number of event terms', 50,
                     1, 100, step=1),
        numericInput('start', 'Rank of first event', 1,
                     1, 999, step=1)
      ),
      wellPanel(
        bsButton("tabBut", "Select Drug and # of Events...", style='primary'),
        br(),
        renderDrugName(),
        renderLimit(),
        renderStart()
      ),
      dateRangeInput('daterange', 'Use Reports between',
                     format="yyyy-mm-dd",start ="1965-01-30", end = Sys.Date() ),
      # Modal opened by the "tabBut" button; which drug-name widget is shown
      # depends on the selected drug variable (input.v1).
      bsModal( 'modalExample', "Enter Variables", "tabBut", size = "small",
               htmlOutput('mymodal'),
               conditionalPanel(
                 condition="input.v1=='Active_Ingredient'",
                 selectizeInput_p("ingname", "Active Ingredient",
                                  choices= c("Start typing to search..."="",ing_choices),
                                  HTML( tt('drugname1') ), tt('drugname2'),
                                  placement='left')
               ),
               conditionalPanel(
                 condition="input.v1=='DRUGNAME'",
                 selectizeInput_p("drugname", "Name of Drug",
                                  choices= c("Start typing to search..."="",drug_choices),
                                  HTML( tt('drugname1') ), tt('drugname2'),
                                  placement='left')),
               numericInput_p('limit2', 'Number of most frequent events to analyze:', 50,
                              1, 100, step=1,
                              HTML( tt('limit1') ), tt('limit2'),
                              placement='left'),
               numericInput_p('start2', 'Start with ranked frequency #', 1,
                              1, 999, step=1,
                              HTML( tt('limit1') ), tt('limit2'),
                              placement='left'),
               bsButton("update", "Update Variables", style='primary')),
      bsAlert("alert"),
      HTML( (loadhelp('overviewside') ) ) ),
    # id='sidetabs', selected='Select Inputs')
    # ),
    mainPanel(
      tabsetPanel(
        tabPanel("PRR and ROR Results",
                 wellPanel(
                   htmlOutput( 'prrtitle' )
                 ),
                 maketabset( c('prr2', 'cloudprr', 'textplot'),
                             types=c('datatable', "plot", 'plot'),
                             names=c("Table","Word Cloud", "text Plot"),
                             popheads = c(tt('prr1'), tt('word1'), tt('textplot1') ),
                             poptext = c( tt('prr5'), tt('wordPRR'), tt('textplot2') ) )
        ),
        tabPanel("Analyzed Event Counts for Specified Drug" ,
                 wellPanel(
                   htmlOutput( 'alldrugtext' )
                 ),
                 wordcloudtabset('cloudquery', 'specifieddrug',
                                 popheads=c( tt('event1'), tt('word1') ),
                                 poptext=c( tt('event2'), tt('word2') )
                 )
        ),
        tabPanel("Analyzed Event Counts for All Drugs",
                 wellPanel(
                   htmlOutput( 'alltext' )
                 ),
                 wordcloudtabset('cloudall', 'all2',
                                 popheads=c( tt('event1'), tt('word1') ),
                                 poptext=c( tt('event2'), tt('word2') ))
        ),
        tabPanel("Counts For Drugs In Selected Reports",
                 wellPanel(
                   htmlOutput( 'cotitle' )
                 ),
                 wordcloudtabset('cloudcoquery', 'coquery',
                                 popheads=c( tt('codrug1'), tt('word1') ),
                                 poptext=c( tt('codrug3'), tt('word2') ))
        ),
        tabPanel("Counts For Indications In Selected Reports",
                 wellPanel(
                   htmlOutput( 'indtitle' )
                 ),
                 wordcloudtabset('cloudindquery', 'indquery2',
                                 popheads=c( tt('indication1'), tt('word1') ),
                                 poptext=c( tt('indication2'), tt('word2') ) )
        ),
        #tabPanel('Data Reference',
        #          HTML( renderiframe("https://www.canada.ca/en/health-canada/services/drugs-health-products/medeffect-canada/adverse-reaction-database.html"))),
        tabPanel("Other Apps",
                 HTML( makeapplinks())
        ),
        tabPanel('About',
                 HTML( (loadhelp('about')))),
        id='maintabs', selected = 'PRR and ROR Results'
      )
    )
  )
  )
)
)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 48088
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 48002
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 48002
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-005.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 19284
c no.of clauses 48088
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 48002
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-005.qdimacs 19284 48088 E1 [3329 3331 3333 3335 3337 3339 3341 3343 3345 3347 3349 3351 3353 3355 3357 3359 3361 3365 3373 3375 3379 3381 3383 3385 3387 3389 3391 3393 3395 3397 3399 3401 3403 3405 3407 3409 3411 3413 3415 3417 3419 3421 3435] 0 227 17142 48002 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-005/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-005.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 1,160 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 48088
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 48002
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 48002
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-005.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 19284
c no.of clauses 48088
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 48002
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF03-c03.blif-biu.inv.prop.bb-bmc.conf07.01X-QBF.BB1-Zi.BB2-Zi.BB3-Zi.with-IOC.unfold-005.qdimacs 19284 48088 E1 [3329 3331 3333 3335 3337 3339 3341 3343 3345 3347 3349 3351 3353 3355 3357 3359 3361 3365 3373 3375 3379 3381 3383 3385 3387 3389 3391 3393 3395 3397 3399 3401 3403 3405 3407 3409 3411 3413 3415 3417 3419 3421 3435] 0 227 17142 48002 RED
|
###############################################################################
# This is an R script that creates the fourth plot (plot4.png)
# required for the Coursera Exploratory Data Analysis course,
# week-1 assignment:
# A 2x2 matrix containing 4 plots in 2 rows and 2 columns:
# - Top Left: (plot2 except no units on Y label)
# - Bottom Left: (plot3)
# - Top Right: a line graph with Date + Time on x-axis and Voltage on y-axis
# - Bottom Right: a line graph with Date + Time on x-axis and
# Global_reactive_power on y-axis
###############################################################################
###############################################################################
# Load all required libraries.
library(dplyr)
library(lubridate)
###############################################################################
# 0) Load all of the required data from the data set.
# download files
if (!file.exists("exdata_data_household_power_consumption.zip")) {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile = "exdata_data_household_power_consumption.zip")
unzip("exdata_data_household_power_consumption.zip")
}
###############################################################################
# Loading the data:
# When loading the dataset into R, please consider the following:
# - The dataset has 2,075,259 rows and 9 columns.
# - We will only be using data from the dates 2007-02-01 and 2007-02-02.
# - You may find it useful to convert the Date and Time variables to Date/Time
# classes in R using the strptime() and as.Date() functions.
# - Note that in this dataset missing values are coded as ?.
# Read the full semicolon-delimited data set. Missing values are coded as "?"
# in the raw file (per the assignment notes above), so declare them via
# na.strings up front: read.csv then types the measurement columns correctly
# and the as.numeric() conversions below become warning-free no-ops.
power_data <- as_tibble(read.csv("household_power_consumption.txt", header=TRUE,
                                 skip=0, stringsAsFactors = FALSE, sep = ";",
                                 na.strings = "?"))
# Column names:
# Date;Time;Global_active_power;Global_reactive_power;Voltage;Global_intensity;
# Sub_metering_1;Sub_metering_2;Sub_metering_3

# Parse Date (dd/mm/yyyy in the raw file) and keep only the two required days.
power_data <- mutate(power_data, Date = dmy(Date))
power_data <- filter(power_data,
                     Date == ymd("2007-02-01") | Date == ymd("2007-02-02"))

# Parse Time as a period and coerce measurement columns to numeric (kept for
# robustness even though na.strings already lets read.csv type them).
power_data <- mutate(power_data,
                     Time = hms(Time),
                     Global_active_power = as.numeric(Global_active_power),
                     Global_reactive_power = as.numeric(Global_reactive_power),
                     Voltage = as.numeric(Voltage),
                     Global_intensity = as.numeric(Global_intensity),
                     Sub_metering_1 = as.numeric(Sub_metering_1),
                     Sub_metering_2 = as.numeric(Sub_metering_2),
                     Sub_metering_3 = as.numeric(Sub_metering_3))

# Drop rows with any missing measurement.
power_data <- filter(power_data, complete.cases(power_data))
###############################################################################
# Making Plots:
# Our overall goal here is simply to examine how household energy
# usage varies over a 2-day period in February, 2007. Your task is to
# reconstruct the following plots below, all of which were constructed using the
# base plotting system.
#
# For each plot you should:
# - Construct the plot and save it to a PNG file with a width of 480 pixels and
# a height of 480 pixels.
# - Name each of the plot files as plot1.png, plot2.png, etc.
###############################################################################
###############################################################################
###############################################################################
# Construct plot4:
# A 2x2 matrix containing 4 plots in 2 rows and 2 columns:
# - Top Left: (plot2 except no units on Y label)
# - Bottom Left: (plot3)
# - Top Right: a line graph with Date + Time on x-axis and Voltage on y-axis
# - Bottom Right: a line graph with Date + Time on x-axis and
# Global_reactive_power on y-axis
# Open the png file graphic device with the required size.
png(filename = "plot4.png", width = 480, height = 480, units = "px")
# Setup to plot a 2x2 matrix of plots, filling in by column
par(mfcol = c(2, 2))
###############################################################################
# Top Left Plot: copy of plot2 code except no units on Y label
plot(power_data$Date + power_data$Time,
power_data$Global_active_power,
type = "l",
xlab = "",
ylab = "Global Active Power",
main = "")
###############################################################################
# Bottom Left Plot: copy of plot3 code
# Construct the initial plot with Sub_metering_1 line graph
plot(power_data$Date + power_data$Time,
power_data$Sub_metering_1,
type = "l",
xlab = "",
ylab = "Energy sub metering",
main = "")
# Add Sub_metering_2 line graph
lines(power_data$Date + power_data$Time,
power_data$Sub_metering_2, col = "red")
# Add Sub_metering_3 line graph
lines(power_data$Date + power_data$Time,
power_data$Sub_metering_3, col = "blue")
# Add a legend at the top right corner of the plot
legend("topright",
lty = 1,
col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
###############################################################################
# Top Right Plot:
# - a line graph with Date + Time on x-axis and Voltage on y-axis
# - X label: datetime
# - Y label: (black) Voltage
# - Title: none
plot(power_data$Date + power_data$Time,
power_data$Voltage,
type = "l",
xlab = "datetime",
ylab = "Voltage",
main = "")
###############################################################################
# Bottom Right Plot:
# - a line graph with Date + Time on x-axis and Voltage on y-axis
# - X label: datetime
# - Y label: (black) Global_reactive_power
# - Title: none
plot(power_data$Date + power_data$Time,
power_data$Global_reactive_power,
type = "l",
xlab = "datetime",
ylab = "Global_reactive_power",
main = "")
# Close the png graphic device.
dev.off()
###############################################################################
############################# End of file. #######################
############################################################################### | /plot4.R | no_license | mausha/ExData_Plotting1 | R | false | false | 6,579 | r | ###############################################################################
# This is an R script that creates the fourth plot (plot4.png)
# required for the Coursera Exploratory Data Analysis course,
# week-1 assignment:
# A 2x2 matrix containing 4 plots in 2 rows and 2 columns:
# - Top Left: (plot2 except no units on Y label)
# - Bottom Left: (plot3)
# - Top Right: a line graph with Date + Time on x-axis and Voltage on y-axis
# - Bottom Right: a line graph with Date + Time on x-axis and
# Global_reactive_power on y-axis
###############################################################################
###############################################################################
# Load all required libraries.
library(dplyr)
library(lubridate)
###############################################################################
# 0) Load all of the required data from the data set.
# download files
if (!file.exists("exdata_data_household_power_consumption.zip")) {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile = "exdata_data_household_power_consumption.zip")
unzip("exdata_data_household_power_consumption.zip")
}
###############################################################################
# Loading the data:
# When loading the dataset into R, please consider the following:
# - The dataset has 2,075,259 rows and 9 columns.
# - We will only be using data from the dates 2007-02-01 and 2007-02-02.
# - You may find it useful to convert the Date and Time variables to Date/Time
# classes in R using the strptime() and as.Date() functions.
# - Note that in this dataset missing values are coded as ?.
# Read the full semicolon-delimited data set. Missing values are coded as "?"
# in the raw file (per the assignment notes above), so declare them via
# na.strings up front: read.csv then types the measurement columns correctly
# and the as.numeric() conversions below become warning-free no-ops.
power_data <- as_tibble(read.csv("household_power_consumption.txt", header=TRUE,
                                 skip=0, stringsAsFactors = FALSE, sep = ";",
                                 na.strings = "?"))
# Column names:
# Date;Time;Global_active_power;Global_reactive_power;Voltage;Global_intensity;
# Sub_metering_1;Sub_metering_2;Sub_metering_3

# Parse Date (dd/mm/yyyy in the raw file) and keep only the two required days.
power_data <- mutate(power_data, Date = dmy(Date))
power_data <- filter(power_data,
                     Date == ymd("2007-02-01") | Date == ymd("2007-02-02"))

# Parse Time as a period and coerce measurement columns to numeric (kept for
# robustness even though na.strings already lets read.csv type them).
power_data <- mutate(power_data,
                     Time = hms(Time),
                     Global_active_power = as.numeric(Global_active_power),
                     Global_reactive_power = as.numeric(Global_reactive_power),
                     Voltage = as.numeric(Voltage),
                     Global_intensity = as.numeric(Global_intensity),
                     Sub_metering_1 = as.numeric(Sub_metering_1),
                     Sub_metering_2 = as.numeric(Sub_metering_2),
                     Sub_metering_3 = as.numeric(Sub_metering_3))

# Drop rows with any missing measurement.
power_data <- filter(power_data, complete.cases(power_data))
###############################################################################
# Making Plots:
# Our overall goal here is simply to examine how household energy
# usage varies over a 2-day period in February, 2007. Your task is to
# reconstruct the following plots below, all of which were constructed using the
# base plotting system.
#
# For each plot you should:
# - Construct the plot and save it to a PNG file with a width of 480 pixels and
# a height of 480 pixels.
# - Name each of the plot files as plot1.png, plot2.png, etc.
###############################################################################
###############################################################################
###############################################################################
# Construct plot4:
# A 2x2 matrix containing 4 plots in 2 rows and 2 columns:
# - Top Left: (plot2 except no units on Y label)
# - Bottom Left: (plot3)
# - Top Right: a line graph with Date + Time on x-axis and Voltage on y-axis
# - Bottom Right: a line graph with Date + Time on x-axis and
# Global_reactive_power on y-axis
# Open the png file graphic device with the required size.
png(filename = "plot4.png", width = 480, height = 480, units = "px")
# Setup to plot a 2x2 matrix of plots, filling in by column
par(mfcol = c(2, 2))
###############################################################################
# Top Left Plot: copy of plot2 code except no units on Y label
plot(power_data$Date + power_data$Time,
power_data$Global_active_power,
type = "l",
xlab = "",
ylab = "Global Active Power",
main = "")
###############################################################################
# Bottom Left Plot: copy of plot3 code
# Construct the initial plot with Sub_metering_1 line graph
plot(power_data$Date + power_data$Time,
power_data$Sub_metering_1,
type = "l",
xlab = "",
ylab = "Energy sub metering",
main = "")
# Add Sub_metering_2 line graph
lines(power_data$Date + power_data$Time,
power_data$Sub_metering_2, col = "red")
# Add Sub_metering_3 line graph
lines(power_data$Date + power_data$Time,
power_data$Sub_metering_3, col = "blue")
# Add a legend at the top right corner of the plot
legend("topright",
lty = 1,
col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
###############################################################################
# Top Right Plot:
# - a line graph with Date + Time on x-axis and Voltage on y-axis
# - X label: datetime
# - Y label: (black) Voltage
# - Title: none
plot(power_data$Date + power_data$Time,
power_data$Voltage,
type = "l",
xlab = "datetime",
ylab = "Voltage",
main = "")
###############################################################################
# Bottom Right Plot:
# - a line graph with Date + Time on x-axis and Voltage on y-axis
# - X label: datetime
# - Y label: (black) Global_reactive_power
# - Title: none
plot(power_data$Date + power_data$Time,
power_data$Global_reactive_power,
type = "l",
xlab = "datetime",
ylab = "Global_reactive_power",
main = "")
# Close the png graphic device.
dev.off()
###############################################################################
############################# End of file. #######################
############################################################################### |
# NOTE(review): rm(list = ls()) wipes the interactive workspace; discouraged
# in scripts but kept for compatibility with how this analysis is run.
rm(list=ls())
library(dplyr)
library(tidyr)
library(GenomicRanges)
library(ggplot2)
library(ggrepel)
library(BSgenome.Hsapiens.UCSC.hg19)
library(Biostrings)
# Project-local plot themes plus precomputed junction/circle tables
# (absolute paths; expected to provide `svaba_junctions` and `circles`).
source("~/Desktop/PalmTrees/Analysis/Code/CustomThemes.R")
load("~/Desktop/PalmTrees/Analysis/WorkspaceData/SvabaCircleJunctions.Rdata")
load("~/Desktop/PalmTrees/Analysis/WorkspaceData/Circles.Rdata")

# Export one BED file (ChrA, PosA, PosB) of precise Svaba circle junctions
# per sample. (Idiom fixes: `<-` for assignment, FALSE instead of F.)
samples <- unique(svaba_junctions$Sample)
for (sample in samples){
  svaba_junctions %>%
    filter(Sample == sample) %>%
    dplyr::select(ChrA,PosA,PosB) %>%
    write.table(paste0("~/Desktop/PalmTrees/Results/Tables/SvabaCircles/", sample, "_SvabaPreciseJunctions.bed"),
                quote = FALSE, sep = "\t", col.names = FALSE, row.names = FALSE)
}
########################################################
########## SvabaCircleStats ############
########################################################
# Bar chart: number of precise circle junctions per sample, bars ordered by
# descending count.
svaba_junctions %>%
  dplyr::count(Sample, name = "nCircles") %>%
  mutate(Sample = forcats::fct_reorder(Sample, -nCircles)) %>%
  ggplot(aes(x = Sample, y = nCircles)) +
  geom_col(fill = "grey25") +
  xlab("") +
  ylab("Number of Circles") +
  ggtitle("Svaba Circles") +
  theme_kons1() +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_NumberBySample.pdf", width = 4, height = 2, useDingbats = FALSE)
# Scatter: per-sample agreement between the number of Circle-seq circle calls
# and the number of Svaba circle junctions.
CircleSeqNumberOfCircles = circles %>%
  filter(Method=="CircleSeq") %>%
  group_by(Sample) %>%
  summarise(CircleSeqCircles = n()) %>%
  ungroup()

# Join Svaba per-sample counts against the Circle-seq counts (full_join keeps
# samples present in only one of the two sets) and label points by sample.
svaba_junctions %>%
  group_by(Sample) %>%
  summarise(SvabaCircles = n()) %>%
  ungroup() %>%
  full_join(CircleSeqNumberOfCircles) %>%
  ggplot(aes(x=CircleSeqCircles, y=SvabaCircles)) +
  geom_point() +
  geom_text_repel(aes(label=Sample), size=3) +
  theme_kons1() +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_NumberBySample_CorrelationWithCircleSeq.pdf", width=5, height=5, useDingbats=F)
# Density of circle lengths (PosB - PosA), pooled over samples, log1p x-axis.
svaba_junctions %>%
  mutate(CircleLength = PosB-PosA) %>%
  ggplot(aes(x=CircleLength)) +
  geom_density(fill="grey25", color=NA) +
  scale_x_continuous(trans="log1p", breaks=c(100,300,1000,3000,10000)) +
  theme_kons1() +
  xlab("Circle Length [bp]") +
  ylab("") +
  ggtitle("Svaba Circle Lengths") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_CircleLength.pdf", width=3, height=2, useDingbats=F)

# Same distribution, one panel per sample.
svaba_junctions %>%
  mutate(CircleLength = PosB-PosA) %>%
  ggplot(aes(x=CircleLength)) +
  geom_density(fill="grey25", color=NA) +
  scale_x_continuous(trans="log1p", breaks=c(100,300,1000,3000,10000)) +
  theme_kons1() +
  xlab("Circle Length [bp]") +
  ylab("") +
  ggtitle("Svaba Circle Lengths") +
  facet_wrap(Sample~.) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_CircleLength_bySample.pdf", width=10, height=10, useDingbats=F)

# Zoom on short circles (< 1.5 kb), linear axis, narrow kernel bandwidth.
svaba_junctions %>%
  mutate(CircleLength = PosB-PosA) %>%
  filter(CircleLength < 1500) %>%
  ggplot(aes(x=CircleLength)) +
  geom_density(bw=3, fill="grey25", color=NA) +
  theme_kons1() +
  xlab("Circle Length [bp]") +
  ylab("") +
  ggtitle("Svaba Circle Lengths") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_CircleLength_Less1kb.pdf", width=3, height=2, useDingbats=F)
##################################################
########## HOMOLOGIES ############
##################################################
# Fraction of precise junctions carrying any microhomology (printed only).
svaba_junctions %>%
  mutate(hasHomology = HomologyLength>0) %>%
  .$hasHomology %>% mean()
# 61.5% of precisely reconstructable circle junctions have a homology

svaba_junctions %>%
  mutate(hasHomology = HomologyLength>=5) %>%
  .$hasHomology %>% mean()
# 6% of precisely reconstructable circle junctions have a homology of at least 5bp

svaba_junctions %>%
  mutate(hasHomology = HomologyLength>=10) %>%
  .$hasHomology %>% mean()
# 0.4% of precisely reconstructable circle junctions have a homology of at least 10bp
# (comment previously said "5bp" but the condition above is >= 10)

# Histogram of homology lengths over all junctions (pooled samples).
svaba_junctions %>%
  ggplot(aes(x=HomologyLength)) +
  geom_histogram(fill="steelblue", color=NA) +
  theme_kons1() +
  xlab("Homology Length at \n Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologyLength.pdf", width=3, height=2, useDingbats=F)
# Per-sample histogram of homology lengths.
svaba_junctions %>%
  ggplot(aes(x=HomologyLength)) +
  geom_histogram(fill="steelblue", color=NA) +
  theme_kons1() +
  xlab("Homology Length at Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  facet_wrap(Sample~.) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologyLength_bySample.pdf", width=6, height=6, useDingbats=F)

# NOTE(review): this density version saves to the SAME file as the histogram
# above ("HomologyLength_bySample.pdf") and therefore overwrites it -- the
# histogram PDF is never kept. Give one of the two a distinct filename.
svaba_junctions %>%
  ggplot(aes(x=HomologyLength)) +
  geom_density(bw=2, fill="steelblue", color=NA) +
  theme_kons1() +
  xlab("Homology Length at Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  facet_wrap(Sample~.) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologyLength_bySample.pdf", width=6, height=6, useDingbats=F)
# Export homology sequences (>= 5 bp) as FASTA, keyed by breakend-pair ID,
# for downstream motif analysis.
library(Biostrings)
svaba_junctions_seqs = svaba_junctions %>%
  dplyr::select(Sample, BNDPairID, HomologyLength, Homology) %>%
  distinct() %>%
  filter(HomologyLength>=5) %>%
  dplyr::select(BNDPairID, Homology)
svaba_junctions_seqs_xstr = DNAStringSet(svaba_junctions_seqs$Homology)
names(svaba_junctions_seqs_xstr) = svaba_junctions_seqs$BNDPairID
writeXStringSet(svaba_junctions_seqs_xstr, paste0("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Homology_Min5.fa"))

## Analyse how that compares to randomized breakpoints
source("~/Desktop/PalmTrees/Analysis/Code/myHomology.R")
library(parallel)
# NOTE(review): `cores` is only used by the commented-out cluster code below;
# it is dead in this script as run locally.
cores=detectCores()

# The permutation computation itself is commented out here and executed on the
# cluster instead (slow code, per the author's note); its results are loaded
# from the .Rdata file below. Kept verbatim for reference.
# This is computed on the cluster because I wrote insanely slow code ------------------------------------------------------------------
# svaba_junctions$myHomology = mcmapply(getHomologySvabaBreakpoints,
#                         svaba_junctions$ChrA, svaba_junctions$PosA, svaba_junctions$DirectionA,
#                         svaba_junctions$ChrB, svaba_junctions$PosB, svaba_junctions$DirectionB,
#                         mc.cores=cores)
# svaba_junctions$myHomologyLength = nchar(svaba_junctions$myHomology)
#
# unpermuted_svaba_junctions = svaba_junctions %>% filter(isPrecise) %>% dplyr::select(Sample, ChrA, PosA, DirectionA, ChrB, PosB, DirectionB, CircleGenomeClass, isInUnionPT) %>% distinct()
#
# permuted_svaba_junctions = unpermuted_svaba_junctions
# B_Perm_i = sample.int(nrow(unpermuted_svaba_junctions),nrow(unpermuted_svaba_junctions),replace=F)
# permuted_svaba_junctions[, "ChrB"] = permuted_svaba_junctions[B_Perm_i, "ChrB"]
# permuted_svaba_junctions[, "PosB"] = permuted_svaba_junctions[B_Perm_i, "PosB"]
# permuted_svaba_junctions[, "DirectionB"] = permuted_svaba_junctions[B_Perm_i, "DirectionB"]
# permuted_svaba_junctions$myHomology = mcmapply(getHomologySvabaBreakpoints,
#                                       permuted_svaba_junctions$ChrA, permuted_svaba_junctions$PosA, permuted_svaba_junctions$DirectionA,
#                                       permuted_svaba_junctions$ChrB, permuted_svaba_junctions$PosB, permuted_svaba_junctions$DirectionB,
#                                       mc.cores=cores)
# permuted_svaba_junctions$myHomologyLength = nchar(permuted_svaba_junctions$myHomology)
# permuted_svaba_junctions$isPermuted = "Randomized"
#
# unpermuted_svaba_junctions$myHomology = mcmapply(getHomologySvabaBreakpoints,
#                                                permuted_svaba_junctions$ChrA, unpermuted_svaba_junctions$PosA, unpermuted_svaba_junctions$DirectionA,
#                                                unpermuted_svaba_junctions$ChrB, unpermuted_svaba_junctions$PosB, unpermuted_svaba_junctions$DirectionB,
#                                                mc.cores=cores)
# unpermuted_svaba_junctions$myHomologyLength = nchar(unpermuted_svaba_junctions$myHomology)
# unpermuted_svaba_junctions$isPermuted = "Real"
# permuted_and_unpermuted_svaba_junctions = bind_rows(permuted_svaba_junctions, unpermuted_svaba_junctions)

# Load the results from the cluster (note: the "CircleJucntion" typo is part
# of the on-disk filename; do not "fix" it here).
load("~/Desktop/PalmTrees/Analysis/WorkspaceData/CircleJucntionAnalyseHomologyLengthVsRandom_OnCluster.Rdata")
# Scatter of Svaba-reported vs recomputed ("my") homology lengths. A battery
# of agreement statistics is baked directly into the x-axis label so the
# exported PDF is self-describing.
ggplot(data=svaba_junctions, aes(x=HomologyLength, y=myHomologyLength)) +
  geom_point(size=1) +
  xlab(paste0("Svaba Homology Length \n\n",
              "Percent Sequence Identical: ", sprintf("%.2f",mean(svaba_junctions$Homology==svaba_junctions$myHomology, na.rm=T)), "\n",
              "Percent Length Identical: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength==svaba_junctions$myHomologyLength, na.rm=T)), "\n",
              "SvabaHomology Percent >0bp: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength>0, na.rm=T)), "\n",
              "MyHomology Percent >0bp: ", sprintf("%.2f",mean(svaba_junctions$myHomologyLength>0, na.rm=T)), "\n",
              "SvabaHomology Percent >=5bp: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength>=5, na.rm=T)), "\n",
              "MyHomology Percent >=5bp: ", sprintf("%.2f",mean(svaba_junctions$myHomologyLength>=5, na.rm=T)), "\n",
              "SvabaHomology Percent >=10bp: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength>=10, na.rm=T)), "\n",
              "MyHomology Percent >=10bp: ", sprintf("%.2f",mean(svaba_junctions$myHomologyLength>=10, na.rm=T)), "\n",
              "Mean SvabaHomology: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength, na.rm=T)), "\n",
              "Mean myHomology: ", sprintf("%.2f",mean(svaba_junctions$myHomologyLength, na.rm=T)), "\n",
              "SD SvabaHomology: ",sprintf("%.2f",sd(svaba_junctions$HomologyLength, na.rm=T)), "\n",
              "SD myHomology: ", sprintf("%.2f",sd(svaba_junctions$myHomologyLength, na.rm=T)), "\n",
              "Median SvabaHomology: ", sprintf("%.2f",median(svaba_junctions$HomologyLength, na.rm=T)), "\n",
              "Median myHomology: ", sprintf("%.2f",median(svaba_junctions$myHomologyLength, na.rm=T)), "\n",
              "Mean Abs. Difference:", sprintf("%.2f", mean(abs(svaba_junctions$HomologyLength-svaba_junctions$myHomologyLength), na.rm=T)), "\n",
              "Median Abs. Difference:", sprintf("%.2f", median(abs(svaba_junctions$HomologyLength-svaba_junctions$myHomologyLength), na.rm=T)), "\n",
              "Correlation: ", sprintf("%.2f",cor.test(svaba_junctions$HomologyLength, svaba_junctions$myHomologyLength, use="complete.obs")$estimate)
  )) +
  ggtitle("Svaba Circle Junctions") +
  theme_kons1() +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/myHomologyVsSvabaHomologyScatter.pdf", height=8, width=5.5)

# Histogram of the per-junction difference between the two homology estimates.
ggplot(data=svaba_junctions, aes(x=HomologyLength-myHomologyLength)) +
  geom_histogram(binwidth = 1) +
  ggtitle("Svaba Circle Junctions") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/myHomologyVsSvabaHomology.pdf")
# Console summary: median/mean homology length, real vs randomized junctions.
# NOTE(review): this result is only printed, not saved anywhere.
permuted_and_unpermuted_svaba_junctions %>%
  group_by(isPermuted) %>%
  summarise(MedianHomologyLength = median(myHomologyLength, na.rm=T),
            MeanHomologyLength = mean(myHomologyLength, na.rm=T))

# Two-sided t-test of homology length, real vs randomized; the printed output
# is captured verbatim into a text file.
t.test(myHomologyLength~isPermuted, data=permuted_and_unpermuted_svaba_junctions, alternative="two.sided") %>%
  capture.output() %>%
  writeLines("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologiesVsRandom_ttest.txt")

# Overlaid density of homology lengths, real (red) vs randomized (grey).
permuted_and_unpermuted_svaba_junctions %>%
  ggplot(aes(x=myHomologyLength, fill=isPermuted)) +
  geom_density(bw=2, alpha=0.5) +
  xlab("Homology Lengths") +
  ylab("Density") +
  xlim(0,30) +
  theme_kons1() +
  ggtitle("Svaba Circle Junctions") +
  scale_color_manual(values=c("Randomized"="grey50", "Real"="red")) +
  scale_fill_manual(values=c("Randomized"="grey50", "Real"="red")) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologiesVsRandomDensity.pdf", height=2.5, width=4)

# Same comparison as jittered points on a log1p y-axis.
permuted_and_unpermuted_svaba_junctions %>%
  ggplot(aes(x=isPermuted, y=myHomologyLength, fill=isPermuted,color=isPermuted)) +
  geom_jitter(alpha=0.1, size=1) +
  xlab("Homology Lengths") +
  ylab("Density") +
  theme_kons1() +
  scale_y_continuous(trans="log1p") +
  scale_color_manual(values=c("Randomized"="grey50", "Real"="red")) +
  scale_fill_manual(values=c("Randomized"="grey50", "Real"="red")) +
  ggtitle("Svaba Circle Junctions") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologiesVsRandomDots.pdf", height=4, width=5)
##################################################
########## INSERTIONS ############
##################################################
# Fraction of precise junctions with any inserted sequence (printed only).
svaba_junctions %>%
  mutate(hasInsertion = InsertionLength>0) %>%
  .$hasInsertion %>% mean()
# 2.8% of precisely reconstructable circle junctions have an insertion

svaba_junctions %>%
  mutate(hasInsertion = InsertionLength>=5) %>%
  .$hasInsertion %>% mean()
# 1.8% of precisely reconstructable circle junctions have an insertion of at least 5bp

svaba_junctions %>%
  mutate(hasInsertion = InsertionLength>=10) %>%
  .$hasInsertion %>% mean()
# 0.8% of precisely reconstructable circle junctions have an insertion of at least 10bp
# (comment previously said "5bp" but the condition above is >= 10)

# Frequency table of insertion lengths (printed to console only).
svaba_junctions %>%
  distinct() %>%
  .$InsertionLength %>% table()
# Histogram of insertion lengths over all junctions, log1p x-axis.
svaba_junctions %>%
  distinct() %>%
  ggplot(aes(x=InsertionLength)) +
  geom_histogram(fill="firebrick2", color=NA) +
  theme_kons1() +
  xlab("Insertion Length at Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  scale_x_continuous(trans="log1p", breaks = c(0,3,10,30,100,300)) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/InsertionLengths.pdf", height=2, width=4)

# Same histogram restricted to junctions that actually carry an insertion.
svaba_junctions %>%
  distinct() %>%
  filter(InsertionLength>0) %>%
  ggplot(aes(x=InsertionLength)) +
  geom_histogram(fill="firebrick2", color=NA) +
  theme_kons1() +
  xlab("Insertion Length at \n Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  scale_x_continuous(trans="log1p", breaks = c(0,3,10,30,100,300)) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/InsertionLengthsLargerZero.pdf", height=2, width=3)

# Density version of the insertion-carrying subset.
svaba_junctions %>%
  distinct() %>%
  filter(InsertionLength>0) %>%
  ggplot(aes(x=InsertionLength)) +
  geom_density(bw=1,fill="firebrick2", color=NA) +
  theme_kons1() +
  xlab("Insertion Length at \n Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  scale_x_continuous(trans="log1p", breaks = c(0,3,10,30,100,300)) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/InsertionLengthsLargerZero_Density.pdf", height=2, width=3)

# Per-sample density curves (grey scale, legend suppressed).
svaba_junctions %>%
  distinct() %>%
  filter(InsertionLength>0) %>%
  ggplot(aes(x=InsertionLength, color=Sample)) +
  geom_density(bw=1,fill=NA) +
  scale_color_grey() +
  theme_kons1() +
  xlab("Insertion Length at \n Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  guides(color=F, fill=F)+
  scale_x_continuous(trans="log1p", breaks = c(0,3,10,30,100,300)) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/InsertionLengthsLargerZero_bySample_Density.pdf", height=2, width=3)
library(Biostrings)
svaba_junctions_seqs = svaba_junctions %>%
dplyr::select(Sample, ChrA, PosA, ChrB, PosB, InsertionLength, Insertion) %>%
mutate(ID = paste0(Sample, "_", ChrA, ":", PosA, "_", ChrB, ":", PosB)) %>%
distinct() %>%
filter(InsertionLength>=20) %>%
dplyr::select(ID, Insertion)
svaba_junctions_seqs_xstr = DNAStringSet(svaba_junctions_seqs$Insertion)
names(svaba_junctions_seqs_xstr) =svaba_junctions_seqs$ID
writeXStringSet(svaba_junctions_seqs_xstr, paste0("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Insertions_Min20bp.fa"))
svaba_junctions_seqs = svaba_junctions %>%
dplyr::select(Sample, ChrA, PosA, ChrB, PosB, InsertionLength, Insertion) %>%
mutate(ID = paste0(Sample, "_", ChrA, ":", PosA, "_", ChrB, ":", PosB)) %>%
distinct() %>%
filter(InsertionLength>=10) %>%
dplyr::select(ID, Insertion)
svaba_junctions_seqs_xstr = DNAStringSet(svaba_junctions_seqs$Insertion)
names(svaba_junctions_seqs_xstr) =svaba_junctions_seqs$ID
writeXStringSet(svaba_junctions_seqs_xstr, paste0("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Insertions_Min10bp.fa"))
#system("source activate motifs; meme ~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Insertions_Min10bp.fa -revcomp -nmotifs 10 -dna -oc ~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Insertions_Min10bp_MEME")
#############################################################
##########      MOTIFS AT BREAKPOINTS            ############
#############################################################
# Extract the hg19 reference sequence in a window around both breakpoints
# of every junction, for downstream motif analysis.
IntervalLength = 20  # half-window = ceiling(20/2) = 10 -> 21 bp intervals
# NOTE(review): the output filename says "41bp", which would match
# IntervalLength = 40; with 20 the windows are 21 bp. Confirm intent.
svaba_junctions = svaba_junctions %>%
  mutate(ID = paste0(Sample, "_", ChrA, ":", PosA, "_", ChrB, ":", PosB))
# One interval per breakpoint: junction start and end stacked.
BreakpointIntervals = data.frame(
  ID = c(paste0(svaba_junctions$ID, "_Start"), paste0(svaba_junctions$ID, "_End")),
  chr = c(svaba_junctions$ChrA, svaba_junctions$ChrB),
  start = c(svaba_junctions$PosA - ceiling(IntervalLength / 2), svaba_junctions$PosB - ceiling(IntervalLength / 2)),
  end = c(svaba_junctions$PosA + ceiling(IntervalLength / 2), svaba_junctions$PosB + ceiling(IntervalLength / 2)))
BreakpointIntervals$strand = "*"
BreakpointIntervals = BreakpointIntervals %>% distinct()
BreakpointIntervals.gr = makeGRangesFromDataFrame(df = BreakpointIntervals, seqnames.field = "chr", start.field = "start", end.field = "end", keep.extra.columns = TRUE)
seqlevels(BreakpointIntervals.gr, pruning.mode="coarse") = seqlevels(BSgenome.Hsapiens.UCSC.hg19)
seqinfo(BreakpointIntervals.gr) = seqinfo(BSgenome.Hsapiens.UCSC.hg19)
BreakpointIntervals.gr <- trim(BreakpointIntervals.gr)  # clip windows running off chromosome ends
seq = getSeq(BSgenome.Hsapiens.UCSC.hg19, BreakpointIntervals.gr, as.character=FALSE)
# BUG FIX: the original filtered `seq` first and then assigned the
# *unfiltered* ID vector as names, which errors (length mismatch)
# whenever trim() shortened any interval to <= 20 bp. Name first,
# then subset sequences and names together.
names(seq) = BreakpointIntervals.gr$ID
keep = width(seq) > 20  # drop intervals truncated by trim()
seq = seq[keep]
writeXStringSet(seq, paste0("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_BreakpointIntervals_41bp.fa"))
| /Analysis/Code/CircleJunctionAnalyseSvabaBreakpoints.R | no_license | sunqiangzai/TreeShapedRearrangements | R | false | false | 18,209 | r | rm(list=ls())
# --- Svaba circle-junction analysis: setup ---------------------------------
# Loads tidyverse/Bioconductor dependencies, a custom ggplot theme, and two
# precomputed workspaces that (presumably) provide `svaba_junctions` and
# `circles` -- confirm against the upstream scripts that wrote them.
library(dplyr)
library(tidyr)
library(GenomicRanges)
library(ggplot2)
library(ggrepel)
library(BSgenome.Hsapiens.UCSC.hg19)
library(Biostrings)
source("~/Desktop/PalmTrees/Analysis/Code/CustomThemes.R")
load("~/Desktop/PalmTrees/Analysis/WorkspaceData/SvabaCircleJunctions.Rdata")
load("~/Desktop/PalmTrees/Analysis/WorkspaceData/Circles.Rdata")
samples = unique(svaba_junctions$Sample)
# Export one BED-like table of precise junction coordinates per sample.
# NOTE(review): the loop variable `sample` shadows base::sample(); a name
# like `smp` would be safer.
for (sample in samples){
  svaba_junctions %>%
    filter(Sample == sample) %>%
    dplyr::select(ChrA,PosA,PosB) %>%
    write.table(paste0("~/Desktop/PalmTrees/Results/Tables/SvabaCircles/", sample, "_SvabaPreciseJunctions.bed"),
                quote=F, sep="\t", col.names=F, row.names=F)
}
########################################################
##########       SvabaCircleStats           ############
########################################################
# Bar chart: number of Svaba circles per sample, ordered by count.
# NOTE(review): `ggsave()` appended with `+` depends on ggplot2's
# last-plot side effect; this pattern is used throughout the script.
svaba_junctions %>%
  group_by(Sample) %>%
  summarise(nCircles = n()) %>%
  ungroup() %>%
  mutate(Sample = forcats::fct_reorder(Sample,-nCircles)) %>%
  ggplot(aes(x=Sample, y=nCircles))+
  geom_col(fill="grey25") +
  xlab("") +
  ylab("Number of Circles") +
  ggtitle("Svaba Circles") +
  theme_kons1() +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_NumberBySample.pdf", width=4, height=2, useDingbats=F)
# Correlate Circle-seq vs Svaba Call call number in general
# Scatter of per-sample circle counts: Circle-seq (x) vs Svaba (y).
CircleSeqNumberOfCircles = circles %>%
  filter(Method=="CircleSeq") %>%
  group_by(Sample) %>%
  summarise(CircleSeqCircles = n()) %>%
  ungroup()
svaba_junctions %>%
  group_by(Sample) %>%
  summarise(SvabaCircles = n()) %>%
  ungroup() %>%
  full_join(CircleSeqNumberOfCircles) %>%
  ggplot(aes(x=CircleSeqCircles, y=SvabaCircles)) +
  geom_point() +
  geom_text_repel(aes(label=Sample), size=3) +
  theme_kons1() +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_NumberBySample_CorrelationWithCircleSeq.pdf", width=5, height=5, useDingbats=F)
# Circle length (PosB - PosA) distributions: pooled, per sample, and
# restricted to short circles.
svaba_junctions %>%
  mutate(CircleLength = PosB-PosA) %>%
  ggplot(aes(x=CircleLength)) +
  geom_density(fill="grey25", color=NA) +
  scale_x_continuous(trans="log1p", breaks=c(100,300,1000,3000,10000)) +
  theme_kons1() +
  xlab("Circle Length [bp]") +
  ylab("") +
  ggtitle("Svaba Circle Lengths") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_CircleLength.pdf", width=3, height=2, useDingbats=F)
# Same density, faceted by sample.
svaba_junctions %>%
  mutate(CircleLength = PosB-PosA) %>%
  ggplot(aes(x=CircleLength)) +
  geom_density(fill="grey25", color=NA) +
  scale_x_continuous(trans="log1p", breaks=c(100,300,1000,3000,10000)) +
  theme_kons1() +
  xlab("Circle Length [bp]") +
  ylab("") +
  ggtitle("Svaba Circle Lengths") +
  facet_wrap(Sample~.) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_CircleLength_bySample.pdf", width=10, height=10, useDingbats=F)
# NOTE(review): filename says "Less1kb" but the filter keeps circles
# < 1500 bp -- confirm which was intended.
svaba_junctions %>%
  mutate(CircleLength = PosB-PosA) %>%
  filter(CircleLength < 1500) %>%
  ggplot(aes(x=CircleLength)) +
  geom_density(bw=3, fill="grey25", color=NA) +
  theme_kons1() +
  xlab("Circle Length [bp]") +
  ylab("") +
  ggtitle("Svaba Circle Lengths") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/SvabaCircles_CircleLength_Less1kb.pdf", width=3, height=2, useDingbats=F)
##################################################
##########        HOMOLOGIES          ############
##################################################
# Fraction of junctions with microhomology at three length cutoffs;
# results are only printed, not stored. The percentages in the comments
# were recorded from one run of the data.
svaba_junctions %>%
  mutate(hasHomology = HomologyLength>0) %>%
  .$hasHomology %>% mean()
# 61.5% of precisely reconstructable circle junctions have an homology
svaba_junctions %>%
  mutate(hasHomology = HomologyLength>=5) %>%
  .$hasHomology %>% mean()
# 6% of precisely reconstructable circle junctions have a homology of at least 5bp
svaba_junctions %>%
  mutate(hasHomology = HomologyLength>=10) %>%
  .$hasHomology %>% mean()
# 0.4% of precisely reconstructable circle junctions have a homology of at least 10bp
# Histogram of homology lengths, pooled and faceted by sample.
svaba_junctions %>%
  ggplot(aes(x=HomologyLength)) +
  geom_histogram(fill="steelblue", color=NA) +
  theme_kons1() +
  xlab("Homology Length at \n Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologyLength.pdf", width=3, height=2, useDingbats=F)
svaba_junctions %>%
  ggplot(aes(x=HomologyLength)) +
  geom_histogram(fill="steelblue", color=NA) +
  theme_kons1() +
  xlab("Homology Length at Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  facet_wrap(Sample~.) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologyLength_bySample.pdf", width=6, height=6, useDingbats=F)
# Density version of the per-sample homology-length plot.
# BUG FIX: this plot was saved to the same path as the histogram version
# directly above ("HomologyLength_bySample.pdf"), silently overwriting
# it. Renamed with a "_Density" suffix, matching the naming used for the
# insertion-length plots elsewhere in this script.
svaba_junctions %>%
  ggplot(aes(x=HomologyLength)) +
  geom_density(bw=2, fill="steelblue", color=NA) +
  theme_kons1() +
  xlab("Homology Length at Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  facet_wrap(Sample~.) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologyLength_bySample_Density.pdf", width=6, height=6, useDingbats=FALSE)
# Analyse Sequence
# Export homology sequences (>= 5 bp) as FASTA, one record per breakend
# pair, for external motif analysis.
library(Biostrings)
svaba_junctions_seqs = svaba_junctions %>%
  dplyr::select(Sample, BNDPairID, HomologyLength, Homology) %>%
  distinct() %>%
  filter(HomologyLength>=5) %>%
  dplyr::select(BNDPairID, Homology)
svaba_junctions_seqs_xstr = DNAStringSet(svaba_junctions_seqs$Homology)
names(svaba_junctions_seqs_xstr) = svaba_junctions_seqs$BNDPairID
writeXStringSet(svaba_junctions_seqs_xstr, paste0("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Homology_Min5.fa"))
## Analyse how that compares to randomized breakpoints
# Computes homology lengths with a custom implementation (myHomology) for
# both the real junctions and junctions with randomly re-paired B-ends.
source("~/Desktop/PalmTrees/Analysis/Code/myHomology.R")
library(parallel)
cores=detectCores()
# This is computed on the cluster because I wrote insanely slow code ------------------------------------------------------------------
# The commented block below is the cluster job that produced the .Rdata
# file loaded afterwards; kept for provenance.
# svaba_junctions$myHomology = mcmapply(getHomologySvabaBreakpoints,
#                                      svaba_junctions$ChrA, svaba_junctions$PosA, svaba_junctions$DirectionA,
#                                      svaba_junctions$ChrB, svaba_junctions$PosB, svaba_junctions$DirectionB,
#                                      mc.cores=cores)
# svaba_junctions$myHomologyLength = nchar(svaba_junctions$myHomology)
#
# unpermuted_svaba_junctions = svaba_junctions %>% filter(isPrecise) %>% dplyr::select(Sample, ChrA, PosA, DirectionA, ChrB, PosB, DirectionB, CircleGenomeClass, isInUnionPT) %>% distinct()
#
# permuted_svaba_junctions = unpermuted_svaba_junctions
# B_Perm_i = sample.int(nrow(unpermuted_svaba_junctions),nrow(unpermuted_svaba_junctions),replace=F)
# permuted_svaba_junctions[, "ChrB"] = permuted_svaba_junctions[B_Perm_i, "ChrB"]
# permuted_svaba_junctions[, "PosB"] = permuted_svaba_junctions[B_Perm_i, "PosB"]
# permuted_svaba_junctions[, "DirectionB"] = permuted_svaba_junctions[B_Perm_i, "DirectionB"]
# permuted_svaba_junctions$myHomology = mcmapply(getHomologySvabaBreakpoints,
#                                               permuted_svaba_junctions$ChrA, permuted_svaba_junctions$PosA, permuted_svaba_junctions$DirectionA,
#                                               permuted_svaba_junctions$ChrB, permuted_svaba_junctions$PosB, permuted_svaba_junctions$DirectionB,
#                                               mc.cores=cores)
# permuted_svaba_junctions$myHomologyLength = nchar(permuted_svaba_junctions$myHomology)
# permuted_svaba_junctions$isPermuted = "Randomized"
#
# unpermuted_svaba_junctions$myHomology = mcmapply(getHomologySvabaBreakpoints,
#                                                 unpermuted_svaba_junctions$ChrA, unpermuted_svaba_junctions$PosA, unpermuted_svaba_junctions$DirectionA,
#                                                 unpermuted_svaba_junctions$ChrB, unpermuted_svaba_junctions$PosB, unpermuted_svaba_junctions$DirectionB,
#                                                 mc.cores=cores)
# unpermuted_svaba_junctions$myHomologyLength = nchar(unpermuted_svaba_junctions$myHomology)
# unpermuted_svaba_junctions$isPermuted = "Real"
# permuted_and_unpermuted_svaba_junctions = bind_rows(permuted_svaba_junctions, unpermuted_svaba_junctions)
# Load the results from the cluster
# Presumably provides svaba_junctions$myHomology(Length) and
# permuted_and_unpermuted_svaba_junctions -- confirm against the cluster job.
load("~/Desktop/PalmTrees/Analysis/WorkspaceData/CircleJucntionAnalyseHomologyLengthVsRandom_OnCluster.Rdata")
# Scatter of Svaba-reported vs custom homology lengths. The x-axis label
# is (ab)used to print a panel of agreement statistics between the two
# measures; left as-is because the figure layout depends on it.
ggplot(data=svaba_junctions, aes(x=HomologyLength, y=myHomologyLength)) +
  geom_point(size=1) +
  xlab(paste0("Svaba Homology Length \n\n",
              "Percent Sequence Identical: ", sprintf("%.2f",mean(svaba_junctions$Homology==svaba_junctions$myHomology, na.rm=T)), "\n",
              "Percent Length Identical: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength==svaba_junctions$myHomologyLength, na.rm=T)), "\n",
              "SvabaHomology Percent >0bp: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength>0, na.rm=T)), "\n",
              "MyHomology Percent >0bp: ", sprintf("%.2f",mean(svaba_junctions$myHomologyLength>0, na.rm=T)), "\n",
              "SvabaHomology Percent >=5bp: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength>=5, na.rm=T)), "\n",
              "MyHomology Percent >=5bp: ", sprintf("%.2f",mean(svaba_junctions$myHomologyLength>=5, na.rm=T)), "\n",
              "SvabaHomology Percent >=10bp: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength>=10, na.rm=T)), "\n",
              "MyHomology Percent >=10bp: ", sprintf("%.2f",mean(svaba_junctions$myHomologyLength>=10, na.rm=T)), "\n",
              "Mean SvabaHomology: ", sprintf("%.2f",mean(svaba_junctions$HomologyLength, na.rm=T)), "\n",
              "Mean myHomology: ", sprintf("%.2f",mean(svaba_junctions$myHomologyLength, na.rm=T)), "\n",
              "SD SvabaHomology: ",sprintf("%.2f",sd(svaba_junctions$HomologyLength, na.rm=T)), "\n",
              "SD myHomology: ", sprintf("%.2f",sd(svaba_junctions$myHomologyLength, na.rm=T)), "\n",
              "Median SvabaHomology: ", sprintf("%.2f",median(svaba_junctions$HomologyLength, na.rm=T)), "\n",
              "Median myHomology: ", sprintf("%.2f",median(svaba_junctions$myHomologyLength, na.rm=T)), "\n",
              "Mean Abs. Difference:", sprintf("%.2f", mean(abs(svaba_junctions$HomologyLength-svaba_junctions$myHomologyLength), na.rm=T)), "\n",
              "Median Abs. Difference:", sprintf("%.2f", median(abs(svaba_junctions$HomologyLength-svaba_junctions$myHomologyLength), na.rm=T)), "\n",
              "Correlation: ", sprintf("%.2f",cor.test(svaba_junctions$HomologyLength, svaba_junctions$myHomologyLength, use="complete.obs")$estimate)
  )) +
  ggtitle("Svaba Circle Junctions") +
  theme_kons1() +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/myHomologyVsSvabaHomologyScatter.pdf", height=8, width=5.5)
# Histogram of the per-junction difference between the two measures.
ggplot(data=svaba_junctions, aes(x=HomologyLength-myHomologyLength)) +
  geom_histogram(binwidth = 1) +
  ggtitle("Svaba Circle Junctions") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/myHomologyVsSvabaHomology.pdf")
# Compare homology lengths between real and randomized junctions:
# summary stats, a t-test written to a text file, and two figures.
permuted_and_unpermuted_svaba_junctions %>%
  group_by(isPermuted) %>%
  summarise(MedianHomologyLength = median(myHomologyLength, na.rm=T),
            MeanHomologyLength = mean(myHomologyLength, na.rm=T))
# Two-sided Welch t-test, real vs randomized; output captured to file.
t.test(myHomologyLength~isPermuted, data=permuted_and_unpermuted_svaba_junctions, alternative="two.sided") %>%
  capture.output() %>%
  writeLines("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologiesVsRandom_ttest.txt")
permuted_and_unpermuted_svaba_junctions %>%
  ggplot(aes(x=myHomologyLength, fill=isPermuted)) +
  geom_density(bw=2, alpha=0.5) +
  xlab("Homology Lengths") +
  ylab("Density") +
  xlim(0,30) +
  theme_kons1() +
  ggtitle("Svaba Circle Junctions") +
  scale_color_manual(values=c("Randomized"="grey50", "Real"="red")) +
  scale_fill_manual(values=c("Randomized"="grey50", "Real"="red")) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologiesVsRandomDensity.pdf", height=2.5, width=4)
permuted_and_unpermuted_svaba_junctions %>%
  ggplot(aes(x=isPermuted, y=myHomologyLength, fill=isPermuted,color=isPermuted)) +
  geom_jitter(alpha=0.1, size=1) +
  xlab("Homology Lengths") +
  ylab("Density") +
  theme_kons1() +
  scale_y_continuous(trans="log1p") +
  scale_color_manual(values=c("Randomized"="grey50", "Real"="red")) +
  scale_fill_manual(values=c("Randomized"="grey50", "Real"="red")) +
  ggtitle("Svaba Circle Junctions") +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/HomologiesVsRandomDots.pdf", height=4, width=5)
##################################################
##########        INSERTIONS          ############
##################################################
# Fraction of junctions with templated/untemplated insertions at three
# length cutoffs; results are only printed, not stored.
svaba_junctions %>%
  mutate(hasInsertion = InsertionLength>0) %>%
  .$hasInsertion %>% mean()
# 2.8% of precisely reconstructable circle junctions have an insertion
svaba_junctions %>%
  mutate(hasInsertion = InsertionLength>=5) %>%
  .$hasInsertion %>% mean()
# 1.8% of precisely reconstructable circle junctions have an insertions of at least 5bp
svaba_junctions %>%
  mutate(hasInsertion = InsertionLength>=10) %>%
  .$hasInsertion %>% mean()
# 0.8% of precisely reconstructable circle junctions have an insertions of at least 10bp
svaba_junctions %>%
  distinct() %>%
  .$InsertionLength %>% table()
# Insertion-length distributions: all junctions, then restricted to
# junctions with a non-empty insertion (histogram and density).
svaba_junctions %>%
  distinct() %>%
  ggplot(aes(x=InsertionLength)) +
  geom_histogram(fill="firebrick2", color=NA) +
  theme_kons1() +
  xlab("Insertion Length at Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  scale_x_continuous(trans="log1p", breaks = c(0,3,10,30,100,300)) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/InsertionLengths.pdf", height=2, width=4)
svaba_junctions %>%
  distinct() %>%
  filter(InsertionLength>0) %>%
  ggplot(aes(x=InsertionLength)) +
  geom_histogram(fill="firebrick2", color=NA) +
  theme_kons1() +
  xlab("Insertion Length at \n Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  scale_x_continuous(trans="log1p", breaks = c(0,3,10,30,100,300)) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/InsertionLengthsLargerZero.pdf", height=2, width=3)
svaba_junctions %>%
  distinct() %>%
  filter(InsertionLength>0) %>%
  ggplot(aes(x=InsertionLength)) +
  geom_density(bw=1,fill="firebrick2", color=NA) +
  theme_kons1() +
  xlab("Insertion Length at \n Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  scale_x_continuous(trans="log1p", breaks = c(0,3,10,30,100,300)) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/InsertionLengthsLargerZero_Density.pdf", height=2, width=3)
# Per-sample density of insertion lengths (> 0 bp) at Svaba circle
# junctions: one grey density curve per sample on a log1p x axis.
# NOTE(review): `ggsave()` inside a ggplot `+` chain relies on ggplot2's
# last-plot side effect; safer to assign the plot and call ggsave() after.
svaba_junctions %>%
  distinct() %>%
  filter(InsertionLength>0) %>%
  ggplot(aes(x=InsertionLength, color=Sample)) +
  geom_density(bw=1,fill=NA) +
  scale_color_grey() +
  theme_kons1() +
  xlab("Insertion Length at \n Circle Junction [bp]") +
  ylab("Count") +
  ggtitle("Svaba Circle Junctions") +
  guides(color=FALSE, fill=FALSE) + # fix: use FALSE, never the reassignable alias F
  scale_x_continuous(trans="log1p", breaks = c(0,3,10,30,100,300)) +
  ggsave("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/InsertionLengthsLargerZero_bySample_Density.pdf", height=2, width=3)
library(Biostrings)
# Write junction insertion sequences with length >= min_length to a FASTA
# file; each record is named Sample_ChrA:PosA_ChrB:PosB so motif hits can
# be traced back to their junction.
# (Replaces two copy-pasted blocks that differed only in the length
# cutoff, 20 vs 10 bp, and the output path.)
write_insertion_fasta <- function(min_length, fasta_path) {
  seqs <- svaba_junctions %>%
    dplyr::select(Sample, ChrA, PosA, ChrB, PosB, InsertionLength, Insertion) %>%
    mutate(ID = paste0(Sample, "_", ChrA, ":", PosA, "_", ChrB, ":", PosB)) %>%
    distinct() %>%
    filter(InsertionLength >= min_length) %>%
    dplyr::select(ID, Insertion)
  xstr <- DNAStringSet(seqs$Insertion)
  names(xstr) <- seqs$ID
  writeXStringSet(xstr, fasta_path)
}
write_insertion_fasta(20, "~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Insertions_Min20bp.fa")
write_insertion_fasta(10, "~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Insertions_Min10bp.fa")
#system("source activate motifs; meme ~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Insertions_Min10bp.fa -revcomp -nmotifs 10 -dna -oc ~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_Insertions_Min10bp_MEME")
#############################################################
##########      MOTIFS AT BREAKPOINTS            ############
#############################################################
# Extract the hg19 reference sequence in a window around both breakpoints
# of every junction, for downstream motif analysis.
IntervalLength = 20  # half-window = ceiling(20/2) = 10 -> 21 bp intervals
# NOTE(review): the output filename says "41bp", which would match
# IntervalLength = 40; with 20 the windows are 21 bp. Confirm intent.
svaba_junctions = svaba_junctions %>%
  mutate(ID = paste0(Sample, "_", ChrA, ":", PosA, "_", ChrB, ":", PosB))
# One interval per breakpoint: junction start and end stacked.
BreakpointIntervals = data.frame(
  ID = c(paste0(svaba_junctions$ID, "_Start"), paste0(svaba_junctions$ID, "_End")),
  chr = c(svaba_junctions$ChrA, svaba_junctions$ChrB),
  start = c(svaba_junctions$PosA - ceiling(IntervalLength / 2), svaba_junctions$PosB - ceiling(IntervalLength / 2)),
  end = c(svaba_junctions$PosA + ceiling(IntervalLength / 2), svaba_junctions$PosB + ceiling(IntervalLength / 2)))
BreakpointIntervals$strand = "*"
BreakpointIntervals = BreakpointIntervals %>% distinct()
BreakpointIntervals.gr = makeGRangesFromDataFrame(df = BreakpointIntervals, seqnames.field = "chr", start.field = "start", end.field = "end", keep.extra.columns = TRUE)
seqlevels(BreakpointIntervals.gr, pruning.mode="coarse") = seqlevels(BSgenome.Hsapiens.UCSC.hg19)
seqinfo(BreakpointIntervals.gr) = seqinfo(BSgenome.Hsapiens.UCSC.hg19)
BreakpointIntervals.gr <- trim(BreakpointIntervals.gr)  # clip windows running off chromosome ends
seq = getSeq(BSgenome.Hsapiens.UCSC.hg19, BreakpointIntervals.gr, as.character=FALSE)
# BUG FIX: the original filtered `seq` first and then assigned the
# *unfiltered* ID vector as names, which errors (length mismatch)
# whenever trim() shortened any interval to <= 20 bp. Name first,
# then subset sequences and names together.
names(seq) = BreakpointIntervals.gr$ID
keep = width(seq) > 20  # drop intervals truncated by trim()
seq = seq[keep]
writeXStringSet(seq, paste0("~/Desktop/PalmTrees/Results/Figures/CircleJunctionAnalysis/CircleJunction_BreakpointIntervals_41bp.fa"))
|
# ---------------------------------------------------------------------------
# Compare language models as predictors of the reading measure FPASSD.
# Each bam() fit below has the same structure on dat.no.punct, swapping in
# a different language model's probability terms for the current and
# previous word:
#   interp / interp.balanced : interpolated LMs
#   rnn                      : RNN LM
#   kenlm5..2gram            : KenLM n-gram LMs
#   kenlm1gram               : baseline with no linear LM terms
# Shared controls: tensor smooths of unigram probability x log word length
# (current and previous word), PREVFIX, a smooth of log word number, and a
# by-subject random effect. method='ML' makes log-likelihoods comparable.
# NOTE(review): FPASSD is presumably a first-pass reading-time measure --
# confirm against the corpus documentation.
# NOTE(review): the eight fits differ only in two term names; a helper
# taking the two predictor columns would remove the copy-paste. Prefer the
# generic logLik() over calling the method logLik.gam() directly.
library(mgcv)
gam.interp.linear.fitteddf <- bam(FPASSD ~
                                    interp.total.prob +
                                    interp.prev +
                                    te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                    te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                    PREVFIX +
                                    s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                    s(SUBJ, bs='re', k=10),
                                  method='ML',
                                  data=dat.no.punct)
summary(gam.interp.linear.fitteddf)
logLik.gam(gam.interp.linear.fitteddf)
gam.interp.balanced.linear.fitteddf <- bam(FPASSD ~
                                             interp.balanced.total.prob +
                                             interp.balanced.prev +
                                             te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                             te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                             PREVFIX +
                                             s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                             s(SUBJ, bs='re', k=10),
                                           method='ML',
                                           data=dat.no.punct)
summary(gam.interp.balanced.linear.fitteddf)
logLik.gam(gam.interp.balanced.linear.fitteddf)
gam.rnn.linear.fitteddf <- bam(FPASSD ~
                                 rnn.total.prob +
                                 rnn.prev +
                                 te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                 te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                 PREVFIX +
                                 s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                 s(SUBJ, bs='re', k=10),
                               method='ML',
                               data=dat.no.punct)
summary(gam.rnn.linear.fitteddf)
logLik.gam(gam.rnn.linear.fitteddf)
gam.klm5gram.linear.fitteddf <- bam(FPASSD ~
                                      kenlm5gram.total.prob +
                                      k5gram.prev +
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm5gram.linear.fitteddf)
logLik.gam(gam.klm5gram.linear.fitteddf)
gam.klm4gram.linear.fitteddf <- bam(FPASSD ~
                                      kenlm4gram.total.prob +
                                      k4gram.prev +
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm4gram.linear.fitteddf)
logLik.gam(gam.klm4gram.linear.fitteddf)
gam.klm3gram.linear.fitteddf <- bam(FPASSD ~
                                      kenlm3gram.total.prob +
                                      k3gram.prev +
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm3gram.linear.fitteddf)
logLik.gam(gam.klm3gram.linear.fitteddf)
gam.klm2gram.linear.fitteddf <- bam(FPASSD ~
                                      kenlm2gram.total.prob +
                                      k2gram.prev +
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm2gram.linear.fitteddf)
logLik.gam(gam.klm2gram.linear.fitteddf)
# Baseline: controls only, no linear LM probability terms.
gam.klm1gram.linear.fitteddf <- bam(FPASSD ~
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm1gram.linear.fitteddf)
logLik.gam(gam.klm1gram.linear.fitteddf)
# Re-print all log-likelihoods and summaries together for comparison.
# NOTE(review): the klm1gram baseline is missing from both batches below
# -- possibly an oversight.
logLik.gam(gam.interp.linear.fitteddf)
logLik.gam(gam.interp.balanced.linear.fitteddf)
logLik.gam(gam.rnn.linear.fitteddf)
logLik.gam(gam.klm5gram.linear.fitteddf)
logLik.gam(gam.klm4gram.linear.fitteddf)
logLik.gam(gam.klm3gram.linear.fitteddf)
logLik.gam(gam.klm2gram.linear.fitteddf)
summary(gam.interp.linear.fitteddf)
summary(gam.interp.balanced.linear.fitteddf)
summary(gam.rnn.linear.fitteddf)
summary(gam.klm5gram.linear.fitteddf)
summary(gam.klm4gram.linear.fitteddf)
summary(gam.klm3gram.linear.fitteddf)
summary(gam.klm2gram.linear.fitteddf)
| /modeling_code/GAMs/linear_fitted_df_gams.R | no_license | rachelsterneck/lm_1b | R | false | false | 3,764 | r | library(mgcv)
# (This chunk is a second, identical copy of the GAM model-comparison
# script that also appears earlier in this dump: the same bam() structure
# fitted with each language model's probability terms, compared by ML
# log-likelihood. See the notes on the first copy.)
gam.interp.linear.fitteddf <- bam(FPASSD ~
                                    interp.total.prob +
                                    interp.prev +
                                    te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                    te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                    PREVFIX +
                                    s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                    s(SUBJ, bs='re', k=10),
                                  method='ML',
                                  data=dat.no.punct)
summary(gam.interp.linear.fitteddf)
logLik.gam(gam.interp.linear.fitteddf)
gam.interp.balanced.linear.fitteddf <- bam(FPASSD ~
                                             interp.balanced.total.prob +
                                             interp.balanced.prev +
                                             te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                             te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                             PREVFIX +
                                             s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                             s(SUBJ, bs='re', k=10),
                                           method='ML',
                                           data=dat.no.punct)
summary(gam.interp.balanced.linear.fitteddf)
logLik.gam(gam.interp.balanced.linear.fitteddf)
gam.rnn.linear.fitteddf <- bam(FPASSD ~
                                 rnn.total.prob +
                                 rnn.prev +
                                 te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                 te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                 PREVFIX +
                                 s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                 s(SUBJ, bs='re', k=10),
                               method='ML',
                               data=dat.no.punct)
summary(gam.rnn.linear.fitteddf)
logLik.gam(gam.rnn.linear.fitteddf)
gam.klm5gram.linear.fitteddf <- bam(FPASSD ~
                                      kenlm5gram.total.prob +
                                      k5gram.prev +
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm5gram.linear.fitteddf)
logLik.gam(gam.klm5gram.linear.fitteddf)
gam.klm4gram.linear.fitteddf <- bam(FPASSD ~
                                      kenlm4gram.total.prob +
                                      k4gram.prev +
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm4gram.linear.fitteddf)
logLik.gam(gam.klm4gram.linear.fitteddf)
gam.klm3gram.linear.fitteddf <- bam(FPASSD ~
                                      kenlm3gram.total.prob +
                                      k3gram.prev +
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm3gram.linear.fitteddf)
logLik.gam(gam.klm3gram.linear.fitteddf)
gam.klm2gram.linear.fitteddf <- bam(FPASSD ~
                                      kenlm2gram.total.prob +
                                      k2gram.prev +
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm2gram.linear.fitteddf)
logLik.gam(gam.klm2gram.linear.fitteddf)
# Baseline: controls only, no linear LM probability terms.
gam.klm1gram.linear.fitteddf <- bam(FPASSD ~
                                      te(kenlm1gram.total.prob, log(WLEN), k=5, fx=TRUE) +
                                      te(k1gram.prev, log(wlen.prev), k=5, fx=TRUE) +
                                      PREVFIX +
                                      s(log(WNUM), bs = 'cr', k=3, fx=TRUE) +
                                      s(SUBJ, bs='re', k=10),
                                    method='ML',
                                    data=dat.no.punct)
summary(gam.klm1gram.linear.fitteddf)
logLik.gam(gam.klm1gram.linear.fitteddf)
# Comparison batches (klm1gram baseline omitted here -- possibly an oversight).
logLik.gam(gam.interp.linear.fitteddf)
logLik.gam(gam.interp.balanced.linear.fitteddf)
logLik.gam(gam.rnn.linear.fitteddf)
logLik.gam(gam.klm5gram.linear.fitteddf)
logLik.gam(gam.klm4gram.linear.fitteddf)
logLik.gam(gam.klm3gram.linear.fitteddf)
logLik.gam(gam.klm2gram.linear.fitteddf)
summary(gam.interp.linear.fitteddf)
summary(gam.interp.balanced.linear.fitteddf)
summary(gam.rnn.linear.fitteddf)
summary(gam.klm5gram.linear.fitteddf)
summary(gam.klm4gram.linear.fitteddf)
summary(gam.klm3gram.linear.fitteddf)
summary(gam.klm2gram.linear.fitteddf)
|
# --- COSMIC mutational-signature fitting: setup ----------------------------
# NOTE(review): rm(list=ls()) + gc() and hard-coded absolute setwd() paths
# make this script non-portable and wipe the caller's workspace; prefer
# project-relative paths and no global cleanup.
rm(list=ls())
gc()
main_wd <- "C:/Users/Nils_/OneDrive/Skrivbord/Main/Data"
setwd("C:/Users/Nils_/OneDrive/Skrivbord/Main/Data")
ref_genome = "BSgenome.Hsapiens.UCSC.hg19"  # passed to mut_matrix() below
# COSMIC signature matrix used for fitting (layout assumed 96-context rows
# x signature columns -- confirm against the file).
cosmic_signatures <- as.matrix(read.table("cosmic_signatures_extended.txt",header=TRUE))
library(reshape2)
# NOTE(review): mut_matrix(), fit_to_signatures(), cos_sim_matrix() and
# plot_contribution() are not defined here and no package providing them
# is loaded in this script -- it relies on one being attached elsewhere.
setwd("C:/Users/Nils_/OneDrive/Skrivbord/Main/Data/mut_matrix")
load("GRange_cohort.rda")  # presumably provides GRange_vcf (list of GRanges)
setwd("C:/Users/Nils_/OneDrive/Skrivbord/Main/Pictures/Test")
# For cohorts 5..10: build the 96-context mutation matrix, plot a
# normalized heatmap, then fit COSMIC signatures separately to
# high-mutation and low-mutation samples, writing one PDF per cohort.
# NOTE(review): paste() with its default " " separator puts a space in the
# PDF filename ("NAME _.pdf"); paste0() was probably intended.
for (i in 5:10){
  pdf(file=paste(names(GRange_vcf)[i],"_.pdf"),height=8.27,width =11.69)
  test <- GRange_vcf[[i]]
  l <- split(test,names(test))  # one GRanges per sample name
  print("working on mut mat")
  mutational_matrix <- mut_matrix(l,ref_genome)
  print("done")
  # Heatmap of per-sample mutation spectra, normalized to column sums and
  # with samples ordered by hierarchical clustering.
  plot_mat <- mutational_matrix
  plot_mat <- sweep(plot_mat,2,colSums(plot_mat),"/")
  sample_order <- hclust(dist(t(plot_mat)),method="complete")$order
  sample_order <- colnames(plot_mat)[sample_order]
  plot_mat <- plot_mat[, match(sample_order ,colnames(plot_mat))]
  melt_plot_mat <- melt(plot_mat)
  p<-ggplot(data=melt_plot_mat,aes(x=Var2, y=Var1, fill=value))+
    geom_tile(color = "gray")+
    scale_fill_gradient(low = "white", high = "blue",limits = c(0,0.2))
  print(p)
  #Split into high and low mutation samples
  # NOTE(review): samples with a count exactly equal to 2*median fall into
  # neither group.
  select_high <- which(colSums(mutational_matrix) > 2 * median(colSums(mutational_matrix)))
  select_low <- which(colSums(mutational_matrix) < 2 * median(colSums(mutational_matrix)))
  mutational_matrix_high <- mutational_matrix[, select_high]
  mutational_matrix_low <- mutational_matrix[, select_low]
  #Cluster rows based on mutational_matrix
  sample_order <- hclust(dist(t(mutational_matrix_high)),method="complete")$order
  sample_order <- colnames(mutational_matrix_high)[sample_order]
  mutational_matrix_high <- mutational_matrix_high[, match(sample_order ,colnames(mutational_matrix_high))]
  #Create a cosine similarity matrix
  # NOTE(review): cos_sim_samples_cosmic is computed but never used.
  cos_sim_samples_cosmic <- cos_sim_matrix(mutational_matrix_high, cosmic_signatures)
  #Fit mut_matrix to cosmic
  fit_to_cosmic <- fit_to_signatures(mutational_matrix_high,cosmic_signatures)
  #Filter signatures to plot
  select <- which(rowSums(fit_to_cosmic$contribution)/sum(rowSums(fit_to_cosmic$contribution)) > 0.01)
  #Plot
  print(plot_contribution(fit_to_cosmic$contribution[select ,],cosmic_signatures[,select],coord_flip= TRUE,mode="absolute"))
  #Same for low
  #Cluster rows based on mutational_matrix
  sample_order <- hclust(dist(t(mutational_matrix_low)),method="complete")$order
  sample_order <- colnames(mutational_matrix_low)[sample_order]
  mutational_matrix_low <- mutational_matrix_low[, match(sample_order ,colnames(mutational_matrix_low))]
  #Create a cosine similarity matrix
  cos_sim_samples_cosmic <- cos_sim_matrix(mutational_matrix_low, cosmic_signatures)
  #Fit mut_matrix to cosmic
  fit_to_cosmic <- fit_to_signatures(mutational_matrix_low,cosmic_signatures)
  # Filter signatures to plot. (Translated from Swedish:) "How smart is it
  # even to do this? It misses rare but possibly important [signatures]."
  # NOTE(review): threshold 0.00 keeps every signature here, unlike the
  # 0.01 cutoff used for the high-mutation group above -- confirm intent.
  select <- which(rowSums(fit_to_cosmic$contribution)/sum(rowSums(fit_to_cosmic$contribution)) > 0.00)
  #Plot
  print(plot_contribution(fit_to_cosmic$contribution[select ,],cosmic_signatures[,select],coord_flip= TRUE,mode="absolute"))
  dev.off()
}
# NOTE(review): the block below duplicates the loop body above, operating
# on whatever mutational_matrix remains from the last loop iteration
# (i = 10), drawing to the default device since dev.off() was called.
# It uses a 0.01 cutoff for both groups; likely leftover interactive code.
plot_mat <- mutational_matrix
plot_mat <- sweep(plot_mat,2,colSums(plot_mat),"/")
sample_order <- hclust(dist(t(plot_mat)),method="complete")$order
sample_order <- colnames(plot_mat)[sample_order]
plot_mat <- plot_mat[, match(sample_order ,colnames(plot_mat))]
melt_plot_mat <- melt(plot_mat)
p<-ggplot(data=melt_plot_mat,aes(x=Var2, y=Var1, fill=value))+
  geom_tile(color = "gray")+
  scale_fill_gradient(low = "white", high = "blue",limits = c(0,0.2))
print(p)
#Split into high and low mutation samples
select_high <- which(colSums(mutational_matrix) > 2 * median(colSums(mutational_matrix)))
select_low <- which(colSums(mutational_matrix) < 2 * median(colSums(mutational_matrix)))
mutational_matrix_high <- mutational_matrix[, select_high]
mutational_matrix_low <- mutational_matrix[, select_low]
#Cluster rows based on mutational_matrix
sample_order <- hclust(dist(t(mutational_matrix_high)),method="complete")$order
sample_order <- colnames(mutational_matrix_high)[sample_order]
mutational_matrix_high <- mutational_matrix_high[, match(sample_order ,colnames(mutational_matrix_high))]
#Create a cosine similarity matrix
cos_sim_samples_cosmic <- cos_sim_matrix(mutational_matrix_high, cosmic_signatures)
#Fit mut_matrix to cosmic
fit_to_cosmic <- fit_to_signatures(mutational_matrix_high,cosmic_signatures)
#Filter signatures to plot
select <- which(rowSums(fit_to_cosmic$contribution)/sum(rowSums(fit_to_cosmic$contribution)) > 0.01)
#Plot
print(plot_contribution(fit_to_cosmic$contribution[select ,],cosmic_signatures[,select],coord_flip= TRUE,mode="absolute"))
#Same for low
#Cluster rows based on mutational_matrix
sample_order <- hclust(dist(t(mutational_matrix_low)),method="complete")$order
sample_order <- colnames(mutational_matrix_low)[sample_order]
mutational_matrix_low <- mutational_matrix_low[, match(sample_order ,colnames(mutational_matrix_low))]
#Create a cosine similarity matrix
cos_sim_samples_cosmic <- cos_sim_matrix(mutational_matrix_low, cosmic_signatures)
#Fit mut_matrix to cosmic
fit_to_cosmic <- fit_to_signatures(mutational_matrix_low,cosmic_signatures)
#Filter signatures to plot
select <- which(rowSums(fit_to_cosmic$contribution)/sum(rowSums(fit_to_cosmic$contribution)) > 0.01)
#Plot
print(plot_contribution(fit_to_cosmic$contribution[select ,],cosmic_signatures[,select],coord_flip= TRUE,mode="absolute"))
| /Examples/Tisdag_over_cancer.R | no_license | NilsWeng/nilsPaket | R | false | false | 5,727 | r |
# (Second, identical copy of the COSMIC signature script in this dump.)
# NOTE(review): rm(list=ls()) + hard-coded setwd() paths; mut_matrix() etc.
# rely on a signature-analysis package loaded elsewhere.
rm(list=ls())
gc()
main_wd <- "C:/Users/Nils_/OneDrive/Skrivbord/Main/Data"
setwd("C:/Users/Nils_/OneDrive/Skrivbord/Main/Data")
ref_genome = "BSgenome.Hsapiens.UCSC.hg19"
cosmic_signatures <- as.matrix(read.table("cosmic_signatures_extended.txt",header=TRUE))
library(reshape2)
setwd("C:/Users/Nils_/OneDrive/Skrivbord/Main/Data/mut_matrix")
load("GRange_cohort.rda")  # presumably provides GRange_vcf
setwd("C:/Users/Nils_/OneDrive/Skrivbord/Main/Pictures/Test")
# Per-cohort PDF: mutation-spectrum heatmap plus COSMIC signature fits for
# high- and low-mutation samples (see notes on the first copy).
for (i in 5:10){
  pdf(file=paste(names(GRange_vcf)[i],"_.pdf"),height=8.27,width =11.69)
  test <- GRange_vcf[[i]]
  l <- split(test,names(test))
  print("working on mut mat")
  mutational_matrix <- mut_matrix(l,ref_genome)
  print("done")
  plot_mat <- mutational_matrix
  plot_mat <- sweep(plot_mat,2,colSums(plot_mat),"/")
  sample_order <- hclust(dist(t(plot_mat)),method="complete")$order
  sample_order <- colnames(plot_mat)[sample_order]
  plot_mat <- plot_mat[, match(sample_order ,colnames(plot_mat))]
  melt_plot_mat <- melt(plot_mat)
  p<-ggplot(data=melt_plot_mat,aes(x=Var2, y=Var1, fill=value))+
    geom_tile(color = "gray")+
    scale_fill_gradient(low = "white", high = "blue",limits = c(0,0.2))
  print(p)
  #Split into high and low mutation samples
  select_high <- which(colSums(mutational_matrix) > 2 * median(colSums(mutational_matrix)))
  select_low <- which(colSums(mutational_matrix) < 2 * median(colSums(mutational_matrix)))
  mutational_matrix_high <- mutational_matrix[, select_high]
  mutational_matrix_low <- mutational_matrix[, select_low]
  #Cluster rows based on mutational_matrix
  sample_order <- hclust(dist(t(mutational_matrix_high)),method="complete")$order
  sample_order <- colnames(mutational_matrix_high)[sample_order]
  mutational_matrix_high <- mutational_matrix_high[, match(sample_order ,colnames(mutational_matrix_high))]
  #Create a cosine similarity matrix
  cos_sim_samples_cosmic <- cos_sim_matrix(mutational_matrix_high, cosmic_signatures)
  #Fit mut_matrix to cosmic
  fit_to_cosmic <- fit_to_signatures(mutational_matrix_high,cosmic_signatures)
  #Filter signatures to plot
  select <- which(rowSums(fit_to_cosmic$contribution)/sum(rowSums(fit_to_cosmic$contribution)) > 0.01)
  #Plot
  print(plot_contribution(fit_to_cosmic$contribution[select ,],cosmic_signatures[,select],coord_flip= TRUE,mode="absolute"))
  #Same for low
  #Cluster rows based on mutational_matrix
  sample_order <- hclust(dist(t(mutational_matrix_low)),method="complete")$order
  sample_order <- colnames(mutational_matrix_low)[sample_order]
  mutational_matrix_low <- mutational_matrix_low[, match(sample_order ,colnames(mutational_matrix_low))]
  #Create a cosine similarity matrix
  cos_sim_samples_cosmic <- cos_sim_matrix(mutational_matrix_low, cosmic_signatures)
  #Fit mut_matrix to cosmic
  fit_to_cosmic <- fit_to_signatures(mutational_matrix_low,cosmic_signatures)
  # Filter signatures to plot. (Translated from Swedish:) "How smart is it
  # even to do this? It misses rare but possibly important [signatures]."
  select <- which(rowSums(fit_to_cosmic$contribution)/sum(rowSums(fit_to_cosmic$contribution)) > 0.00)
  #Plot
  print(plot_contribution(fit_to_cosmic$contribution[select ,],cosmic_signatures[,select],coord_flip= TRUE,mode="absolute"))
  dev.off()
}
plot_mat <- mutational_matrix
plot_mat <- sweep(plot_mat,2,colSums(plot_mat),"/")
sample_order <- hclust(dist(t(plot_mat)),method="complete")$order
sample_order <- colnames(plot_mat)[sample_order]
plot_mat <- plot_mat[, match(sample_order ,colnames(plot_mat))]
melt_plot_mat <- melt(plot_mat)
p<-ggplot(data=melt_plot_mat,aes(x=Var2, y=Var1, fill=value))+
geom_tile(color = "gray")+
scale_fill_gradient(low = "white", high = "blue",limits = c(0,0.2))
print(p)
#Split into high and low mutation samples
select_high <- which(colSums(mutational_matrix) > 2 * median(colSums(mutational_matrix)))
select_low <- which(colSums(mutational_matrix) < 2 * median(colSums(mutational_matrix)))
mutational_matrix_high <- mutational_matrix[, select_high]
mutational_matrix_low <- mutational_matrix[, select_low]
#Cluster rows based on mutational_matrix
sample_order <- hclust(dist(t(mutational_matrix_high)),method="complete")$order
sample_order <- colnames(mutational_matrix_high)[sample_order]
mutational_matrix_high <- mutational_matrix_high[, match(sample_order ,colnames(mutational_matrix_high))]
#Create a cosine similarity matrix
cos_sim_samples_cosmic <- cos_sim_matrix(mutational_matrix_high, cosmic_signatures)
#Fit mut_matrix to cosmic
fit_to_cosmic <- fit_to_signatures(mutational_matrix_high,cosmic_signatures)
#Filter signatures to plot
select <- which(rowSums(fit_to_cosmic$contribution)/sum(rowSums(fit_to_cosmic$contribution)) > 0.01)
#Plot
print(plot_contribution(fit_to_cosmic$contribution[select ,],cosmic_signatures[,select],coord_flip= TRUE,mode="absolute"))
#Same for low
#Cluster rows based on mutational_matrix
sample_order <- hclust(dist(t(mutational_matrix_low)),method="complete")$order
sample_order <- colnames(mutational_matrix_low)[sample_order]
mutational_matrix_low <- mutational_matrix_low[, match(sample_order ,colnames(mutational_matrix_low))]
#Create a cosine similarity matrix
cos_sim_samples_cosmic <- cos_sim_matrix(mutational_matrix_low, cosmic_signatures)
#Fit mut_matrix to cosmic
fit_to_cosmic <- fit_to_signatures(mutational_matrix_low,cosmic_signatures)
#Filter signatures to plot
select <- which(rowSums(fit_to_cosmic$contribution)/sum(rowSums(fit_to_cosmic$contribution)) > 0.01)
#Plot
print(plot_contribution(fit_to_cosmic$contribution[select ,],cosmic_signatures[,select],coord_flip= TRUE,mode="absolute"))
|
# Plot time distributions (from precomputed box-plot statistics) for a single
# cohort across databases as horizontal, faceted box plots, rendered as an
# interactive ggiraph widget.
#
# Args:
#   data:        tibble of precomputed statistics; must contain Min, P25,
#                Median, P75, Max, and (used below) Database, TimeMeasure
#                and Average columns.
#   cohortIds:   cohort id(s); currently exactly one id is supported.
#   databaseIds: character vector of database ids to plot; required.
#   xAxis:       only "database" is currently supported.
#
# Returns: a ggiraph::girafe htmlwidget, or NULL (with a warning) for
# parameter combinations that are not yet implemented.
plotTimeDistribution <- function(data,
                                 cohortIds = NULL,
                                 databaseIds = NULL,
                                 xAxis = "database") {
  # Only the single-cohort, per-database layout is implemented so far.
  if (is.null(cohortIds) || length(cohortIds) > 1 || xAxis != "database" || is.null(databaseIds)) {
    warning("Not yet supported. Upcoming feature.")
    return(NULL)
  }
  # Perform error checks for input variables; all failures are collected and
  # reported together by reportAssertions().
  errorMessage <- checkmate::makeAssertCollection()
  checkmate::assertTibble(x = data,
                          any.missing = FALSE,
                          min.rows = 1,
                          min.cols = 5,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertDouble(x = cohortIds,
                          lower = 1,
                          upper = 2^53,
                          any.missing = FALSE,
                          null.ok = TRUE,
                          min.len = 1,
                          add = errorMessage)
  checkmate::assertCharacter(x = databaseIds,
                             any.missing = FALSE,
                             null.ok = TRUE,
                             min.len = 1,
                             unique = TRUE,
                             add = errorMessage)
  checkmate::assertChoice(x = xAxis,
                          choices = c("database", "cohortId"),
                          add = errorMessage)
  checkmate::assertNames(x = colnames(data),
                         must.include = c("Min", "P25", "Median", "P75", "Max"),
                         add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  # Restrict to the requested cohort(s) and database(s).
  plotData <- data
  if (!is.null(cohortIds)) {
    plotData <- plotData %>%
      dplyr::filter(.data$cohortId %in% !!cohortIds)
  }
  if (!is.null(databaseIds)) {
    plotData <- plotData %>%
      dplyr::filter(.data$Database %in% !!databaseIds)
  }
  # Box plots are drawn from the precomputed quantiles (stat = "identity"),
  # not from raw observations.
  # NOTE(review): "average" is not a standard ggplot2 aesthetic — confirm it
  # is intentionally carried along (e.g. for tooltips).
  plot <- ggplot2::ggplot(data = plotData) +
    ggplot2::aes(x = .data$Database,
                 ymin = .data$Min,
                 lower = .data$P25,
                 middle = .data$Median,
                 upper = .data$P75,
                 ymax = .data$Max,
                 group = .data$TimeMeasure,
                 average = .data$Average) +
    ggplot2::geom_errorbar(mapping = ggplot2::aes(ymin = .data$Min,
                                                  ymax = .data$Max), size = 0.5) +
    ggplot2::geom_boxplot(stat = "identity",
                          fill = rgb(0, 0, 0.8, alpha = 0.25),
                          size = 0.2) +
    ggplot2::facet_grid(rows = Database~TimeMeasure, scales = "free", switch = "y") +
    ggplot2::coord_flip() +
    ggplot2::theme(panel.grid.major.y = ggplot2::element_blank(),
                   panel.grid.minor.y = ggplot2::element_blank(),
                   axis.title.y = ggplot2::element_blank(),
                   axis.ticks.y = ggplot2::element_blank(),
                   axis.text.y = ggplot2::element_blank(),
                   strip.text.y.left = ggplot2::element_text(angle = 0))
  # Wrap in an interactive widget; height grows with the number of databases.
  plot <- ggiraph::girafe(ggobj = plot,
                          options = list(
                            ggiraph::opts_sizing(width = .7),
                            ggiraph::opts_zoom(max = 5)),
                          width_svg = 12,
                          height_svg = 0.7 + 0.5 * length(databaseIds))
  return(plot)
}
# how to render using pure plot ly. Plotly does not prefer precomputed data.
# TO DO: color and plot positions are not consistent yet.
# plot <- plotly::plot_ly(data = plotData,
# type = "box",
# median = plotData$P25,
# #Mean = plotData$Average,
# upperfence = plotData$Max,
# lowerfence = plotData$Min,
# split = plotData$TimeMeasure)
# loop thru database or cohorts as needed
# then subplot
# plot <- plotly::subplot(plots,nrows = length(input$databases),margin = 0.05)
# Plot incidence rates, optionally stratified by age group, gender and
# calendar year, as an interactive ggiraph widget. Calendar-year
# stratification produces a line plot; otherwise a bar plot is drawn.
#
# Args:
#   data:                   tibble with one row per stratum; must contain an
#                           incidenceRate column plus (depending on the
#                           stratification flags) gender, ageGroup and
#                           calendarYear, and for tooltips databaseId,
#                           personYears and cohortCount.
#   cohortIds:              optional cohort ids to keep.
#   databaseIds:            optional database ids to keep.
#   stratifyByAgeGroup:     facet/group by age group?
#   stratifyByGender:       color/group by gender?
#   stratifyByCalendarYear: put calendar year on the x axis (line plot)?
#   yscaleFixed:            use a fixed y scale across database facets?
#
# Returns: a ggiraph::girafe htmlwidget.
plotIncidenceRate <- function(data,
                              cohortIds = NULL,
                              databaseIds = NULL,
                              stratifyByAgeGroup = TRUE,
                              stratifyByGender = TRUE,
                              stratifyByCalendarYear = TRUE,
                              yscaleFixed = FALSE) {
  if (nrow(data) == 0) {
    # Execution deliberately continues: the assertTibble(min.rows = 1) below
    # raises the actual error for empty input.
    ParallelLogger::logWarn("Record counts are too low to plot.")
  }
  # Validate inputs; all failures are collected and reported together.
  errorMessage <- checkmate::makeAssertCollection()
  checkmate::assertTibble(x = data,
                          any.missing = TRUE,
                          min.rows = 1,
                          min.cols = 5,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertDouble(x = cohortIds,
                          lower = 1,
                          upper = 2^53,
                          any.missing = FALSE,
                          null.ok = TRUE,
                          min.len = 1,
                          add = errorMessage)
  checkmate::assertCharacter(x = databaseIds,
                             any.missing = FALSE,
                             null.ok = TRUE,
                             min.len = 1,
                             unique = TRUE,
                             add = errorMessage)
  checkmate::assertLogical(x = stratifyByAgeGroup,
                           any.missing = FALSE,
                           min.len = 1,
                           max.len = 1,
                           null.ok = FALSE,
                           add = errorMessage)
  checkmate::assertLogical(x = stratifyByGender,
                           any.missing = FALSE,
                           min.len = 1,
                           max.len = 1,
                           null.ok = FALSE,
                           add = errorMessage)
  checkmate::assertLogical(x = stratifyByCalendarYear,
                           any.missing = FALSE,
                           min.len = 1,
                           max.len = 1,
                           null.ok = FALSE,
                           add = errorMessage)
  checkmate::assertLogical(x = yscaleFixed,
                           any.missing = FALSE,
                           min.len = 1,
                           max.len = 1,
                           null.ok = FALSE,
                           add = errorMessage)
  # Fix: this assertion and the reportAssertions() call were previously
  # duplicated verbatim; one pass is sufficient.
  checkmate::assertDouble(x = data$incidenceRate,
                          lower = 0,
                          any.missing = FALSE,
                          null.ok = FALSE,
                          min.len = 1,
                          add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  plotData <- data %>%
    dplyr::mutate(incidenceRate = round(.data$incidenceRate, digits = 3))
  # Restrict to the requested cohort(s) and database(s).
  if (!is.null(cohortIds)) {
    plotData <- plotData %>%
      dplyr::filter(.data$cohortId %in% !!cohortIds)
  }
  if (!is.null(databaseIds)) {
    plotData <- plotData %>%
      dplyr::filter(.data$databaseId %in% !!databaseIds)
  }
  # Keep only rows whose stratification pattern (gender/age/year present or
  # absent) matches the requested stratification flags.
  plotData <- plotData %>%
    dplyr::mutate(strataGender = !is.na(.data$gender),
                  strataAgeGroup = !is.na(.data$ageGroup),
                  strataCalendarYear = !is.na(.data$calendarYear)) %>%
    dplyr::filter(.data$strataGender %in% !!stratifyByGender &
                    .data$strataAgeGroup %in% !!stratifyByAgeGroup &
                    .data$strataCalendarYear %in% !!stratifyByCalendarYear) %>%
    dplyr::select(-dplyr::starts_with("strata"))
  # Build the aesthetic mapping as strings for aes_string() below.
  # NOTE(review): ggplot2::aes_string() is deprecated upstream; kept here for
  # behavior compatibility with the rest of this file.
  aesthetics <- list(y = "incidenceRate")
  if (stratifyByCalendarYear) {
    aesthetics$x <- "calendarYear"
    xLabel <- "Calender year"
    showX <- TRUE
    if (stratifyByGender) {
      aesthetics$group <- "gender"
      aesthetics$color <- "gender"
    }
    plotType <- "line"
  } else {
    xLabel <- ""
    if (stratifyByGender) {
      aesthetics$x <- "gender"
      aesthetics$color <- "gender"
      aesthetics$fill <- "gender"
      showX <- TRUE
    } else if (stratifyByAgeGroup) {
      aesthetics$x <- "ageGroup"
      showX <- TRUE
    } else {
      aesthetics$x <- "cohortId"
      showX <- FALSE
    }
    plotType <- "bar"
  }
  # Order age groups numerically by the lower bound of the "lo-hi" label
  # (plain sorting would put e.g. "10-19" before "5-9").
  newSort <- plotData %>%
    dplyr::select(.data$ageGroup) %>%
    dplyr::distinct() %>%
    dplyr::arrange(as.integer(sub(pattern = '-.+$','',x = .data$ageGroup)))
  plotData <- plotData %>%
    dplyr::arrange(ageGroup = factor(.data$ageGroup, levels = newSort$ageGroup), .data$ageGroup)
  plotData$ageGroup <- factor(plotData$ageGroup,
                              levels = newSort$ageGroup)
  # Assemble the hover tooltip, appending one line per active stratum.
  plotData$tooltip <- c(paste0("Incidence Rate = ", scales::comma(plotData$incidenceRate, accuracy = 0.01),
                               "\nDatabase = ", plotData$databaseId,
                               "\nPerson years = ", scales::comma(plotData$personYears, accuracy = 0.1),
                               "\nCohort count = ", scales::comma(plotData$cohortCount)))
  if (stratifyByAgeGroup) {
    plotData$tooltip <- c(paste0(plotData$tooltip, "\nAge Group = ", plotData$ageGroup))
  }
  if (stratifyByGender) {
    plotData$tooltip <- c(paste0(plotData$tooltip, "\nGender = ", plotData$gender))
  }
  if (stratifyByCalendarYear) {
    plotData$tooltip <- c(paste0(plotData$tooltip, "\nYear = ", plotData$calendarYear))
  }
  plot <- ggplot2::ggplot(data = plotData,
                          do.call(ggplot2::aes_string, aesthetics)) +
    ggplot2::xlab(xLabel) +
    ggplot2::ylab("Incidence Rate (/1,000 person years)") +
    ggplot2::theme(legend.position = "top",
                   legend.title = ggplot2::element_blank(),
                   axis.text.x = if (showX) ggplot2::element_text(angle = 90, vjust = 0.5) else ggplot2::element_blank() )
  if (plotType == "line") {
    plot <- plot +
      ggiraph::geom_line_interactive(ggplot2::aes(), size = 1, alpha = 0.6) +
      ggiraph::geom_point_interactive(ggplot2::aes(tooltip = tooltip), size = 2, alpha = 0.6)
  } else {
    plot <- plot + ggplot2::geom_bar(stat = "identity") +
      ggiraph::geom_col_interactive( ggplot2::aes(tooltip = tooltip), size = 1)
  }
  # databaseId field only present when called in Shiny app:
  if (!is.null(data$databaseId) && length(data$databaseId) > 1) {
    if (yscaleFixed) {
      scales <- "fixed"
    } else {
      scales <- "free_y"
    }
    # Fix: scalar condition now uses short-circuit `||` instead of the
    # element-wise `|` (both flags are validated length-1 logicals).
    if (stratifyByGender || stratifyByCalendarYear) {
      if (stratifyByAgeGroup) {
        plot <- plot + ggplot2::facet_grid(databaseId~plotData$ageGroup, scales = scales)
      } else {
        plot <- plot + ggplot2::facet_grid(databaseId~., scales = scales)
      }
    } else {
      plot <- plot + ggplot2::facet_grid(databaseId~., scales = scales)
    }
  } else {
    if (stratifyByAgeGroup) {
      plot <- plot + ggplot2::facet_grid(~ageGroup)
    }
  }
  # Wrap in an interactive widget; height grows with the number of databases.
  plot <- ggiraph::girafe(ggobj = plot,
                          options = list(
                            ggiraph::opts_sizing(width = .7),
                            ggiraph::opts_zoom(max = 5)),
                          width_svg = 15,
                          height_svg = 1.5 + 2*length(unique(data$databaseId)))
  return(plot)
}
# Scatter plot of covariate prevalence in a target vs. a comparator cohort
# (one dot per covariate, colored by domain), as an interactive widget.
#
# Args:
#   balance:         data frame with covariateName, mean1 (target mean),
#                    mean2 (comparator mean) and stdDiff columns; NAs are
#                    replaced by 0 up front.
#   domain:          "all", or a single domain name to restrict to.
#   targetLabel:     x-axis label (wrapped at 50 characters).
#   comparatorLabel: y-axis label (wrapped at 50 characters).
#
# Returns: a ggiraph::girafe htmlwidget.
plotCohortComparisonStandardizedDifference <- function(balance,
                                                       domain = "all",
                                                       targetLabel = "Mean Target",
                                                       comparatorLabel = "Mean Comparator") {
  balance <- balance %>%
    replace(is.na(.), 0)
  # Derive the domain from the first word of the covariate name; anything
  # outside the known domains is bucketed as "other".
  domains <- c("condition", "device", "drug", "measurement", "observation", "procedure")
  balance$domain <- tolower(gsub("[_ ].*", "", balance$covariateName))
  balance$domain[!balance$domain %in% domains] <- "other"
  if (domain != "all") {
    balance <- balance %>%
      dplyr::filter(.data$domain == !!domain)
  }
  # Can't make sense of plot with > 1000 dots anyway, so remove anything with small mean in both target and comparator:
  if (nrow(balance) > 1000) {
    balance <- balance %>%
      dplyr::filter(.data$mean1 > 0.01 | .data$mean2 > 0.01)
  }
  # ggiraph::geom_point_interactive(ggplot2::aes(tooltip = tooltip), size = 3, alpha = 0.6)
  balance$tooltip <- c(paste("Covariate Name:", balance$covariateName,
                             "\nDomain: ", balance$domain,
                             "\nMean Target: ", scales::comma(balance$mean1, accuracy = 0.1),
                             "\nMean Comparator:", scales::comma(balance$mean2, accuracy = 0.1),
                             "\nStd diff.:", scales::comma(balance$stdDiff, accuracy = 0.1)))
  # Code used to generate palette:
  # writeLines(paste(RColorBrewer::brewer.pal(n = length(domains), name = "Dark2"), collapse = "\", \""))
  # Make sure colors are consistent, no matter which domains are included:
  colors <- c("#1B9E77", "#D95F02", "#7570B3", "#E7298A", "#66A61E", "#E6AB02", "#444444")
  colors <- colors[c(domains, "other") %in% unique(balance$domain)]
  balance$domain <- factor(balance$domain, levels = c(domains, "other"))
  targetLabel <- paste(strwrap(targetLabel, width = 50), collapse = "\n")
  comparatorLabel <- paste(strwrap(comparatorLabel, width = 50), collapse = "\n")
  # Dashed y = x line marks perfect balance between the two cohorts.
  plot <- ggplot2::ggplot(balance, ggplot2::aes(x = .data$mean1, y = .data$mean2, color = .data$domain)) +
    ggiraph::geom_point_interactive(ggplot2::aes(tooltip = .data$tooltip), size = 3,shape = 16, alpha = 0.5) +
    ggplot2::geom_abline(slope = 1, intercept = 0, linetype = "dashed") +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::geom_vline(xintercept = 0) +
    ggplot2::scale_x_continuous(targetLabel, limits = c(0, 1)) +
    ggplot2::scale_y_continuous(comparatorLabel, limits = c(0, 1)) +
    ggplot2::scale_color_manual("Domain", values = colors)
  plot <- ggiraph::girafe(ggobj = plot,
                          options = list(
                            ggiraph::opts_sizing(width = .7),
                            ggiraph::opts_zoom(max = 5)),width_svg = 12,
                          height_svg = 5)
  return(plot)
}
# Draw the overlap between a target and a comparator cohort as a pairwise
# Venn diagram (VennDiagram/grid graphics, not ggplot2).
#
# Args:
#   data:                tibble with eitherSubjects, tOnlySubjects,
#                        cOnlySubjects and bothSubjects counts. Absolute
#                        values are taken below — presumably negative counts
#                        encode censored values; TODO confirm against the
#                        data model.
#   targetCohortIds:     numeric target cohort id(s); validated only.
#   comparatorCohortIds: numeric comparator cohort id(s); validated only.
#   databaseIds:         optional character database id(s); validated only.
#
# Returns: the grid object for the diagram; the diagram is also drawn as a
# side effect via grid::grid.draw().
plotCohortOverlapVennDiagram <- function(data,
                                         targetCohortIds,
                                         comparatorCohortIds,
                                         databaseIds) {
  # Perform error checks for input variables. Fix: the assertions on the
  # cohort ids and database ids previously omitted `add = errorMessage`, so
  # they aborted immediately instead of being collected and reported
  # together like everywhere else in this file.
  errorMessage <- checkmate::makeAssertCollection()
  checkmate::assertTibble(x = data,
                          any.missing = FALSE,
                          min.rows = 1,
                          min.cols = 5,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertDouble(x = targetCohortIds,
                          lower = 1,
                          upper = 2^53,
                          any.missing = FALSE,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertDouble(x = comparatorCohortIds,
                          lower = 1,
                          upper = 2^53,
                          any.missing = FALSE,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertCharacter(x = databaseIds,
                             any.missing = FALSE,
                             min.len = 1,
                             null.ok = TRUE,
                             add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  # Circle sizes: total subjects in target (either - comparator-only) and in
  # comparator (either - target-only); the intersection is bothSubjects.
  plot <- VennDiagram::draw.pairwise.venn(area1 = abs(data$eitherSubjects) - abs(data$cOnlySubjects),
                                          area2 = abs(data$eitherSubjects) - abs(data$tOnlySubjects),
                                          cross.area = abs(data$bothSubjects),
                                          category = c("Target", "Comparator"),
                                          col = c(rgb(0.8, 0, 0), rgb(0, 0, 0.8)),
                                          fill = c(rgb(0.8, 0, 0), rgb(0, 0, 0.8)),
                                          alpha = 0.2,
                                          fontfamily = rep("sans", 3),
                                          cat.fontfamily = rep("sans", 2),
                                          margin = 0.01,
                                          ind = FALSE)
  # Format the three count labels with thousands separators.
  # Borrowed from https://stackoverflow.com/questions/37239128/how-to-put-comma-in-large-number-of-venndiagram
  idx <- sapply(plot, function(i) grepl("text", i$name))
  for (i in 1:3) {
    plot[idx][[i]]$label <- format(as.numeric(plot[idx][[i]]$label),
                                   big.mark = ",",
                                   scientific = FALSE)
  }
  grid::grid.draw(plot)
  return(plot)
}
# Stacked bar chart of subject overlap between target and comparator cohorts
# (target-only / both / comparator-only), faceted by target cohort (rows)
# and database (columns), as an interactive ggiraph widget.
#
# Args:
#   data:  tibble with databaseId, targetCohortId, comparatorCohortId,
#          tOnlySubjects, cOnlySubjects, bothSubjects (and, per the code
#          below, eitherSubjects plus target/comparator short and full name
#          columns). Negative counts are rendered with a "<" prefix —
#          presumably censored small counts; TODO confirm.
#   yAxis: "Percentages" for proportionally scaled bars, anything else for
#          absolute stacked counts.
#
# Returns: a ggiraph::girafe htmlwidget.
plotCohortOverlap <- function(data,
                              yAxis = "Percentages") {
  # Perform error checks for input variables
  errorMessage <- checkmate::makeAssertCollection()
  checkmate::assertTibble(x = data,
                          any.missing = FALSE,
                          min.rows = 1,
                          min.cols = 6,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  checkmate::assertNames(x = colnames(data),
                         must.include = c("databaseId",
                                          "targetCohortId",
                                          "comparatorCohortId",
                                          "tOnlySubjects",
                                          "cOnlySubjects",
                                          "bothSubjects"),
                         add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  # Build display strings "count (percent)" per overlap segment, preserving
  # a "<" prefix for censored (negative) counts, then assemble the tooltip
  # and reshape to long format (one row per bar segment).
  plotData <- data %>%
    dplyr::mutate(absTOnlySubjects = abs(.data$tOnlySubjects),
                  absCOnlySubjects = abs(.data$cOnlySubjects),
                  absBothSubjects = abs(.data$bothSubjects),
                  absEitherSubjects = abs(.data$eitherSubjects),
                  signTOnlySubjects = dplyr::case_when(.data$tOnlySubjects < 0 ~ '<', TRUE ~ ''),
                  signCOnlySubjects = dplyr::case_when(.data$cOnlySubjects < 0 ~ '<', TRUE ~ ''),
                  signBothSubjects = dplyr::case_when(.data$bothSubjects < 0 ~ '<', TRUE ~ '')) %>%
    dplyr::mutate(tOnlyString = paste0(.data$signTOnlySubjects,
                                       scales::comma(.data$absTOnlySubjects),
                                       " (",
                                       .data$signTOnlySubjects,
                                       scales::percent(.data$absTOnlySubjects/.data$absEitherSubjects,
                                                       accuracy = 1),
                                       ")"),
                  cOnlyString = paste0(.data$signCOnlySubjects,
                                       scales::comma(.data$absCOnlySubjects),
                                       " (",
                                       .data$signCOnlySubjects,
                                       scales::percent(.data$absCOnlySubjects/.data$absEitherSubjects,
                                                       accuracy = 1),
                                       ")"),
                  bothString = paste0(.data$signBothSubjects,
                                      scales::comma(.data$absBothSubjects),
                                      " (",
                                      .data$signBothSubjects,
                                      scales::percent(.data$absBothSubjects/.data$absEitherSubjects,
                                                      accuracy = 1),
                                      ")")) %>%
    dplyr::mutate(tooltip = paste0("Database: ", .data$databaseId,
                                   "\n", .data$targetShortName, ": ", .data$targetCohortName,
                                   "\n", .data$comparatorShortName, ": ", .data$comparatorCohortName,
                                   "\n", .data$targetShortName, " only: ", .data$tOnlyString,
                                   "\n", .data$comparatorShortName, " only: ", .data$cOnlyString,
                                   "\nBoth: ", .data$bothString)) %>%
    dplyr::select(.data$targetShortName,
                  .data$comparatorShortName,
                  .data$databaseId,
                  .data$absTOnlySubjects,
                  .data$absCOnlySubjects,
                  .data$absBothSubjects,
                  .data$tooltip) %>%
    tidyr::pivot_longer(cols = c("absTOnlySubjects",
                                 "absCOnlySubjects",
                                 "absBothSubjects"),
                        names_to = "subjectsIn",
                        values_to = "value") %>%
    dplyr::mutate(subjectsIn = camelCaseToTitleCase(stringr::str_replace_all(string = .data$subjectsIn,
                                                                             pattern = "abs|Subjects",
                                                                             replacement = "")))
  # Fix the stacking order of the segments: target-only, both, comparator-only.
  plotData$subjectsIn <- factor(plotData$subjectsIn, levels = c(" T Only", " Both", " C Only"))
  # "fill" scales each bar to 100%; "stack" keeps absolute counts.
  if (yAxis == "Percentages") {
    position = "fill"
  } else {
    position = "stack"
  }
  plot <- ggplot2::ggplot(data = plotData) +
    ggplot2::aes(fill = .data$subjectsIn,
                 y = .data$value,
                 x = .data$comparatorShortName,
                 tooltip = .data$tooltip,
                 group = .data$subjectsIn) +
    ggplot2::ylab(label = "") +
    ggplot2::xlab(label = "") +
    ggplot2::scale_fill_manual("Subjects in", values = c(rgb(0.8, 0.2, 0.2), rgb(0.3, 0.2, 0.4), rgb(0.4, 0.4, 0.9))) +
    ggplot2::facet_grid(.data$targetShortName ~ .data$databaseId, drop = FALSE) +
    ggiraph::geom_bar_interactive(position = position, alpha = 0.6, stat = "identity")
  if (yAxis == "Percentages") {
    plot <- plot + ggplot2::scale_y_continuous(labels = scales::percent)
  } else {
    plot <- plot + ggplot2::scale_y_continuous(labels = scales::comma)
  }
  # Size the widget proportionally to the facet grid dimensions.
  width <- 1.5 + 1*length(unique(plotData$databaseId))
  height <- 1.5 + 1*length(unique(plotData$targetShortName))
  aspectRatio <- width / height
  plot <- ggiraph::girafe(ggobj = plot,
                          options = list(
                            ggiraph::opts_sizing(width = .7),
                            ggiraph::opts_zoom(max = 5)),
                          width_svg = 6 * aspectRatio,
                          height_svg = 6)
  return(plot)
}
# Future function getCohortOverlapHistogram:
# 1. https://stackoverflow.com/questions/20184096/how-to-plot-multiple-stacked-histograms-together-in-r
# 2. https://stackoverflow.com/questions/43415709/how-to-use-facet-grid-with-geom-histogram
# 3. https://www.datacamp.com/community/tutorials/facets-ggplot-r?utm_source=adwords_ppc&utm_campaignid=1455363063&utm_adgroupid=65083631748&utm_device=c&utm_keyword=&utm_matchtype=b&utm_network=g&utm_adpostion=&utm_creative=332602034361&utm_targetid=dsa-429603003980&utm_loc_interest_ms=&utm_loc_physical_ms=1007768&gclid=CjwKCAjw19z6BRAYEiwAmo64LQMUJwf1i0V-Zgc5hYhpDOFQeZU05reAJmQvo2-mClFWWM4_sJiSmBoC-YkQAvD_BwE
# 4. https://stackoverflow.com/questions/24123499/frequency-histograms-with-facets-calculating-percent-by-groups-used-in-facet-i
# 5. https://stackoverflow.com/questions/62821480/add-a-trace-to-every-facet-of-a-plotly-figure
# ComparatorOnlySubjs <- generateHistogramValues(len = seq(1:nrow(data)), val = data$cOnlySubjects)
# bothSubjs <- generateHistogramValues(seq(1:nrow(data)), data$bothSubjects)
# cohortOnlySubjs <- generateHistogramValues(seq(1:nrow(data)), data$tOnlySubjects)
# bucket <- list(ComparatorOnlySubjs = ComparatorOnlySubjs, bothSubjs = bothSubjs, cohortOnlySubjs = cohortOnlySubjs)
#
#
# p <- ggplot2::ggplot(reshape::melt(bucket), ggplot2::aes(value, fill = L1)) +
# ggplot2::xlab(label = "Comparators") +
# ggplot2::geom_histogram(position = "stack", binwidth = 1) +
# ggplot2::xlim(c(0,max(length(comparatorCohortIds()),10))) +
# ggplot2::facet_grid(rows = ggplot2::vars(data$targetCohortId),
# cols = ggplot2::vars(data$databaseId), scales = "free_y")
# plot <- plotly::ggplotly(p)
# GENERATE HISTOGRAM FUNCTION
# generateHistogramValues <- function(len,val)
# {
# fillVal <- c()
#
# inc <- 1
# for (i in len)
# {
# fillVal <- c(fillVal,rep(i,val[[i]]))
# }
# return(fillVal);
# }
| /CohortDiagnosticsBreastCancer/R/Plots.R | no_license | OHDSI/ShinyDeploy | R | false | false | 24,778 | r | plotTimeDistribution <- function(data,
cohortIds = NULL,
databaseIds = NULL,
xAxis = "database") {
if (is.null(cohortIds) || length(cohortIds) > 1 || xAxis != "database" || is.null(databaseIds)) {
warning("Not yet supported. Upcoming feature.")
return(NULL)
}
# Perform error checks for input variables
errorMessage <- checkmate::makeAssertCollection()
checkmate::assertTibble(x = data,
any.missing = FALSE,
min.rows = 1,
min.cols = 5,
null.ok = FALSE,
add = errorMessage)
checkmate::assertDouble(x = cohortIds,
lower = 1,
upper = 2^53,
any.missing = FALSE,
null.ok = TRUE,
min.len = 1,
add = errorMessage)
checkmate::assertCharacter(x = databaseIds,
any.missing = FALSE,
null.ok = TRUE,
min.len = 1,
unique = TRUE,
add = errorMessage)
checkmate::assertChoice(x = xAxis,
choices = c("database", "cohortId"),
add = errorMessage)
checkmate::assertNames(x = colnames(data),
must.include = c("Min", "P25", "Median", "P75", "Max"),
add = errorMessage)
checkmate::reportAssertions(collection = errorMessage)
plotData <- data
if (!is.null(cohortIds)) {
plotData <- plotData %>%
dplyr::filter(.data$cohortId %in% !!cohortIds)
}
if (!is.null(databaseIds)) {
plotData <- plotData %>%
dplyr::filter(.data$Database %in% !!databaseIds)
}
plot <- ggplot2::ggplot(data = plotData) +
ggplot2::aes(x = .data$Database,
ymin = .data$Min,
lower = .data$P25,
middle = .data$Median,
upper = .data$P75,
ymax = .data$Max,
group = .data$TimeMeasure,
average = .data$Average) +
ggplot2::geom_errorbar(mapping = ggplot2::aes(ymin = .data$Min,
ymax = .data$Max), size = 0.5) +
ggplot2::geom_boxplot(stat = "identity",
fill = rgb(0, 0, 0.8, alpha = 0.25),
size = 0.2) +
ggplot2::facet_grid(rows = Database~TimeMeasure, scales = "free", switch = "y") +
ggplot2::coord_flip() +
ggplot2::theme(panel.grid.major.y = ggplot2::element_blank(),
panel.grid.minor.y = ggplot2::element_blank(),
axis.title.y = ggplot2::element_blank(),
axis.ticks.y = ggplot2::element_blank(),
axis.text.y = ggplot2::element_blank(),
strip.text.y.left = ggplot2::element_text(angle = 0))
plot <- ggiraph::girafe(ggobj = plot,
options = list(
ggiraph::opts_sizing(width = .7),
ggiraph::opts_zoom(max = 5)),
width_svg = 12,
height_svg = 0.7 + 0.5 * length(databaseIds))
return(plot)
}
# how to render using pure plot ly. Plotly does not prefer precomputed data.
# TO DO: color and plot positions are not consistent yet.
# plot <- plotly::plot_ly(data = plotData,
# type = "box",
# median = plotData$P25,
# #Mean = plotData$Average,
# upperfence = plotData$Max,
# lowerfence = plotData$Min,
# split = plotData$TimeMeasure)
# loop thru database or cohorts as needed
# then subplot
# plot <- plotly::subplot(plots,nrows = length(input$databases),margin = 0.05)
# Plot incidence rates, optionally stratified by age group, gender and
# calendar year, as an interactive ggiraph widget. Calendar-year
# stratification produces a line plot; otherwise a bar plot is drawn.
#
# Args:
#   data:                   tibble with one row per stratum; must contain an
#                           incidenceRate column plus (depending on the
#                           stratification flags) gender, ageGroup and
#                           calendarYear, and for tooltips databaseId,
#                           personYears and cohortCount.
#   cohortIds:              optional cohort ids to keep.
#   databaseIds:            optional database ids to keep.
#   stratifyByAgeGroup:     facet/group by age group?
#   stratifyByGender:       color/group by gender?
#   stratifyByCalendarYear: put calendar year on the x axis (line plot)?
#   yscaleFixed:            use a fixed y scale across database facets?
#
# Returns: a ggiraph::girafe htmlwidget.
plotIncidenceRate <- function(data,
                              cohortIds = NULL,
                              databaseIds = NULL,
                              stratifyByAgeGroup = TRUE,
                              stratifyByGender = TRUE,
                              stratifyByCalendarYear = TRUE,
                              yscaleFixed = FALSE) {
  if (nrow(data) == 0) {
    # Execution continues; the assertTibble(min.rows = 1) below raises the
    # actual error for empty input.
    ParallelLogger::logWarn("Record counts are too low to plot.")
  }
  # Validate inputs; failures are collected and reported together.
  errorMessage <- checkmate::makeAssertCollection()
  checkmate::assertTibble(x = data,
                          any.missing = TRUE,
                          min.rows = 1,
                          min.cols = 5,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertDouble(x = cohortIds,
                          lower = 1,
                          upper = 2^53,
                          any.missing = FALSE,
                          null.ok = TRUE,
                          min.len = 1,
                          add = errorMessage)
  checkmate::assertCharacter(x = databaseIds,
                             any.missing = FALSE,
                             null.ok = TRUE,
                             min.len = 1,
                             unique = TRUE,
                             add = errorMessage)
  checkmate::assertLogical(x = stratifyByAgeGroup,
                           any.missing = FALSE,
                           min.len = 1,
                           max.len = 1,
                           null.ok = FALSE,
                           add = errorMessage)
  checkmate::assertLogical(x = stratifyByGender,
                           any.missing = FALSE,
                           min.len = 1,
                           max.len = 1,
                           null.ok = FALSE,
                           add = errorMessage)
  checkmate::assertLogical(x = stratifyByCalendarYear,
                           any.missing = FALSE,
                           min.len = 1,
                           max.len = 1,
                           null.ok = FALSE,
                           add = errorMessage)
  checkmate::assertLogical(x = yscaleFixed,
                           any.missing = FALSE,
                           min.len = 1,
                           max.len = 1,
                           null.ok = FALSE,
                           add = errorMessage)
  checkmate::assertDouble(x = data$incidenceRate,
                          lower = 0,
                          any.missing = FALSE,
                          null.ok = FALSE,
                          min.len = 1,
                          add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  # NOTE(review): the assertion + report below duplicates the one directly
  # above verbatim; harmless but redundant.
  checkmate::assertDouble(x = data$incidenceRate,
                          lower = 0,
                          any.missing = FALSE,
                          null.ok = FALSE,
                          min.len = 1,
                          add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  plotData <- data %>%
    dplyr::mutate(incidenceRate = round(.data$incidenceRate, digits = 3))
  # Restrict to the requested cohort(s) and database(s).
  if (!is.null(cohortIds)) {
    plotData <- plotData %>%
      dplyr::filter(.data$cohortId %in% !!cohortIds)
  }
  if (!is.null(databaseIds)) {
    plotData <- plotData %>%
      dplyr::filter(.data$databaseId %in% !!databaseIds)
  }
  # Keep only rows whose stratification pattern (gender/age/year present or
  # absent) matches the requested stratification flags.
  plotData <- plotData %>%
    dplyr::mutate(strataGender = !is.na(.data$gender),
                  strataAgeGroup = !is.na(.data$ageGroup),
                  strataCalendarYear = !is.na(.data$calendarYear)) %>%
    dplyr::filter(.data$strataGender %in% !!stratifyByGender &
                    .data$strataAgeGroup %in% !!stratifyByAgeGroup &
                    .data$strataCalendarYear %in% !!stratifyByCalendarYear) %>%
    dplyr::select(-dplyr::starts_with("strata"))
  # Build the aesthetic mapping as strings for aes_string() below.
  aesthetics <- list(y = "incidenceRate")
  if (stratifyByCalendarYear) {
    aesthetics$x <- "calendarYear"
    xLabel <- "Calender year"
    showX <- TRUE
    if (stratifyByGender) {
      aesthetics$group <- "gender"
      aesthetics$color <- "gender"
    }
    plotType <- "line"
  } else {
    xLabel <- ""
    if (stratifyByGender) {
      aesthetics$x <- "gender"
      aesthetics$color <- "gender"
      aesthetics$fill <- "gender"
      showX <- TRUE
    } else if (stratifyByAgeGroup) {
      aesthetics$x <- "ageGroup"
      showX <- TRUE
    }
    else{
      aesthetics$x <- "cohortId"
      showX <- FALSE
    }
    plotType <- "bar"
  }
  # Order age groups numerically by the lower bound of the "lo-hi" label.
  newSort <- plotData %>%
    dplyr::select(.data$ageGroup) %>%
    dplyr::distinct() %>%
    dplyr::arrange(as.integer(sub(pattern = '-.+$','',x = .data$ageGroup)))
  plotData <- plotData %>%
    dplyr::arrange(ageGroup = factor(.data$ageGroup, levels = newSort$ageGroup), .data$ageGroup)
  plotData$ageGroup <- factor(plotData$ageGroup,
                              levels = newSort$ageGroup)
  # Assemble the hover tooltip, appending one line per active stratum.
  plotData$tooltip <- c(paste0("Incidence Rate = ", scales::comma(plotData$incidenceRate, accuracy = 0.01),
                               "\nDatabase = ", plotData$databaseId,
                               "\nPerson years = ", scales::comma(plotData$personYears, accuracy = 0.1),
                               "\nCohort count = ", scales::comma(plotData$cohortCount)))
  if (stratifyByAgeGroup) {
    plotData$tooltip <- c(paste0(plotData$tooltip, "\nAge Group = ", plotData$ageGroup))
  }
  if (stratifyByGender) {
    plotData$tooltip <- c(paste0(plotData$tooltip, "\nGender = ", plotData$gender))
  }
  if (stratifyByCalendarYear) {
    plotData$tooltip <- c(paste0(plotData$tooltip, "\nYear = ", plotData$calendarYear))
  }
  plot <- ggplot2::ggplot(data = plotData,
                          do.call(ggplot2::aes_string, aesthetics)) +
    ggplot2::xlab(xLabel) +
    ggplot2::ylab("Incidence Rate (/1,000 person years)") +
    ggplot2::theme(legend.position = "top",
                   legend.title = ggplot2::element_blank(),
                   axis.text.x = if (showX) ggplot2::element_text(angle = 90, vjust = 0.5) else ggplot2::element_blank() )
  if (plotType == "line") {
    plot <- plot +
      ggiraph::geom_line_interactive(ggplot2::aes(), size = 1, alpha = 0.6) +
      ggiraph::geom_point_interactive(ggplot2::aes(tooltip = tooltip), size = 2, alpha = 0.6)
  } else {
    plot <- plot + ggplot2::geom_bar(stat = "identity") +
      ggiraph::geom_col_interactive( ggplot2::aes(tooltip = tooltip), size = 1)
  }
  # databaseId field only present when called in Shiny app:
  if (!is.null(data$databaseId) && length(data$databaseId) > 1) {
    if (yscaleFixed) {
      scales <- "fixed"
    } else {
      scales <- "free_y"
    }
    # NOTE(review): `|` on two length-1 logicals works but short-circuit `||`
    # is the idiomatic scalar operator here.
    if (stratifyByGender | stratifyByCalendarYear) {
      if (stratifyByAgeGroup) {
        plot <- plot + ggplot2::facet_grid(databaseId~plotData$ageGroup, scales = scales)
      } else {
        plot <- plot + ggplot2::facet_grid(databaseId~., scales = scales)
      }
    }
    else
    {
      plot <- plot + ggplot2::facet_grid(databaseId~., scales = scales)
    }
  } else {
    if (stratifyByAgeGroup) {
      plot <- plot + ggplot2::facet_grid(~ageGroup)
    }
  }
  # Wrap in an interactive widget; height grows with the number of databases.
  plot <- ggiraph::girafe(ggobj = plot,
                          options = list(
                            ggiraph::opts_sizing(width = .7),
                            ggiraph::opts_zoom(max = 5)),
                          width_svg = 15,
                          height_svg = 1.5 + 2*length(unique(data$databaseId)))
  return(plot)
}
# Scatter plot of covariate prevalence in a target vs. a comparator cohort
# (one dot per covariate, colored by domain), as an interactive widget.
#
# Args:
#   balance:         data frame with covariateName, mean1 (target mean),
#                    mean2 (comparator mean) and stdDiff columns; NAs are
#                    replaced by 0 up front.
#   domain:          "all", or a single domain name to restrict to.
#   targetLabel:     x-axis label (wrapped at 50 characters).
#   comparatorLabel: y-axis label (wrapped at 50 characters).
#
# Returns: a ggiraph::girafe htmlwidget.
plotCohortComparisonStandardizedDifference <- function(balance,
                                                       domain = "all",
                                                       targetLabel = "Mean Target",
                                                       comparatorLabel = "Mean Comparator") {
  balance <- balance %>%
    replace(is.na(.), 0)
  # Derive the domain from the first word of the covariate name; anything
  # outside the known domains is bucketed as "other".
  domains <- c("condition", "device", "drug", "measurement", "observation", "procedure")
  balance$domain <- tolower(gsub("[_ ].*", "", balance$covariateName))
  balance$domain[!balance$domain %in% domains] <- "other"
  if (domain != "all") {
    balance <- balance %>%
      dplyr::filter(.data$domain == !!domain)
  }
  # Can't make sense of plot with > 1000 dots anyway, so remove anything with small mean in both target and comparator:
  if (nrow(balance) > 1000) {
    balance <- balance %>%
      dplyr::filter(.data$mean1 > 0.01 | .data$mean2 > 0.01)
  }
  # ggiraph::geom_point_interactive(ggplot2::aes(tooltip = tooltip), size = 3, alpha = 0.6)
  balance$tooltip <- c(paste("Covariate Name:", balance$covariateName,
                             "\nDomain: ", balance$domain,
                             "\nMean Target: ", scales::comma(balance$mean1, accuracy = 0.1),
                             "\nMean Comparator:", scales::comma(balance$mean2, accuracy = 0.1),
                             "\nStd diff.:", scales::comma(balance$stdDiff, accuracy = 0.1)))
  # Code used to generate palette:
  # writeLines(paste(RColorBrewer::brewer.pal(n = length(domains), name = "Dark2"), collapse = "\", \""))
  # Make sure colors are consistent, no matter which domains are included:
  colors <- c("#1B9E77", "#D95F02", "#7570B3", "#E7298A", "#66A61E", "#E6AB02", "#444444")
  colors <- colors[c(domains, "other") %in% unique(balance$domain)]
  balance$domain <- factor(balance$domain, levels = c(domains, "other"))
  targetLabel <- paste(strwrap(targetLabel, width = 50), collapse = "\n")
  comparatorLabel <- paste(strwrap(comparatorLabel, width = 50), collapse = "\n")
  # Dashed y = x line marks perfect balance between the two cohorts.
  plot <- ggplot2::ggplot(balance, ggplot2::aes(x = .data$mean1, y = .data$mean2, color = .data$domain)) +
    ggiraph::geom_point_interactive(ggplot2::aes(tooltip = .data$tooltip), size = 3,shape = 16, alpha = 0.5) +
    ggplot2::geom_abline(slope = 1, intercept = 0, linetype = "dashed") +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::geom_vline(xintercept = 0) +
    ggplot2::scale_x_continuous(targetLabel, limits = c(0, 1)) +
    ggplot2::scale_y_continuous(comparatorLabel, limits = c(0, 1)) +
    ggplot2::scale_color_manual("Domain", values = colors)
  plot <- ggiraph::girafe(ggobj = plot,
                          options = list(
                            ggiraph::opts_sizing(width = .7),
                            ggiraph::opts_zoom(max = 5)),width_svg = 12,
                          height_svg = 5)
  return(plot)
}
plotCohortOverlapVennDiagram <- function(data,
                                         targetCohortIds,
                                         comparatorCohortIds,
                                         databaseIds) {
  # Draw a pairwise Venn diagram of target/comparator cohort overlap.
  #
  # data:               tibble with (at least 5) columns including
  #                     eitherSubjects, tOnlySubjects, cOnlySubjects and
  #                     bothSubjects. Counts may be negative (censored
  #                     values), hence the abs() calls below.
  # targetCohortIds:    numeric cohort identifier(s) (1 .. 2^53).
  # comparatorCohortIds: numeric cohort identifier(s) (1 .. 2^53).
  # databaseIds:        optional character vector of database identifiers.
  #
  # Returns the VennDiagram grob list; the diagram is also drawn as a side
  # effect via grid::grid.draw().

  # Perform error checks for input variables. All assertions feed one
  # collection (`add = errorMessage`) so every problem is reported together
  # instead of stopping at the first failure; previously the assertDouble/
  # assertCharacter calls bypassed the collection and errored immediately.
  errorMessage <- checkmate::makeAssertCollection()
  checkmate::assertTibble(x = data,
                          any.missing = FALSE,
                          min.rows = 1,
                          min.cols = 5,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertDouble(x = targetCohortIds,
                          lower = 1,
                          upper = 2^53,
                          any.missing = FALSE,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertDouble(x = comparatorCohortIds,
                          lower = 1,
                          upper = 2^53,
                          any.missing = FALSE,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::assertCharacter(x = databaseIds,
                             any.missing = FALSE,
                             min.len = 1,
                             null.ok = TRUE,
                             add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  # Circle sizes: each circle is the union minus the other cohort's exclusive
  # count; the intersection is bothSubjects.
  plot <- VennDiagram::draw.pairwise.venn(area1 = abs(data$eitherSubjects) - abs(data$cOnlySubjects),
                                          area2 = abs(data$eitherSubjects) - abs(data$tOnlySubjects),
                                          cross.area = abs(data$bothSubjects),
                                          category = c("Target", "Comparator"),
                                          col = c(rgb(0.8, 0, 0), rgb(0, 0, 0.8)),
                                          fill = c(rgb(0.8, 0, 0), rgb(0, 0, 0.8)),
                                          alpha = 0.2,
                                          fontfamily = rep("sans", 3),
                                          cat.fontfamily = rep("sans", 2),
                                          margin = 0.01,
                                          ind = FALSE)
  # Reformat the three numeric area labels with thousands separators.
  # Borrowed from https://stackoverflow.com/questions/37239128/how-to-put-comma-in-large-number-of-venndiagram
  idx <- vapply(plot, function(i) grepl("text", i$name), logical(1))
  for (i in 1:3) {
    plot[idx][[i]]$label <- format(as.numeric(plot[idx][[i]]$label),
                                   big.mark = ",",
                                   scientific = FALSE)
  }
  grid::grid.draw(plot)
  return(plot)
}
plotCohortOverlap <- function(data,
                              yAxis = "Percentages") {
  # Render cohort overlap (target-only / both / comparator-only subject
  # counts) as interactive stacked bars, faceted by target cohort (rows)
  # and database (columns).
  #
  # data:  tibble with one row per (target, comparator, database)
  #        combination; must include the columns asserted below. It also
  #        references eitherSubjects, targetShortName, comparatorShortName
  #        and cohort-name columns further down -- TODO confirm callers
  #        guarantee those. Negative subject counts encode censored
  #        ("< n") values.
  # yAxis: "Percentages" for proportional (filled) bars; any other value
  #        stacks raw counts.
  #
  # Returns a ggiraph interactive graphic.

  # Perform error checks for input variables
  errorMessage <- checkmate::makeAssertCollection()
  checkmate::assertTibble(x = data,
                          any.missing = FALSE,
                          min.rows = 1,
                          min.cols = 6,
                          null.ok = FALSE,
                          add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  checkmate::assertNames(x = colnames(data),
                         must.include = c("databaseId",
                                          "targetCohortId",
                                          "comparatorCohortId",
                                          "tOnlySubjects",
                                          "cOnlySubjects",
                                          "bothSubjects"),
                         add = errorMessage)
  checkmate::reportAssertions(collection = errorMessage)
  plotData <- data %>%
    # Plot absolute counts; keep the sign separately so censored (negative)
    # values can be labelled with a leading '<'.
    dplyr::mutate(absTOnlySubjects = abs(.data$tOnlySubjects),
                  absCOnlySubjects = abs(.data$cOnlySubjects),
                  absBothSubjects = abs(.data$bothSubjects),
                  absEitherSubjects = abs(.data$eitherSubjects),
                  signTOnlySubjects = dplyr::case_when(.data$tOnlySubjects < 0 ~ '<', TRUE ~ ''),
                  signCOnlySubjects = dplyr::case_when(.data$cOnlySubjects < 0 ~ '<', TRUE ~ ''),
                  signBothSubjects = dplyr::case_when(.data$bothSubjects < 0 ~ '<', TRUE ~ '')) %>%
    # Build "count (percent)" display strings for the tooltip; percentages
    # are relative to the union of both cohorts (eitherSubjects).
    dplyr::mutate(tOnlyString = paste0(.data$signTOnlySubjects,
                                       scales::comma(.data$absTOnlySubjects),
                                       " (",
                                       .data$signTOnlySubjects,
                                       scales::percent(.data$absTOnlySubjects/.data$absEitherSubjects,
                                                       accuracy = 1),
                                       ")"),
                  cOnlyString = paste0(.data$signCOnlySubjects,
                                       scales::comma(.data$absCOnlySubjects),
                                       " (",
                                       .data$signCOnlySubjects,
                                       scales::percent(.data$absCOnlySubjects/.data$absEitherSubjects,
                                                       accuracy = 1),
                                       ")"),
                  bothString = paste0(.data$signBothSubjects,
                                      scales::comma(.data$absBothSubjects),
                                      " (",
                                      .data$signBothSubjects,
                                      scales::percent(.data$absBothSubjects/.data$absEitherSubjects,
                                                      accuracy = 1),
                                      ")")) %>%
    dplyr::mutate(tooltip = paste0("Database: ", .data$databaseId,
                                   "\n", .data$targetShortName, ": ", .data$targetCohortName,
                                   "\n", .data$comparatorShortName, ": ", .data$comparatorCohortName,
                                   "\n", .data$targetShortName, " only: ", .data$tOnlyString,
                                   "\n", .data$comparatorShortName, " only: ", .data$cOnlyString,
                                   "\nBoth: ", .data$bothString)) %>%
    dplyr::select(.data$targetShortName,
                  .data$comparatorShortName,
                  .data$databaseId,
                  .data$absTOnlySubjects,
                  .data$absCOnlySubjects,
                  .data$absBothSubjects,
                  .data$tooltip) %>%
    # Long format: one row per (combination, overlap category).
    tidyr::pivot_longer(cols = c("absTOnlySubjects",
                                 "absCOnlySubjects",
                                 "absBothSubjects"),
                        names_to = "subjectsIn",
                        values_to = "value") %>%
    # Strip the "abs"/"Subjects" affixes and title-case what remains.
    # camelCaseToTitleCase is a package-internal helper; its output is
    # assumed to match the factor levels set just below -- TODO confirm.
    dplyr::mutate(subjectsIn = camelCaseToTitleCase(stringr::str_replace_all(string = .data$subjectsIn,
                                                                             pattern = "abs|Subjects",
                                                                             replacement = "")))
  # Fix the stacking/legend order of the overlap categories.
  plotData$subjectsIn <- factor(plotData$subjectsIn, levels = c(" T Only", " Both", " C Only"))
  if (yAxis == "Percentages") {
    position = "fill"
  } else {
    position = "stack"
  }
  plot <- ggplot2::ggplot(data = plotData) +
    ggplot2::aes(fill = .data$subjectsIn,
                 y = .data$value,
                 x = .data$comparatorShortName,
                 tooltip = .data$tooltip,
                 group = .data$subjectsIn) +
    ggplot2::ylab(label = "") +
    ggplot2::xlab(label = "") +
    ggplot2::scale_fill_manual("Subjects in", values = c(rgb(0.8, 0.2, 0.2), rgb(0.3, 0.2, 0.4), rgb(0.4, 0.4, 0.9))) +
    ggplot2::facet_grid(.data$targetShortName ~ .data$databaseId, drop = FALSE) +
    ggiraph::geom_bar_interactive(position = position, alpha = 0.6, stat = "identity")
  if (yAxis == "Percentages") {
    plot <- plot + ggplot2::scale_y_continuous(labels = scales::percent)
  } else {
    plot <- plot + ggplot2::scale_y_continuous(labels = scales::comma)
  }
  # Scale the SVG canvas with the number of facet columns/rows.
  width <- 1.5 + 1*length(unique(plotData$databaseId))
  height <- 1.5 + 1*length(unique(plotData$targetShortName))
  aspectRatio <- width / height
  plot <- ggiraph::girafe(ggobj = plot,
                          options = list(
                            ggiraph::opts_sizing(width = .7),
                            ggiraph::opts_zoom(max = 5)),
                          width_svg = 6 * aspectRatio,
                          height_svg = 6)
  return(plot)
}
# Future function getCohortOverlapHistogram:
# 1. https://stackoverflow.com/questions/20184096/how-to-plot-multiple-stacked-histograms-together-in-r
# 2. https://stackoverflow.com/questions/43415709/how-to-use-facet-grid-with-geom-histogram
# 3. https://www.datacamp.com/community/tutorials/facets-ggplot-r?utm_source=adwords_ppc&utm_campaignid=1455363063&utm_adgroupid=65083631748&utm_device=c&utm_keyword=&utm_matchtype=b&utm_network=g&utm_adpostion=&utm_creative=332602034361&utm_targetid=dsa-429603003980&utm_loc_interest_ms=&utm_loc_physical_ms=1007768&gclid=CjwKCAjw19z6BRAYEiwAmo64LQMUJwf1i0V-Zgc5hYhpDOFQeZU05reAJmQvo2-mClFWWM4_sJiSmBoC-YkQAvD_BwE
# 4. https://stackoverflow.com/questions/24123499/frequency-histograms-with-facets-calculating-percent-by-groups-used-in-facet-i
# 5. https://stackoverflow.com/questions/62821480/add-a-trace-to-every-facet-of-a-plotly-figure
# ComparatorOnlySubjs <- generateHistogramValues(len = seq(1:nrow(data)), val = data$cOnlySubjects)
# bothSubjs <- generateHistogramValues(seq(1:nrow(data)), data$bothSubjects)
# cohortOnlySubjs <- generateHistogramValues(seq(1:nrow(data)), data$tOnlySubjects)
# bucket <- list(ComparatorOnlySubjs = ComparatorOnlySubjs, bothSubjs = bothSubjs, cohortOnlySubjs = cohortOnlySubjs)
#
#
# p <- ggplot2::ggplot(reshape::melt(bucket), ggplot2::aes(value, fill = L1)) +
# ggplot2::xlab(label = "Comparators") +
# ggplot2::geom_histogram(position = "stack", binwidth = 1) +
# ggplot2::xlim(c(0,max(length(comparatorCohortIds()),10))) +
# ggplot2::facet_grid(rows = ggplot2::vars(data$targetCohortId),
# cols = ggplot2::vars(data$databaseId), scales = "free_y")
# plot <- plotly::ggplotly(p)
# GENERATE HISTOGRAM FUNCTION
# generateHistogramValues <- function(len,val)
# {
# fillVal <- c()
#
# inc <- 1
# for (i in len)
# {
# fillVal <- c(fillVal,rep(i,val[[i]]))
# }
# return(fillVal);
# }
|
# Compare event and impulse based versions of the bgbsb1 model.
#
# For each neuron population this script loads per-run spike counts from the
# impulse-based ("bgbsb1_impt") and event-based ("bgbsb1") result directories
# and reports bootstrap means, between-model differences, and bootstrapped
# Studentized t-tests (b.* helpers come from bootstrap_functions.r).
source('./bootstrap_functions.r')

# Neuron populations; spike files are named <lowercase population>spikes.csv.
populations <- c("D1", "D2", "FSI", "STN", "GPe", "SNr")

# Read one population's single-column, headerless spike-count CSV from one
# results directory and return the counts as a vector.
readSpikes <- function(resultsDir, population) {
  tmp <- read.csv(file = sprintf("./results/%s/%sspikes.csv",
                                 resultsDir, tolower(population)),
                  header = FALSE, sep = ",")
  tmp$V1
}

impt <- setNames(lapply(populations, function(p) readSpikes("bgbsb1_impt", p)),
                 populations)
evbased <- setNames(lapply(populations, function(p) readSpikes("bgbsb1", p)),
                    populations)

print ("*********** Means *************")
# Bootstrap (256 resamples) standard error of the mean for each population.
for (p in populations) {
  m <- b.mean(impt[[p]], 256)
  print(sprintf("Impulse based model, %s mean: %f spikes with stderr %f",
                p, mean(impt[[p]]), m$std.err))
}
for (p in populations) {
  m <- b.mean(evbased[[p]], 256)
  print(sprintf("Event based model, %s mean: %f spikes with stderr %f",
                p, mean(evbased[[p]]), m$std.err))
}

print ("*********** Difference/Stderr *************")
# Bootstrap estimate of the between-model difference and its standard error.
for (p in populations) {
  d <- b.diffste(evbased[[p]], impt[[p]], 256)
  print(sprintf("For %s evbased/impulsebased, difference is %f, standard error estimate: %f",
                p, d$meandiff, d$stderr))
}

print ("*********** Studentized t-test *************")
# Bootstrapped (10000 resamples) t-test for equality of means per population.
# Note: STN is deliberately reported before FSI to match the original output order.
for (p in c("D1", "D2", "STN", "FSI", "GPe", "SNr")) {
  ttest <- b.ttest_equalityofmeans(evbased[[p]], impt[[p]], 10000)
  b.showsiglev(ttest, sprintf("Evbased vs Impulse based %s population", p))
}
| /labbook/bgbsb1_spineml_compare.r | no_license | ABRG-Models/GPR-BSB | R | false | false | 5,327 | r | # Compare event and impulse based versions of the bgbsb1 model.
# For each neuron population this script loads per-run spike counts from the
# impulse-based ("bgbsb1_impt") and event-based ("bgbsb1") result directories
# and reports bootstrap means, between-model differences, and bootstrapped
# Studentized t-tests (b.* helpers come from bootstrap_functions.r).
source('./bootstrap_functions.r')

# Neuron populations; spike files are named <lowercase population>spikes.csv.
populations <- c("D1", "D2", "FSI", "STN", "GPe", "SNr")

# Read one population's single-column, headerless spike-count CSV from one
# results directory and return the counts as a vector.
readSpikes <- function(resultsDir, population) {
  tmp <- read.csv(file = sprintf("./results/%s/%sspikes.csv",
                                 resultsDir, tolower(population)),
                  header = FALSE, sep = ",")
  tmp$V1
}

impt <- setNames(lapply(populations, function(p) readSpikes("bgbsb1_impt", p)),
                 populations)
evbased <- setNames(lapply(populations, function(p) readSpikes("bgbsb1", p)),
                    populations)

print ("*********** Means *************")
# Bootstrap (256 resamples) standard error of the mean for each population.
for (p in populations) {
  m <- b.mean(impt[[p]], 256)
  print(sprintf("Impulse based model, %s mean: %f spikes with stderr %f",
                p, mean(impt[[p]]), m$std.err))
}
for (p in populations) {
  m <- b.mean(evbased[[p]], 256)
  print(sprintf("Event based model, %s mean: %f spikes with stderr %f",
                p, mean(evbased[[p]]), m$std.err))
}

print ("*********** Difference/Stderr *************")
# Bootstrap estimate of the between-model difference and its standard error.
for (p in populations) {
  d <- b.diffste(evbased[[p]], impt[[p]], 256)
  print(sprintf("For %s evbased/impulsebased, difference is %f, standard error estimate: %f",
                p, d$meandiff, d$stderr))
}

print ("*********** Studentized t-test *************")
# Bootstrapped (10000 resamples) t-test for equality of means per population.
# Note: STN is deliberately reported before FSI to match the original output order.
for (p in c("D1", "D2", "STN", "FSI", "GPe", "SNr")) {
  ttest <- b.ttest_equalityofmeans(evbased[[p]], impt[[p]], 10000)
  b.showsiglev(ttest, sprintf("Evbased vs Impulse based %s population", p))
}
|
#' Helper functions for calculating cognitive complexity.
#'
#' \code{normalize_string} takes a character vector and normalizes its input using the symbols 0, 1, 2...9. \code{count_class} takes a character vector and an integer \code{alphabet} (with the restriction that the number of different symbols in the character vector doesn't exceed \code{alphabet}) and returns the total number of strings that are equivalent to the input when normalized and considering \code{alphabet}. \code{alternations} returns the number of alternations of symbols in a string.
#'
#' @usage normalize_string(string)
#'
#' count_class(string, alphabet)
#'
#' alternations(string, proportion = FALSE)
#'
#' @param string \code{character} vector containing the to be analyzed strings (can contain multiple strings).
#' @param alphabet \code{numeric}, the number of possible symbols (not necessarily actually appearing in string).
#' @param proportion \code{boolean}, indicating if the result from \code{alternations} should be given as a proportion (between 0 and 1) or the raw number of alternations (default is \code{FALSE} corresponding to raw values).
#'
#' @return
#' \describe{
#' \item{\code{normalize_string}}{A normalized vector of strings of the same length as \code{string}.}
#' \item{\code{count_class}}{A vector of the same length as \code{string} with the number of possible equivalent strings when \code{string} is normalized and considering \code{alphabet}.}
#' \item{\code{alternations}}{A vector with the number (or proprtion) of alternations of the same length as \code{string}}
#' }
#'
#' @details nothing yet.
#'
#' @name normalize_string
#' @aliases normalize_string count_class alternations
#' @export normalize_string count_class alternations
#'
#' @examples
#'
#' #normalize_string:
#' normalize_string(c("HUHHEGGTE", "EGGHHU"))
#'
#' normalize_string("293948837163536")
#'
#' # count_class
#' count_class("010011",2)
#'
#' count_class("332120",4)
#'
#' count_class(c("HUHHEGGTE", "EGGHHU"), 5)
#' count_class(c("HUHHEGGTE", "EGGHHU"), 6)
#'
#' # alternations:
#' alternations("0010233")
#' alternations("0010233", proportion = TRUE)
#'
#' alternations(c("HUHHEGGTE", "EGGHHU"))
#' alternations(c("HUHHEGGTE", "EGGHHU"), proportion = TRUE)
#'
normalize_string <- function(string){
  # Canonicalize each string by relabelling its symbols as 0,1,2,... in
  # order of first appearance, e.g. "HUHHEGGTE" -> "010023342".
  #
  # string: character vector; each element may use at most 10 distinct
  #         symbols because the digits 0-9 serve as labels.
  # Returns a character vector of normalized strings, same length as input.
  splitted <- strsplit(string, "")
  # Distinct symbols per string, in order of first appearance.
  elements <- lapply(splitted, unique)
  # Fixed error-message typo ("two many" -> "too many").
  if (any(vapply(elements, length, 0) > 10)) stop("too many symbols (more than 10)")
  # Replace each symbol by its 0-based index of first appearance.
  exchanged <- mapply(function(x, y) seq(0, length.out = length(x))[match(y, x)],
                      elements, splitted, SIMPLIFY = FALSE)
  # Collapse the numeric labels back into one string per input.
  vapply(exchanged, paste, "", collapse = "")
}
########## CountClass
# count_class(string, alphabet) = number of strings in the equivalence class
# of each input string, i.e. how many distinct strings reduce to the same
# normalized form when `alphabet` symbols are available.
#
# string:   character vector (normalized internally via normalize_string(),
#           so pre-normalized input is also fine).
# alphabet: total number of available symbols; must be at least the number
#           of distinct symbols in each string.
count_class <- function(string,alphabet){
  string <- normalize_string(string) # needs not be done for normalized strings
  splitted <- lapply(strsplit(string, ""), as.numeric)
  # k = number of distinct symbols in each string (labels are 0..k-1).
  k <- vapply(splitted, max, 0) + 1
  # (Removed a stray `max(k)` whose result was discarded.)
  if (any(k > alphabet)) stop("alphabet needs to be larger than the number of distinct symbols in each string.")
  ## to avoid unnecessary computations, compute factorial only for unique ks:
  unique.ks <- unique(k)
  # alphabet!/(alphabet-k)! = number of injective maps of the k used symbols
  # into the full alphabet.
  tmp.results <- factorial(alphabet)/factorial(alphabet-unique.ks)
  tmp.results[match(k, unique.ks)]
}
# Count how often adjacent symbols differ within each string.
#
# A string with r runs of identical characters has r - 1 alternations.
# With proportion = TRUE the count is divided by the number of adjacent
# character pairs (nchar - 1); with proportion = FALSE (default) the raw
# count is returned.
alternations <- function(string, proportion = FALSE) {
  chars_per_string <- strsplit(string, "")
  count_changes <- function(chars) {
    length(rle(chars)$lengths) - 1
  }
  n_alternations <- vapply(chars_per_string, count_changes, numeric(1))
  if (proportion) {
    n_alternations <- n_alternations / (nchar(string) - 1)
  }
  n_alternations
}
# Internal input guard shared by the helpers: stop with an informative
# error unless `string` is a plain character vector.
check_string <- function(string) {
  is_char_vec <- is.vector(string, mode = "character")
  if (!is_char_vec) {
    stop("string must be a character vector.")
  }
}
| /R/helper.R | no_license | singmann/acss | R | false | false | 4,092 | r | #' Helper functions for calculating cognitive complexity.
#'
#' \code{normalize_string} takes a character vector and normalizes its input using the symbols 0, 1, 2...9. \code{count_class} takes a character vector and an integer \code{alphabet} (with the restriction that the number of different symbols in the character vector doesn't exceed \code{alphabet}) and returns the total number of strings that are equivalent to the input when normalized and considering \code{alphabet}. \code{alternations} returns the number of alternations of symbols in a string.
#'
#' @usage normalize_string(string)
#'
#' count_class(string, alphabet)
#'
#' alternations(string, proportion = FALSE)
#'
#' @param string \code{character} vector containing the to be analyzed strings (can contain multiple strings).
#' @param alphabet \code{numeric}, the number of possible symbols (not necessarily actually appearing in string).
#' @param proportion \code{boolean}, indicating if the result from \code{alternations} should be given as a proportion (between 0 and 1) or the raw number of alternations (default is \code{FALSE} corresponding to raw values).
#'
#' @return
#' \describe{
#' \item{\code{normalize_string}}{A normalized vector of strings of the same length as \code{string}.}
#' \item{\code{count_class}}{A vector of the same length as \code{string} with the number of possible equivalent strings when \code{string} is normalized and considering \code{alphabet}.}
#' \item{\code{alternations}}{A vector with the number (or proprtion) of alternations of the same length as \code{string}}
#' }
#'
#' @details nothing yet.
#'
#' @name normalize_string
#' @aliases normalize_string count_class alternations
#' @export normalize_string count_class alternations
#'
#' @examples
#'
#' #normalize_string:
#' normalize_string(c("HUHHEGGTE", "EGGHHU"))
#'
#' normalize_string("293948837163536")
#'
#' # count_class
#' count_class("010011",2)
#'
#' count_class("332120",4)
#'
#' count_class(c("HUHHEGGTE", "EGGHHU"), 5)
#' count_class(c("HUHHEGGTE", "EGGHHU"), 6)
#'
#' # alternations:
#' alternations("0010233")
#' alternations("0010233", proportion = TRUE)
#'
#' alternations(c("HUHHEGGTE", "EGGHHU"))
#' alternations(c("HUHHEGGTE", "EGGHHU"), proportion = TRUE)
#'
# Canonicalize each string by relabelling its symbols as 0,1,2,... in order
# of first appearance (e.g. "HUHHEGGTE" -> "010023342"). At most 10 distinct
# symbols are supported because the digits 0-9 serve as labels.
normalize_string <- function(string){
  splitted <- strsplit(string, "")
  # Distinct symbols per string, in order of first appearance.
  elements <- lapply(splitted, unique)
  # NOTE(review): "two many" looks like a typo for "too many".
  if (any(vapply(elements, length, 0)>10)) stop("two many symbols (more than 10)")
  # Replace each symbol by its 0-based index of first appearance.
  exchanged <- mapply(function(x, y) seq(0, length.out = length(x))[match(y, x)], elements, splitted, SIMPLIFY=FALSE)
  #data.frame(string = vapply(exchanged, paste, "", collapse = ""), symbols = vapply(exchanged, max, 0)+1, stringsAsFactors = FALSE)
  # Collapse the numeric labels back into one string per input.
  vapply(exchanged, paste, "", collapse = "")
}
########## CountClass
# defines function CountClass(string)=number of strings in the class of string
# str is a string, alphabet the number of possible symbols (not necessarily actually appearing in str).
# str must be normalized (or add the 2d line)
count_class <- function(string,alphabet){
  string <- normalize_string(string) # needs not be done for normalized strings
  splitted <- lapply(strsplit(string, ""), as.numeric)
  # k = number of distinct symbols per (normalized) string; labels are 0..k-1.
  k <- vapply(splitted, max, 0) + 1
  # NOTE(review): the result of max(k) is discarded - likely leftover debug code.
  max(k)
  if (any(k > alphabet)) stop("alphabet needs to be larger as the number of elements in each string.")
  ## to avoid unnecessary computations, compute factorial only for unique ks:
  unique.ks <- unique(k)
  # alphabet!/(alphabet-k)! counts injective maps of k used symbols into the alphabet.
  tmp.results <- factorial(alphabet)/factorial(alphabet-unique.ks)
  # Map each input string back to the result computed for its k.
  tmp.results[match(k, unique.ks)]
}
# Count symbol alternations (changes between adjacent characters) per string.
alternations <- function(string, proportion = FALSE) # if prop=FALSE, returns the number of alternations. Is prop=TRUE, returns the proportion of alternations.
{
  l <- nchar(string)
  splitted <- strsplit(string, "")
  # Number of runs (via rle) minus one = number of adjacent changes.
  a <- vapply(splitted, function(x) length(rle(x)$length) - 1, 0)
  # A string of length l has l-1 adjacent pairs, hence the l-1 denominator.
  if (proportion) a <- a/(l-1)
  return(a)
}
# Internal guard: error unless `string` is a plain character vector.
check_string <- function(string) {
  if (!is.vector(string, mode = "character")) stop("string must be a character vector.")
}
|
#' chandwich: Chandler-Bate Sandwich Loglikelihood Adjustment
#'
#' Performs adjustments of an independence loglikelihood using
#' a robust sandwich estimator of the parameter covariance matrix, based on
#' the methodology in Chandler and Bate (2007). This can be used for cluster
#' correlated data when interest lies in the parameters of the marginal
#' distributions. Functions for profiling the adjusted loglikelihoods are also
#' provided, as are functions for calculating and plotting confidence
#' intervals, for single model parameters, and confidence regions, for pairs of
#' model parameters.
#'
#' @details
#' The main function in the chandwich package is \code{adjust_loglik}. It
#' finds the maximum likelihood estimate (MLE) of model parameters based on
#' an independence loglikelihood in which cluster dependence in the data is
#' ignored. The independence loglikelihood is adjusted in a way that ensures
#' that the Hessian of the adjusted loglikelihood coincides with a robust
#' sandwich estimate of the parameter covariance at the MLE. Three
#' adjustments are available: one in which the independence loglikelihood
#' itself is scaled (vertical scaling) and two others where the scaling
#' is in the parameter vector (horizontal scaling).
#'
#' See Chandler and Bate (2007) for full details and
#' \code{vignette("chandwich-vignette", package = "chandwich")} for an
#' overview of the package.
#' @references Chandler, R. E. and Bate, S. (2007). Inference for clustered
#' data using the independence loglikelihood. \emph{Biometrika},
#' \strong{94}(1), 167-183. \doi{10.1093/biomet/asm015}
#' @seealso \code{\link{adjust_loglik}} to adjust a user-supplied
#' loglikelihood.
#' @seealso \code{\link{compare_models}} to compare nested models using an
#' adjusted loglikelihood ratio test. See also the S3 method
#' \code{\link{anova.chandwich}}.
#' @seealso \code{\link{conf_intervals}} to calculate confidence intervals
#' for individual model parameters. See also the S3 method
#' \code{\link{confint.chandwich}}.
#' @seealso \code{\link{conf_region}} to calculate a confidence region
#' for a pair of model parameters.
#' @docType package
#' @aliases chandwich-package chandwich
#' @import methods
"_PACKAGE"
#' Oxford and Worthing annual maximum temperatures
#'
#' Annual maximum temperatures, in degrees Fahrenheit, at Oxford and
#' Worthing (England), for the period 1901 to 1980.
#'
#' @format A dataframe with 80 rows and 2 columns, named Oxford and Worthing.
#' @source Tabony, R. C. (1983) Extreme value analysis in meteorology.
#' \emph{The Meteorological Magazine}, \strong{112}, 77-98.
#' @references Chandler, R. E. and Bate, S. (2007). Inference for clustered
#' data using the independence loglikelihood. \emph{Biometrika},
#' \strong{94}(1), 167-183. \doi{10.1093/biomet/asm015}
"owtemps"
#' Rat tumor data
#'
#' Tumor incidence in 71 groups of rate from Tarone (1982).
#' The matrix \code{rat} has 71 rows and 2 columns.
#' Each row relates to a different group of rats.
#' The first column (\code{y}) contains the number of rats with tumors.
#' The second column (\code{n}) contains the total number of rats.
#'
#' @format A matrix with 71 rows and 2 columns.
#' @source Table 5.1 of Gelman, A., Carlin, J. B., Stern, H. S. Dunson, D. B.,
#' Vehtari, A. and Rubin, D. B. (2013) \emph{Bayesian Data Analysis},
#' Chapman & Hall / CRC.
#' \url{http://www.stat.columbia.edu/~gelman/book/data/rats.asc}
#' @references Tarone, R. E. (1982) The use of historical information in
#' testing for a trend in proportions. \emph{Biometrics}, \strong{38},
#' 215-220.
"rats"
| /R/chandwich-package.R | no_license | cran/chandwich | R | false | false | 3,711 | r | #' chandwich: Chandler-Bate Sandwich Loglikelihood Adjustment
#'
#' Performs adjustments of an independence loglikelihood using
#' a robust sandwich estimator of the parameter covariance matrix, based on
#' the methodology in Chandler and Bate (2007). This can be used for cluster
#' correlated data when interest lies in the parameters of the marginal
#' distributions. Functions for profiling the adjusted loglikelihoods are also
#' provided, as are functions for calculating and plotting confidence
#' intervals, for single model parameters, and confidence regions, for pairs of
#' model parameters.
#'
#' @details
#' The main function in the chandwich package is \code{adjust_loglik}. It
#' finds the maximum likelihood estimate (MLE) of model parameters based on
#' an independence loglikelihood in which cluster dependence in the data is
#' ignored. The independence loglikelihood is adjusted in a way that ensures
#' that the Hessian of the adjusted loglikelihood coincides with a robust
#' sandwich estimate of the parameter covariance at the MLE. Three
#' adjustments are available: one in which the independence loglikelihood
#' itself is scaled (vertical scaling) and two others where the scaling
#' is in the parameter vector (horizontal scaling).
#'
#' See Chandler and Bate (2007) for full details and
#' \code{vignette("chandwich-vignette", package = "chandwich")} for an
#' overview of the package.
#' @references Chandler, R. E. and Bate, S. (2007). Inference for clustered
#' data using the independence loglikelihood. \emph{Biometrika},
#' \strong{94}(1), 167-183. \doi{10.1093/biomet/asm015}
#' @seealso \code{\link{adjust_loglik}} to adjust a user-supplied
#' loglikelihood.
#' @seealso \code{\link{compare_models}} to compare nested models using an
#' adjusted loglikelihood ratio test. See also the S3 method
#' \code{\link{anova.chandwich}}.
#' @seealso \code{\link{conf_intervals}} to calculate confidence intervals
#' for individual model parameters. See also the S3 method
#' \code{\link{confint.chandwich}}.
#' @seealso \code{\link{conf_region}} to calculate a confidence region
#' for a pair of model parameters.
#' @docType package
#' @aliases chandwich-package chandwich
#' @import methods
"_PACKAGE"
#' Oxford and Worthing annual maximum temperatures
#'
#' Annual maximum temperatures, in degrees Fahrenheit, at Oxford and
#' Worthing (England), for the period 1901 to 1980.
#'
#' @format A dataframe with 80 rows and 2 columns, named Oxford and Worthing.
#' @source Tabony, R. C. (1983) Extreme value analysis in meteorology.
#' \emph{The Meteorological Magazine}, \strong{112}, 77-98.
#' @references Chandler, R. E. and Bate, S. (2007). Inference for clustered
#' data using the independence loglikelihood. \emph{Biometrika},
#' \strong{94}(1), 167-183. \doi{10.1093/biomet/asm015}
"owtemps"
#' Rat tumor data
#'
#' Tumor incidence in 71 groups of rate from Tarone (1982).
#' The matrix \code{rat} has 71 rows and 2 columns.
#' Each row relates to a different group of rats.
#' The first column (\code{y}) contains the number of rats with tumors.
#' The second column (\code{n}) contains the total number of rats.
#'
#' @format A matrix with 71 rows and 2 columns.
#' @source Table 5.1 of Gelman, A., Carlin, J. B., Stern, H. S. Dunson, D. B.,
#' Vehtari, A. and Rubin, D. B. (2013) \emph{Bayesian Data Analysis},
#' Chapman & Hall / CRC.
#' \url{http://www.stat.columbia.edu/~gelman/book/data/rats.asc}
#' @references Tarone, R. E. (1982) The use of historical information in
#' testing for a trend in proportions. \emph{Biometrics}, \strong{38},
#' 215-220.
"rats"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survey_utils.R
\name{jose_test}
\alias{jose_test}
\title{A test for Jose.}
\usage{
jose_test()
}
\description{
A test for Jose.
}
| /man/jose_test.Rd | no_license | blueprint-ade/bputils | R | false | true | 208 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survey_utils.R
\name{jose_test}
\alias{jose_test}
\title{A test for Jose.}
\usage{
jose_test()
}
\description{
A test for Jose.
}
|
#This function removes peaks that have an FDR or pvalue that is higher than the threshold.
findSignificantPeaks = function(peaks,FDR,pvalue){
#Keep only the peaks whose FDR and p-value both fall at or below the given
#thresholds. Rows with NA in either column are dropped (which() ignores NA).
significant <- which(peaks$FDR <= FDR & peaks$p_value <= pvalue)
peaks[significant, ]
}
#This function finds the same Associate Gene Name in genes2 as in genes1 and adds its description entry to genes1's description column, overwriting anything that may have already been there
changeGeneDesc = function(genes1,genes2){
#Replace genes1$Description with descriptions looked up in genes2: for each
#Gene_name in genes1, every matching genes2 row's Description is collected
#and joined with "; ". Names with no match in genes2 become the empty string.
target_names <- as.character(genes1$Gene_name)
source_names <- as.character(genes2$Gene_name)
merged <- lapply(target_names, function(nm) {
  paste(as.character(genes2$Description[which(source_names == nm)]), collapse = '; ')
})
genes1$Description <- as.character(merged)
#defensive cleanup kept from the original: an empty lookup stringified by
#as.character() would show up as the literal "character(0)"
genes1$Description[which(genes1$Description == 'character(0)')] <- ''
genes1
}
#This function finds gene indices for the genes that each peak is within distance bp of, separating them into three groups where the peak is either 'in gene' or 'upstream'/'downstream' of the gene.
findGeneInds = function(peaks,genes,dist){
#For each peak (row of `peaks`) find indices into `genes` of the genes the
#peak overlaps ("in gene") or lies within `dist` bp of ("downstream" /
#"upstream" relative to the gene start/end).
#NOTE(review): apply() coerces each peak row to a character vector, hence the
#as.numeric() calls; columns are referenced positionally -- x[1] appears to be
#the chromosome, x[2]/x[3] the peak start/end and x[5] the peak summit.
#Confirm against the actual peak-file column order.
#Returns list(ingene, downstream, upstream); each element holds, per peak,
#the vector of matching gene indices.
#in gene: gene start < peak summit < gene end
ingene=apply(peaks,1,function(x) which((genes$Chromosome==as.numeric(x[1]))&(genes$Gene_start_bp<as.numeric(x[2]))&(genes$Gene_end_bp>as.numeric(x[3]))))
#commented-out variants below use the alternative dotted annotation column names
#ingene=apply(peaks,1,function(x) which((genes$Chromosome==as.numeric(x[1]))&(genes$Gene.Start..bp.<as.numeric(x[2]))&(genes$Gene.End..bp.>as.numeric(x[3]))))
#downstream: 0 < (gene start - peak end) < dist, or, 0 (gene start - peak summit) < dist
downstream=apply( peaks,1,function(x) which( (genes$Chromosome==as.numeric(x[1])) & ( ( ((genes$Gene_start_bp-as.numeric(x[3]))<dist) & ((genes$Gene_start_bp-as.numeric(x[3]))>0) ) | ( ((genes$Gene_start_bp-as.numeric(x[5]))<dist) & ((genes$Gene_start_bp-as.numeric(x[5]))>0) ) ) ) )
#downstream=apply( peaks,1,function(x) which( (genes$Chromosome.Number==as.numeric(x[1])) & ( ( ((genes$Gene.Start..bp.-as.numeric(x[3]))<dist) & ((genes$Gene.Start..bp.-as.numeric(x[3]))>0) ) | ( ((genes$Gene.Start..bp.-as.numeric(x[5]))<dist) & ((genes$Gene.Start..bp.-as.numeric(x[5]))>0) ) ) ) )
#upstream: -dist < (gene end - peak start) < 0, or, -dist < (gene end - peak summit) < 0
upstream=apply(peaks,1,function(x) which( (genes$Chromosome==as.numeric(x[1])) & ( ( ((genes$Gene_end_bp-as.numeric(x[2]))<0) & ((genes$Gene_end_bp-as.numeric(x[2]))>-dist) ) | ( ((genes$Gene_end_bp-as.numeric(x[5]))<0) & ((genes$Gene_end_bp-as.numeric(x[5]))>-dist) ) ) ))
#upstream=apply(peaks,1,function(x) which( (genes$Chromosome.Number==as.numeric(x[1])) & ( ( ((genes$Gene.End..bp.-as.numeric(x[2]))<0) & ((genes$Gene.End..bp.-as.numeric(x[2]))>-dist) ) | ( ((genes$Gene.End..bp.-as.numeric(x[5]))<0) & ((genes$Gene.End..bp.-as.numeric(x[5]))>-dist) ) ) ))
GeneInds=list(ingene,downstream,upstream)
#browser()
GeneInds
}
#This function produces a table where each row contains information about a peak along with its associated genes
generatePeakTable = function(peaks, genes, GeneInds) {
#Build a table with one row per peak: the original peak columns (with the
#numeric chromosome codes 998/999 decoded to "X"/"Y") plus three extra
#columns listing the associated gene names ("Genes"), each gene's position
#relative to the peak ("Peak.Pos": "in gene"/"downstream"/"upstream") and
#the distance from each gene start to the peak summit ("Dist.to.Start").
#Args:
#  peaks    - peak table; assumed to have exactly 10 columns with the
#             chromosome in column 1 and a `summit` column -- confirm
#             against the caller's peak layout.
#  genes    - gene annotation with Gene_name and Gene_start_bp columns.
#  GeneInds - output of findGeneInds(): list(ingene, downstream, upstream).
#Returns: data.frame with ncol(peaks) + 3 columns.
ingene <- GeneInds[[1]]
downstream <- GeneInds[[2]]
upstream <- GeneInds[[3]]
nPeaks <- nrow(peaks)
peakTable <- data.frame(array(NA, dim = c(nPeaks, 13)))
colnames(peakTable) <- c(colnames(peaks), "Genes", "Peak.Pos", "Dist.to.Start")
for (peak in seq_len(nPeaks)) {
  peakTable[peak, 2:10] <- peaks[peak, 2:10]
  #998/999 are numeric placeholder codes for the X/Y chromosomes
  chr <- peaks[peak, 1]
  if (chr == 998) {
    peakTable[peak, 1] <- "X"
  } else if (chr == 999) {
    peakTable[peak, 1] <- "Y"
  } else {
    peakTable[peak, 1] <- chr
  }
  #gene names associated with this peak, tagged by position relative to it
  ingene_genes <- as.character(genes$Gene_name[ingene[[peak]]])
  downstream_genes <- as.character(genes$Gene_name[downstream[[peak]]])
  upstream_genes <- as.character(genes$Gene_name[upstream[[peak]]])
  genelist <- paste(c(ingene_genes, downstream_genes, upstream_genes), collapse = ", ")
  taglist <- paste(c(rep("in gene", length(ingene[[peak]])),
                     rep("downstream", length(downstream[[peak]])),
                     rep("upstream", length(upstream[[peak]]))), collapse = ", ")
  #distance from each associated gene's start to the peak summit
  dists <- paste(genes$Gene_start_bp[c(ingene[[peak]], downstream[[peak]], upstream[[peak]])] - peaks$summit[peak], collapse = ", ")
  peakTable[peak, 11:13] <- c(genelist, taglist, dists)
}
peakTable
}
generateGeneTable = function(peaks,genes,GeneInds,peakTable,fileName){
#Build a per-gene summary table: one row for every gene referenced by any
#peak, combining gene annotation columns with the peaks associated to it and
#summary statistics of those peaks.
#Args:
#  peaks     - peak table (not referenced in this body; kept for API symmetry
#              with generatePeakTable -- TODO confirm it can be dropped)
#  genes     - gene annotation; the c(6,3,1,4,5,7,2,8) indexing below assumes
#              a fixed column order -- confirm against the annotation file
#  GeneInds  - list(ingene, downstream, upstream) from findGeneInds()
#  peakTable - output of generatePeakTable(); must have pileup and
#              X.log10.pvalue. columns
#  fileName  - path whose basename is used to label peak IDs
#Returns: data.frame with 15 columns. Note the numeric summaries (mean/max
#pileup, -log10 p-values) end up stored as character because all seven
#values in columns 9:15 are assigned from a single c() character vector.
#indices of every gene associated with at least one peak
allGeneInds=sort(unique(unlist(GeneInds)))
#flatten the comma-separated per-peak columns into parallel vectors
genelist=unlist(strsplit(as.character(peakTable$Genes),split=", "))
poslist=unlist(strsplit(as.character(peakTable$Peak.Pos),split=", "))
distlist=unlist(strsplit(as.character(peakTable$Dist.to.Start),split=", "))
#keep only the basename of the path for peak labels
fileName=tail(strsplit(fileName,split='/')[[1]],n=1)
geneTable=array(NA,dim=c(length(allGeneInds),15))
geneTable=data.frame(geneTable)
geneColNms = colnames(genes)
colnames(geneTable)=c(geneColNms[c(6,3,1,4,5,7,2,8)],"Peak.Pos","Dist.to.Gene.Start","Peaks", "Mean.Pileup", "Mean-log10(PValue)", "Max Pileup", "Max-log10(PValue)")
# browser()
for(gene in 1:length(allGeneInds)){
# Chromosome (998/999 are numeric placeholders for X/Y)
if(genes$Chromosome[allGeneInds[gene]] == 998){
geneTable[gene, 1] = as.character('X')
} else if(genes$Chromosome[allGeneInds[gene]] == 999){
geneTable[gene, 1] = as.character('Y')
} else {
geneTable[gene, 1] = as.character((genes$Chromosome[allGeneInds[gene]]))
}
# Gene name
geneTable[gene, 2] = as.character((genes$Gene_name[allGeneInds[gene]]))
# Ensemble ID
geneTable[gene, 3] = as.character((genes$Gene_stable_ID[allGeneInds[gene]]))
# Start bp
geneTable[gene, 4] = as.character((genes$Gene_start_bp[allGeneInds[gene]]))
# End bp
geneTable[gene, 5] = as.character((genes$Gene_end_bp[allGeneInds[gene]]))
# Strand
geneTable[gene, 6] = as.character((genes$Strand[allGeneInds[gene]]))
# Description
geneTable[gene, 7] = as.character((genes$Gene_description[allGeneInds[gene]]))
# Type
geneTable[gene, 8] = as.character((genes$Gene_type[allGeneInds[gene]]))
goi=as.character(genes$Gene_name[allGeneInds[gene]])
#geneTable[gene,2]=goi
#geneTable[gene,c(1,3:5)]=as.character(genes[allGeneInds[gene],c(1,3:5)])
#geneTable[gene,6]=as.character(genes[allGeneInds[gene],6])
#match goi as a whole comma-separated token inside peakTable$Genes.
#NOTE(review): goi is not regex-escaped, so gene names containing regex
#metacharacters would mismatch; also `pos`/`dists` use exact equality on
#genelist while `cc` uses this regex -- confirm the two stay consistent.
cc <- grep(paste('(^|[ ,]+)',goi,'([ ,]|$)',sep=''),peakTable$Genes) # peak indexes
#`peaks` below shadows the unused function argument of the same name
peaks=paste(paste(fileName,'_',cc,sep=''),collapse=", ")
pos=paste(poslist[which(genelist==goi)],collapse=", ")
dists=paste(distlist[which(genelist==goi)],collapse=", ")
mnPileup <- mean(peakTable$pileup[cc])
mnp <- mean(peakTable$X.log10.pvalue.[cc])
maxPileUp <- max(peakTable$pileup[cc])
mxp <- max(peakTable$X.log10.pvalue.[cc])
geneTable[gene,9:15]=c(pos,dists, peaks, mnPileup, mnp, maxPileUp , mxp)
}
geneTable
}
#This function writes both peakTable and geneTable to tab-delimited files which can be read into Excel
writetoxls = function(peakTable,geneTable,outputPath){
#Write peakTable and geneTable as tab-delimited files (".xls" extension so
#they open directly in Excel). A Peak.ID column of the form
#<basename>_<rownumber> is prepended to peakTable before writing.
base_name <- tail(strsplit(outputPath, split = '/')[[1]], n = 1)
original_cols <- colnames(peakTable)
peakTable$Peak.ID <- paste0(base_name, '_', rownames(peakTable))
peakTable <- peakTable[, c('Peak.ID', original_cols)]
write.table(peakTable, file = paste0(outputPath, '_peakTable.xls'), sep = "\t", row.names = FALSE)
write.table(geneTable, file = paste0(outputPath, '_geneTable.xls'), sep = "\t", row.names = FALSE)
}
#This function compares two lists of genes to find common genes and unique genes to each list, writes them to file, and produces a crude venn diagram. A better venn diagram can be produced using the venn.diagram function from the library "VennDiagram".
| /findGeneFunctions.r | no_license | NachoDave/GeneTableMaker | R | false | false | 7,368 | r | #This function removes peaks that have an FDR or pvalue that is higher than the threshold.
findSignificantPeaks = function(peaks,FDR,pvalue){
#Keep only the peaks whose FDR and p_value are both at or below the given
#thresholds; rows with NA in either column are dropped (which() ignores NA).
peaks=peaks[which((peaks$FDR<=FDR)&(peaks$p_value<=pvalue)),]
peaks
}
#This function finds the same Associate Gene Name in genes2 as in genes1 and adds its description entry to genes1's description column, overwriting anything that may have already been there
changeGeneDesc = function(genes1,genes2){
#For each gene name in genes1, collect all matching Description entries from
#genes2 and join them with "; ", overwriting genes1$Description. Names with
#no match end up as the empty string (paste(character(0), collapse=..) == "").
genes1$Description=as.character(lapply(as.character(genes1$Gene_name),function(x) paste(as.character(genes2$Description[which(as.character(genes2$Gene_name)==x)]),collapse='; ')))
#defensive cleanup: an empty lookup stringified by as.character() would show
#up as the literal "character(0)"
genes1$Description[which(genes1$Description=='character(0)')]=''
genes1
}
#This function finds gene indices for the genes that each peak is within distance bp of, separating them into three groups where the peak is either 'in gene' or 'upstream'/'downstream' of the gene.
findGeneInds = function(peaks,genes,dist){
#For each peak (row of `peaks`) find indices into `genes` of the genes the
#peak overlaps ("in gene") or lies within `dist` bp of ("downstream" /
#"upstream" relative to the gene start/end).
#NOTE(review): apply() coerces each peak row to a character vector, hence the
#as.numeric() calls; columns are referenced positionally -- x[1] appears to be
#the chromosome, x[2]/x[3] the peak start/end and x[5] the peak summit.
#Confirm against the actual peak-file column order.
#Returns list(ingene, downstream, upstream); each element holds, per peak,
#the vector of matching gene indices.
#in gene: gene start < peak summit < gene end
ingene=apply(peaks,1,function(x) which((genes$Chromosome==as.numeric(x[1]))&(genes$Gene_start_bp<as.numeric(x[2]))&(genes$Gene_end_bp>as.numeric(x[3]))))
#commented-out variants below use the alternative dotted annotation column names
#ingene=apply(peaks,1,function(x) which((genes$Chromosome==as.numeric(x[1]))&(genes$Gene.Start..bp.<as.numeric(x[2]))&(genes$Gene.End..bp.>as.numeric(x[3]))))
#downstream: 0 < (gene start - peak end) < dist, or, 0 (gene start - peak summit) < dist
downstream=apply( peaks,1,function(x) which( (genes$Chromosome==as.numeric(x[1])) & ( ( ((genes$Gene_start_bp-as.numeric(x[3]))<dist) & ((genes$Gene_start_bp-as.numeric(x[3]))>0) ) | ( ((genes$Gene_start_bp-as.numeric(x[5]))<dist) & ((genes$Gene_start_bp-as.numeric(x[5]))>0) ) ) ) )
#downstream=apply( peaks,1,function(x) which( (genes$Chromosome.Number==as.numeric(x[1])) & ( ( ((genes$Gene.Start..bp.-as.numeric(x[3]))<dist) & ((genes$Gene.Start..bp.-as.numeric(x[3]))>0) ) | ( ((genes$Gene.Start..bp.-as.numeric(x[5]))<dist) & ((genes$Gene.Start..bp.-as.numeric(x[5]))>0) ) ) ) )
#upstream: -dist < (gene end - peak start) < 0, or, -dist < (gene end - peak summit) < 0
upstream=apply(peaks,1,function(x) which( (genes$Chromosome==as.numeric(x[1])) & ( ( ((genes$Gene_end_bp-as.numeric(x[2]))<0) & ((genes$Gene_end_bp-as.numeric(x[2]))>-dist) ) | ( ((genes$Gene_end_bp-as.numeric(x[5]))<0) & ((genes$Gene_end_bp-as.numeric(x[5]))>-dist) ) ) ))
#upstream=apply(peaks,1,function(x) which( (genes$Chromosome.Number==as.numeric(x[1])) & ( ( ((genes$Gene.End..bp.-as.numeric(x[2]))<0) & ((genes$Gene.End..bp.-as.numeric(x[2]))>-dist) ) | ( ((genes$Gene.End..bp.-as.numeric(x[5]))<0) & ((genes$Gene.End..bp.-as.numeric(x[5]))>-dist) ) ) ))
GeneInds=list(ingene,downstream,upstream)
#browser()
GeneInds
}
#This function produces a table where each row contains information about a peak along with its associated genes
generatePeakTable = function(peaks,genes,GeneInds){
#Build a table with one row per peak: the original peak columns (with the
#numeric chromosome codes 998/999 decoded to "X"/"Y") plus three extra
#columns: "Genes" (associated gene names), "Peak.Pos" (holds the
#"in gene"/"downstream"/"upstream" tags, not coordinates) and
#"Dist.to.Start" (gene start minus peak summit, comma-separated).
#Assumes `peaks` has exactly 10 columns with chromosome in column 1 and a
#`summit` column -- TODO confirm against the peak-file layout.
ingene=GeneInds[[1]]
downstream=GeneInds[[2]]
upstream=GeneInds[[3]]
peakTable=array(NA,dim=c(length(peaks[,1]),13))
peakTable=data.frame(peakTable)
colnames(peakTable)=c(colnames(peaks),"Genes","Peak.Pos","Dist.to.Start")
#browser()
for(peak in 1:length(peaks[,1])){
peakTable[peak, 2:10]=peaks[peak,2:10]
#998/999 are numeric placeholder codes for the X/Y chromosomes
if (peaks[peak, 1] == 998){
peakTable[peak, 1] = as.character('X')
} else if (peaks[peak, 1] == 999){
peakTable[peak, 1] = as.character('Y')
} else {
peakTable[peak, 1] = peaks[peak,1]
}
#gene names associated with this peak, tagged by position relative to it
ingene_genes=as.character(genes$Gene_name[ingene[[peak]]])
downstream_genes=as.character(genes$Gene_name[downstream[[peak]]])
upstream_genes=as.character(genes$Gene_name[upstream[[peak]]])
genelist=paste(c(ingene_genes,downstream_genes,upstream_genes),collapse=", ")
ingene_tags=rep("in gene",length(ingene[[peak]]))
downstream_tags=rep("downstream",length(downstream[[peak]]))
upstream_tags=rep("upstream",length(upstream[[peak]]))
taglist=paste(c(ingene_tags,downstream_tags,upstream_tags),collapse=", ")
#distance from each associated gene's start to the peak summit
dists=paste(genes$Gene_start_bp[c(ingene[[peak]],downstream[[peak]],upstream[[peak]])]-peaks$summit[peak],collapse=", ")
peakTable[peak,11:13]=c(genelist,taglist,dists)
}
peakTable
}
generateGeneTable = function(peaks,genes,GeneInds,peakTable,fileName){
#Build a per-gene summary table: one row for every gene referenced by any
#peak, combining gene annotation columns with the peaks associated to it and
#summary statistics of those peaks.
#Args:
#  peaks     - peak table (not referenced in this body; kept for API symmetry
#              with generatePeakTable -- TODO confirm it can be dropped)
#  genes     - gene annotation; the c(6,3,1,4,5,7,2,8) indexing below assumes
#              a fixed column order -- confirm against the annotation file
#  GeneInds  - list(ingene, downstream, upstream) from findGeneInds()
#  peakTable - output of generatePeakTable(); must have pileup and
#              X.log10.pvalue. columns
#  fileName  - path whose basename is used to label peak IDs
#Returns: data.frame with 15 columns. Note the numeric summaries (mean/max
#pileup, -log10 p-values) end up stored as character because all seven
#values in columns 9:15 are assigned from a single c() character vector.
#indices of every gene associated with at least one peak
allGeneInds=sort(unique(unlist(GeneInds)))
#flatten the comma-separated per-peak columns into parallel vectors
genelist=unlist(strsplit(as.character(peakTable$Genes),split=", "))
poslist=unlist(strsplit(as.character(peakTable$Peak.Pos),split=", "))
distlist=unlist(strsplit(as.character(peakTable$Dist.to.Start),split=", "))
#keep only the basename of the path for peak labels
fileName=tail(strsplit(fileName,split='/')[[1]],n=1)
geneTable=array(NA,dim=c(length(allGeneInds),15))
geneTable=data.frame(geneTable)
geneColNms = colnames(genes)
colnames(geneTable)=c(geneColNms[c(6,3,1,4,5,7,2,8)],"Peak.Pos","Dist.to.Gene.Start","Peaks", "Mean.Pileup", "Mean-log10(PValue)", "Max Pileup", "Max-log10(PValue)")
# browser()
for(gene in 1:length(allGeneInds)){
# Chromosome (998/999 are numeric placeholders for X/Y)
if(genes$Chromosome[allGeneInds[gene]] == 998){
geneTable[gene, 1] = as.character('X')
} else if(genes$Chromosome[allGeneInds[gene]] == 999){
geneTable[gene, 1] = as.character('Y')
} else {
geneTable[gene, 1] = as.character((genes$Chromosome[allGeneInds[gene]]))
}
# Gene name
geneTable[gene, 2] = as.character((genes$Gene_name[allGeneInds[gene]]))
# Ensemble ID
geneTable[gene, 3] = as.character((genes$Gene_stable_ID[allGeneInds[gene]]))
# Start bp
geneTable[gene, 4] = as.character((genes$Gene_start_bp[allGeneInds[gene]]))
# End bp
geneTable[gene, 5] = as.character((genes$Gene_end_bp[allGeneInds[gene]]))
# Strand
geneTable[gene, 6] = as.character((genes$Strand[allGeneInds[gene]]))
# Description
geneTable[gene, 7] = as.character((genes$Gene_description[allGeneInds[gene]]))
# Type
geneTable[gene, 8] = as.character((genes$Gene_type[allGeneInds[gene]]))
goi=as.character(genes$Gene_name[allGeneInds[gene]])
#geneTable[gene,2]=goi
#geneTable[gene,c(1,3:5)]=as.character(genes[allGeneInds[gene],c(1,3:5)])
#geneTable[gene,6]=as.character(genes[allGeneInds[gene],6])
#match goi as a whole comma-separated token inside peakTable$Genes.
#NOTE(review): goi is not regex-escaped, so gene names containing regex
#metacharacters would mismatch; also `pos`/`dists` use exact equality on
#genelist while `cc` uses this regex -- confirm the two stay consistent.
cc <- grep(paste('(^|[ ,]+)',goi,'([ ,]|$)',sep=''),peakTable$Genes) # peak indexes
#`peaks` below shadows the unused function argument of the same name
peaks=paste(paste(fileName,'_',cc,sep=''),collapse=", ")
pos=paste(poslist[which(genelist==goi)],collapse=", ")
dists=paste(distlist[which(genelist==goi)],collapse=", ")
mnPileup <- mean(peakTable$pileup[cc])
mnp <- mean(peakTable$X.log10.pvalue.[cc])
maxPileUp <- max(peakTable$pileup[cc])
mxp <- max(peakTable$X.log10.pvalue.[cc])
geneTable[gene,9:15]=c(pos,dists, peaks, mnPileup, mnp, maxPileUp , mxp)
}
geneTable
}
#This function writes both peakTable and geneTable to tab-delimited files which can be read into Excel
writetoxls = function(peakTable,geneTable,outputPath){
#Write peakTable and geneTable as tab-delimited files (".xls" extension so
#they open directly in Excel). A Peak.ID column of the form
#<basename>_<rownumber> is prepended to peakTable before writing.
fileName=tail(strsplit(outputPath,split='/')[[1]],n=1)
peakColNames=colnames(peakTable)
peakTable$Peak.ID = paste(fileName,'_',rownames(peakTable),sep='')
peakTable = peakTable[,c('Peak.ID',peakColNames)]
write.table(peakTable,file=paste(outputPath,'_peakTable.xls',sep=''),sep="\t",row.names=F)
write.table(geneTable,file=paste(outputPath,'_geneTable.xls',sep=''),sep="\t",row.names=F)
}
#This function compares two lists of genes to find common genes and unique genes to each list, writes them to file, and produces a crude venn diagram. A better venn diagram can be produced using the venn.diagram function from the library "VennDiagram".
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
library(leaflet)
shinyUI(
navbarPage(theme = shinytheme("darkly"), title=img(src="NoHate.jpeg", height = 30, width=45), id="nav", windowTitle = "Hate crime",
tabPanel("Introduction",
fluidRow(
tags$img(src = "hate.jpeg", class = "background", height ="180%", width="100%", style = "opacity: 0.5"),
absolutePanel(id = "text", class = "foreground-content",
top = "20%", left = "15%", right = "15%", width = "70%", fixed=FALSE,
draggable = FALSE, height = 200,
fluidRow(
style = "padding: 8%; background-color: white",
tags$h1("EndHate NYC", style="color:#20bc9c;font-weight:bold"),
tags$p("While the COVID-19 pandemic caused a nationwide increase in crimes due to widespread food, housing and healthcare insecurity, it also brought a significant increase in hate crimes (notably towards Asian-Americans), with New York City reportedly experiencing a 223% increase in hate crimes in the first quarter of 2021 compared to the previous year.", style="font-weight:bold;color:#18bc9c"),
tags$p("EndHate is a tool for individuals to learn more about the rise of hate crimes in NYC, and encourage them to report such events, increasing citizen engagement. This will help protect citizens, businesses and larger communities.", style="font-weight:bold;color:#18bc9c"),
tags$p("The following pages provide a map view of NYC crime complaints, their correspondence to COVID hospitalizations, and reported hate crimes during the pandemic.", style="font-weight:bold;color:#18bc9c")
),
style = "opacity: 0.85")
)),
tabPanel("Map",
div(class="outer map",
leafletOutput("map", width="100%", height=620),
absolutePanel(id = "choices", class = "panel panel-default",
top = 100, left = 25, width = 250, fixed=FALSE,
draggable = TRUE, height = "auto",
tags$h1("Please Select",
align = "left", style = "font-size:30px"),
selectInput("Month",
label = "Month",
choices = c('03/2020', '04/2020','05/2020','06/2020','07/2020','08/2020','09/2020','10/2020','11/2020','12/2020', '01/2021','02/2021','03/2021','04/2021', '05/2021','06/2021')
),
tags$h2("Type of Crimes",
align = "left",style = "font-size:15px"),
checkboxInput("criminal_mischief",
label = "criminal mischief", value = FALSE),
checkboxInput("grand_larceny",
label = "grand larceny", value = FALSE),
checkboxInput("burglary",
label = "burglary", value = FALSE),
checkboxInput("felony_assault",
label = "felony assault", value = FALSE),
checkboxInput("miscellaneous_penal_law",
label = "miscellaneous penal law", value = FALSE),
checkboxInput("motor_vehicle",
label = "grand larceny of motor vehicle", value = FALSE),
checkboxInput("robbery",
label = "robbery", value = FALSE),
checkboxInput("dangerous_weapons",
label = "dangerous weapons", value = FALSE),
style = "opacity: 0.80")
)
),
#"CRIMINAL MISCHIEF & RELATED OF"
# "GRAND LARCENY"
# "BURGLARY"
# "FELONY ASSAULT"
# "MISCELLANEOUS PENAL LAW"
# "GRAND LARCENY OF MOTOR VEHICLE"
# "ROBBERY"
# "DANGEROUS WEAPONS"
# selectInput("Month",
# label = "Month",
# choices = c('a', 'b', 'c')
# )
tabPanel("COVID and Crimes",
sidebarPanel(
selectInput("borough",
label = "Borough",
choices = c('Manhattan', 'Bronx', 'Queens', 'Brooklyn', 'Staten Island')
),
selectInput("crime",
label = "Type of crime",
choices = tolower(c("CRIMINAL MISCHIEF & RELATED OF", "GRAND LARCENY", "BURGLARY",
"FELONY ASSAULT", "MISCELLANEOUS PENAL LAW",
"GRAND LARCENY OF MOTOR VEHICLE",
"ROBBERY", "DANGEROUS WEAPONS"))
)
),
mainPanel(
plotOutput(outputId = "t3Plot1"),
plotOutput(outputId = "t3Plot2")
)
),
# [1] "ANTI-MALE HOMOSEXUAL (GAY)" "ANTI-WHITE"
# [3] "ANTI-MUSLIM" "ANTI-HISPANIC"
# [5] "ANTI-TRANSGENDER" "ANTI-JEWISH"
# [7] "ANTI-ASIAN" "ANTI-BLACK"
# [9] "ANTI-FEMALE HOMOSEXUAL (LESBIAN)" "ANTI-ARAB"
# [11] "ANTI-CATHOLIC" "ANTI-GENDER NON-CONFORMING"
# [13] "ANTI-FEMALE" "ANTI-LGBT (MIXED GROUP)"
# [15] "ANTI-MULTI-RACIAL GROUPS" "ANTI-OTHER ETHNICITY"
# [17] "60 YRS AND OLDER" "ANTI-HINDU"
# [19] "ANTI-BUDDHIST" "ANTI-JEHOVAHS WITNESS"
# [21] "ANTI-PHYSICAL DISABILITY" "ANTI-OTHER RELIGION"
# [23] "ANTI-RELIGIOUS PRACTICE GENERALLY"
tabPanel("Hate Crimes and COVID",
sidebarPanel(
selectInput("county",
label = "borough",
choices = c('Manhattan', 'Bronx', 'Queens', 'Brooklyn', 'Staten Island')
),
selectInput("bias",
label = "bias",
choices = c("ANTI-JEWISH", "ANTI-ASIAN", "ANTI-MALE HOMOSEXUAL (GAY)",
"ANTI-BLACK", "ANTI-WHITE")
)
),
mainPanel(
plotOutput(outputId = "t4Plot1"),
plotOutput(outputId = "t4Plot2")
)
),
tabPanel("Appendix",
fluidRow(
tags$img(src = "hate.jpeg", class = "background", height ="180%", width="100%", style = "opacity: 0.5"),
absolutePanel(id = "text", class = "foreground-content",
top = "20%", left = "15%", right = "15%", width = "70%", fixed=FALSE,
draggable = FALSE, height = 200,
fluidRow(
style = "padding: 8%; background-color: white",
tags$h1("Appendix", style="color:#20bc9c;font-weight:bold"),
tags$p("All data sources are from NYC Open Data. The data sources used can be found here.", style = "color;font-weight:bold"),
tags$p("Made with R Shiny. Packages used include dplyr, lubridate, shinythemes, leaflet, tidyr.
This project was created as part of Columbia University's STATGR5243 Applied Data Science course.", style = "color:#18bc9c;font-weight:bold"),
tags$p("Authors: Bohao Ma (Department of Statistics, Columbia University), Egem Yorulmaz (Department of Applied Mathematics, Columbia University), Qian Zhang (Department of Statistics, Columbia University),
Jiayi Nie (Columbia University).", style = "color:#18bc9c;font-weight:bold"),
tags$p("Github: See the code and data sources in our Github repository at https://github.com/TZstatsADS/Fall2021-Project2-group8", style = "color:#18bc9c;font-weight:bold")
),
style = "opacity: 0.85")
))
)
) | /app/ui.R | no_license | TZstatsADS/Fall2021-Project2-group8 | R | false | false | 10,119 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
library(leaflet)
shinyUI(
  #Top-level navigation bar: dark theme, logo image as title.
  navbarPage(theme = shinytheme("darkly"), title=img(src="NoHate.jpeg", height = 30, width=45), id="nav", windowTitle = "Hate crime",
             # ---- Tab 1: Introduction -- static project text over a background image ----
             tabPanel("Introduction",
                      fluidRow(
                        tags$img(src = "hate.jpeg", class = "background", height ="180%", width="100%", style = "opacity: 0.5"),
                        absolutePanel(id = "text", class = "foreground-content",
                                      top = "20%", left = "15%", right = "15%", width = "70%", fixed=FALSE,
                                      draggable = FALSE, height = 200,
                                      fluidRow(
                                        style = "padding: 8%; background-color: white",
                                        tags$h1("EndHate NYC", style="color:#20bc9c;font-weight:bold"),
                                        tags$p("While the COVID-19 pandemic caused a nationwide increase in crimes due to widespread food, housing and healthcare insecurity, it also brought a significant increase in hate crimes (notably towards Asian-Americans), with New York City reportedly experiencing a 223% increase in hate crimes in the first quarter of 2021 compared to the previous year.", style="font-weight:bold;color:#18bc9c"),
                                        tags$p("EndHate is a tool for individuals to learn more about the rise of hate crimes in NYC, and encourage them to report such events, increasing citizen engagement. This will help protect citizens, businesses and larger communities.", style="font-weight:bold;color:#18bc9c"),
                                        tags$p("The following pages provide a map view of NYC crime complaints, their correspondence to COVID hospitalizations, and reported hate crimes during the pandemic.", style="font-weight:bold;color:#18bc9c")
                                      ),
                                      style = "opacity: 0.85")
                      )),
             # ---- Tab 2: full-page leaflet map ("map" output) with a draggable
             # filter panel; the server reads input$Month plus one checkbox per
             # offense category (inputIds correspond to the categories listed in
             # the comment block below) ----
             tabPanel("Map",
                      div(class="outer map",
                          leafletOutput("map", width="100%", height=620),
                          absolutePanel(id = "choices", class = "panel panel-default",
                                        top = 100, left = 25, width = 250, fixed=FALSE,
                                        draggable = TRUE, height = "auto",
                                        tags$h1("Please Select",
                                                align = "left", style = "font-size:30px"),
                                        selectInput("Month",
                                                    label = "Month",
                                                    choices = c('03/2020', '04/2020','05/2020','06/2020','07/2020','08/2020','09/2020','10/2020','11/2020','12/2020', '01/2021','02/2021','03/2021','04/2021', '05/2021','06/2021')
                                        ),
                                        tags$h2("Type of Crimes",
                                                align = "left",style = "font-size:15px"),
                                        checkboxInput("criminal_mischief",
                                                      label = "criminal mischief", value = FALSE),
                                        checkboxInput("grand_larceny",
                                                      label = "grand larceny", value = FALSE),
                                        checkboxInput("burglary",
                                                      label = "burglary", value = FALSE),
                                        checkboxInput("felony_assault",
                                                      label = "felony assault", value = FALSE),
                                        checkboxInput("miscellaneous_penal_law",
                                                      label = "miscellaneous penal law", value = FALSE),
                                        checkboxInput("motor_vehicle",
                                                      label = "grand larceny of motor vehicle", value = FALSE),
                                        checkboxInput("robbery",
                                                      label = "robbery", value = FALSE),
                                        checkboxInput("dangerous_weapons",
                                                      label = "dangerous weapons", value = FALSE),
                                        style = "opacity: 0.80")
                      )
             ),
             #"CRIMINAL MISCHIEF & RELATED OF"
             # "GRAND LARCENY"
             # "BURGLARY"
             # "FELONY ASSAULT"
             # "MISCELLANEOUS PENAL LAW"
             # "GRAND LARCENY OF MOTOR VEHICLE"
             # "ROBBERY"
             # "DANGEROUS WEAPONS"
             # selectInput("Month",
             #             label = "Month",
             #             choices = c('a', 'b', 'c')
             # )
             # ---- Tab 3: crime counts vs COVID, filtered by borough and offense
             # type; rendered by server outputs t3Plot1/t3Plot2 ----
             tabPanel("COVID and Crimes",
                      sidebarPanel(
                        selectInput("borough",
                                    label = "Borough",
                                    choices = c('Manhattan', 'Bronx', 'Queens', 'Brooklyn', 'Staten Island')
                        ),
                        selectInput("crime",
                                    label = "Type of crime",
                                    choices = tolower(c("CRIMINAL MISCHIEF & RELATED OF", "GRAND LARCENY", "BURGLARY",
                                                        "FELONY ASSAULT", "MISCELLANEOUS PENAL LAW",
                                                        "GRAND LARCENY OF MOTOR VEHICLE",
                                                        "ROBBERY", "DANGEROUS WEAPONS"))
                        )
                      ),
                      mainPanel(
                        plotOutput(outputId = "t3Plot1"),
                        plotOutput(outputId = "t3Plot2")
                      )
             ),
             # [1] "ANTI-MALE HOMOSEXUAL (GAY)"       "ANTI-WHITE"
             # [3] "ANTI-MUSLIM"                      "ANTI-HISPANIC"
             # [5] "ANTI-TRANSGENDER"                 "ANTI-JEWISH"
             # [7] "ANTI-ASIAN"                       "ANTI-BLACK"
             # [9] "ANTI-FEMALE HOMOSEXUAL (LESBIAN)" "ANTI-ARAB"
             # [11] "ANTI-CATHOLIC"                   "ANTI-GENDER NON-CONFORMING"
             # [13] "ANTI-FEMALE"                     "ANTI-LGBT (MIXED GROUP)"
             # [15] "ANTI-MULTI-RACIAL GROUPS"        "ANTI-OTHER ETHNICITY"
             # [17] "60 YRS AND OLDER"                "ANTI-HINDU"
             # [19] "ANTI-BUDDHIST"                   "ANTI-JEHOVAHS WITNESS"
             # [21] "ANTI-PHYSICAL DISABILITY"        "ANTI-OTHER RELIGION"
             # [23] "ANTI-RELIGIOUS PRACTICE GENERALLY"
             # ---- Tab 4: hate-crime counts by borough ("county" input) and bias
             # motivation; rendered by server outputs t4Plot1/t4Plot2 ----
             tabPanel("Hate Crimes and COVID",
                      sidebarPanel(
                        selectInput("county",
                                    label = "borough",
                                    choices = c('Manhattan', 'Bronx', 'Queens', 'Brooklyn', 'Staten Island')
                        ),
                        selectInput("bias",
                                    label = "bias",
                                    choices = c("ANTI-JEWISH", "ANTI-ASIAN", "ANTI-MALE HOMOSEXUAL (GAY)",
                                                "ANTI-BLACK", "ANTI-WHITE")
                        )
                      ),
                      mainPanel(
                        plotOutput(outputId = "t4Plot1"),
                        plotOutput(outputId = "t4Plot2")
                      )
             ),
             # ---- Tab 5: Appendix -- data sources and credits ----
             tabPanel("Appendix",
                      fluidRow(
                        tags$img(src = "hate.jpeg", class = "background", height ="180%", width="100%", style = "opacity: 0.5"),
                        absolutePanel(id = "text", class = "foreground-content",
                                      top = "20%", left = "15%", right = "15%", width = "70%", fixed=FALSE,
                                      draggable = FALSE, height = 200,
                                      fluidRow(
                                        style = "padding: 8%; background-color: white",
                                        tags$h1("Appendix", style="color:#20bc9c;font-weight:bold"),
                                        # NOTE(review): the style string below ("color;font-weight:bold") is
                                        # malformed CSS (dangling "color;"), and the sentence promises a link
                                        # ("can be found here") that is not present -- confirm intended markup.
                                        tags$p("All data sources are from NYC Open Data. The data sources used can be found here.", style = "color;font-weight:bold"),
                                        tags$p("Made with R Shiny. Packages used include dplyr, lubridate, shinythemes, leaflet, tidyr.
                                               This project was created as part of Columbia University's STATGR5243 Applied Data Science course.", style = "color:#18bc9c;font-weight:bold"),
                                        tags$p("Authors: Bohao Ma (Department of Statistics, Columbia University), Egem Yorulmaz (Department of Applied Mathematics, Columbia University), Qian Zhang (Department of Statistics, Columbia University),
                                               Jiayi Nie (Columbia University).", style = "color:#18bc9c;font-weight:bold"),
                                        tags$p("Github: See the code and data sources in our Github repository at https://github.com/TZstatsADS/Fall2021-Project2-group8", style = "color:#18bc9c;font-weight:bold")
                                      ),
                                      style = "opacity: 0.85")
                      ))
  )
)
#need to add function argument for horver to separate out ECSE MLs and validations
plotBySite <-
function(download = TRUE,
DirIn,
DirOut,
site,
dataproduct,
exchange = "nsae",
level = "dp04",
ymin = -50,
ymax = 50
) {
library(neonUtilities)
library(rhdf5)
library(ggplot2)
if (download == TRUE) {
if (!dir.exists(paste0(Dir, "/", site))) {
dir.create(paste0(Dir, "/", site))
}
zipsByProduct(
dpID = "DP4.00200.001",
package = "basic",
site = site,
startdate = "2010-01",
enddate = "2020-01",
savepath = paste0(Dir, "/", site),
check.size = F
)
}
if (level == "dp04") {
print(paste0("plotting data for ", site))
setwd(paste0(Dir, "/", site))
if (!file.exists(paste0(site, "_data_", level, ".rds"))) {
fluxData <-
stackEddy(filepath = paste0(Dir, "/", site, "/filesToStack00200"),
level = level, avg=30)
saveRDS(fluxData, (paste0(site, "_data_", level, ".rds")))
} else {
fluxData <- readRDS((paste0(site, "_data_", level, ".rds")))
}
siteData <- fluxData[[site]]
timeB <-
substring(siteData$timeBgn, 1, nchar(siteData$timeBgn) - 4)
timeB <-
strptime(timeB, format = "%Y-%m-%dT%H:%M:%S", tz = "GMT")
timeB <- as.POSIXct(timeB)
siteData <- cbind(timeB, fluxData[[site]])
missingQF <-
siteData[is.na(siteData[[paste0("qfqm.", dataproduct, ".", exchange, ".qfFinl")]]),]
hasQF <-
siteData[!is.na(siteData[[paste0("qfqm.", dataproduct, ".", exchange, ".qfFinl")]]),]
flaggedData <-
hasQF[which(hasQF[[paste0("qfqm.", dataproduct, ".", exchange, ".qfFinl")]] == 1),]
goodData <-
hasQF[which(hasQF[[paste0("qfqm.", dataproduct, ".", exchange, ".qfFinl")]] == 0),]
percentNaN <-
round(nrow(siteData[is.na(siteData[[paste0("data.", dataproduct, ".", exchange, ".flux")]]),]) /
nrow(siteData), digits = 2) * 100
percentMissingQF <-
round(nrow(siteData[is.na(siteData[[paste0("qfqm.", dataproduct, ".", exchange, ".qfFinl")]]),]) /
nrow(siteData), digits = 2) * 100
percentFlagged <-
round(nrow(hasQF[which(hasQF[[paste0("qfqm.", dataproduct, ".", exchange, ".qfFinl")]] == 1),]) /
nrow(siteData), digits = 2) * 100
percentGood <-
round(nrow(hasQF[which(hasQF[[paste0("qfqm.", dataproduct, ".", exchange, ".qfFinl")]] == 0),]) /
nrow(siteData), digits = 2) * 100
#validity <- need to think about how to calculate this due to the missing quality flags for turb and nsae. Cove currently just does availability for these products. If I calculate true validity here, it can only be for the period with non-missing quality flags.
setwd(DirOut)
cols <-
c("noQf" = "gray",
"qfFnl_1" = "#f04546",
"qfFnl_0" = "black")
p <-
ggplot(missingQF, aes(x = timeB, y = missingQF[[paste0("data.", dataproduct, ".", exchange, ".flux")]]), colors = cols) +
geom_point(color = "gray") +
theme_bw() +
xlab("Date") +
ylab(ifelse(dataproduct == "fluxCo2",
paste0(dataproduct, "_", "_umolCo2 m-2 s-1_", exchange),
paste0(dataproduct, "_", "_W m-2_", exchange))) +
ylim(ymin, ymax) +
xlim(min(siteData$timeB), max(siteData$timeB)) +
ggtitle(
paste0(
site,
"_",
exchange
),
subtitle = paste0(percentNaN,
"%NaN, ",
percentMissingQF,
"%noQF, ",
percentFlagged,
"%raisedQF, ",
percentGood,
"%goodData")
)
if (nrow(flaggedData) > 0) {
p <- p + geom_point(data = flaggedData,
mapping = aes(x = timeB, y = flaggedData[[paste0("data.", dataproduct, ".", exchange, ".flux")]], color = "qfFnl_1"))
}
if (nrow(goodData) > 0) {
p <- p + geom_point(data = goodData,
mapping = aes(x = timeB, y = goodData[[paste0("data.", dataproduct, ".", exchange, ".flux")]], color = "qfFnl_0"))
}
p + scale_colour_manual(
name = "",
values = cols,
guide = guide_legend(override.aes = aes(fill =
NA))
) +
ggsave(
paste0(site, "_", exchange, "_", dataproduct, ".png"),
width = 10,
height = 6
)
setwd(DirIn)
}
if (level == "dp01") {
print(paste0("plotting data for ", site))
setwd(paste0(Dir, "/", site))
if (!file.exists(paste0(site, "_data_", level, ".rds"))) {
fluxData <-
stackEddy(filepath = paste0(Dir, "/", site, "/filesToStack00200"),
level = level, avg=30)
saveRDS(fluxData, (paste0(site, "_data_", level, ".rds")))
} else {
fluxData <- readRDS((paste0(site, "_data_", level, ".rds")))
}
siteData <- fluxData[[site]]
timeB <-
substring(siteData$timeBgn, 1, nchar(siteData$timeBgn) - 4)
timeB <-
strptime(timeB, format = "%Y-%m-%dT%H:%M:%S", tz = "GMT")
timeB <- as.POSIXct(timeB)
siteData <- cbind(timeB, fluxData[[site]])
##in the future change dataType to dataproduct and dataproduct to datastream
if (dataproduct == "rtioMoleDryCo2" & exchange == "turb") {
dataType <- "co2Turb"
}
if (dataproduct == "rtioMoleDryH2o" & exchange == "turb") {
dataType <- "h2oTurb"
}
if (dataproduct == "rtioMoleDryCo2" & exchange == "stor") {
dataType <- "co2Stor"
}
if (dataproduct == "rtioMoleDryH2o" & exchange == "stor") {
dataType <- "h2oStor"
}
if (dataproduct == "dlta13CCo2") {
dataType <- "isoCo2"
}
if (dataproduct == "dlta18OH2o") {
dataType <- "isoH2o"
}
#should add subsetting by measurement level
missingQF <-
siteData[is.na(siteData[[paste0("qfqm.", dataType, ".", dataproduct, ".qfFinl")]]),]
hasQF <-
siteData[!is.na(siteData[[paste0("qfqm.", dataType, ".", dataproduct, ".qfFinl")]]),]
flaggedData <-
hasQF[which(hasQF[[paste0("qfqm.", dataType, ".", dataproduct, ".qfFinl")]] == 1),]
goodData <-
hasQF[which(hasQF[[paste0("qfqm.", dataType, ".", dataproduct, ".qfFinl")]] == 0),]
percentNaN <-
round(nrow(siteData[is.na(siteData[[paste0("data.", dataType, ".", dataproduct, ".mean")]]),]) /
nrow(siteData), digits = 2) * 100
percentMissingQF <-
round(nrow(siteData[is.na(siteData[[paste0("qfqm.", dataType, ".", dataproduct, ".qfFinl")]]),]) /
nrow(siteData), digits = 2) * 100
percentFlagged <-
round(nrow(hasQF[which(hasQF[[paste0("qfqm.", dataType, ".", dataproduct, ".qfFinl")]] == 1),]) /
nrow(siteData), digits = 2) * 100
percentGood <-
round(nrow(hasQF[which(hasQF[[paste0("qfqm.", dataType, ".", dataproduct, ".qfFinl")]] == 0),]) /
nrow(siteData), digits = 2) * 100
if (!is.null(DirOut)) {
setwd(DirOut)
}
cols <-
c("noQf" = "gray",
"qfFnl_1" = "#f04546",
"qfFnl_0" = "black")
p <-
ggplot(missingQF, aes(x = timeB, y = missingQF[[paste0("data.", dataType, ".", dataproduct, ".mean")]]), colors = cols) +
geom_point(color = "gray") +
theme_bw() +
xlab("Date") +
ylab(dataproduct) +
ylim(ymin, ymax) +
xlim(min(siteData$timeB), max(siteData$timeB)) +
ggtitle(
paste0(
site,
"_",
dataproduct,
"_",
exchange
),
subtitle = paste0(percentNaN,
"%NaN, ",
percentMissingQF,
"%noQF, ",
percentFlagged,
"%raisedQF, ",
percentGood,
"%goodData")
)
if (nrow(flaggedData) > 0) {
p <- p + geom_point(data = flaggedData,
mapping = aes(x = timeB, y = flaggedData[[paste0("data.", dataType, ".", dataproduct, ".mean")]], color = "qfFnl_1"))
}
if (nrow(goodData) > 0) {
p <- p + geom_point(data = goodData,
mapping = aes(x = timeB, y = goodData[[paste0("data.", dataType, ".", dataproduct, ".mean")]], color = "qfFnl_0"))
}
p + scale_colour_manual(
name = "",
values = cols,
guide = guide_legend(override.aes = aes(fill =
NA))
) +
ggsave(
paste0(site, "_", exchange, "_", dataproduct, ".png"),
width = 10,
height = 6
)
setwd(DirIn)
}
}
| /src/plotBySite.R | no_license | DBJeez/EC_Validity | R | false | false | 9,539 | r |
#need to add function argument for horver to separate out ECSE MLs and validations

# Build a QC-coloured time-series plot for one data column and save it as a PNG.
# Grey points = records with no final quality flag, red = flagged (qfFinl == 1),
# black = good (qfFinl == 0). The subtitle reports the percentage of NaN data,
# missing flags, raised flags, and good records.
#
# Args:
#   siteData  - data frame with a POSIXct `timeB` column plus the data/qf columns
#   dataCol   - name of the data column to plot (e.g. "data.fluxCo2.nsae.flux")
#   qfCol     - name of the matching final quality-flag column
#   plotTitle - main plot title
#   yLabel    - y-axis label
#   ymin, ymax - y-axis limits
#   outFile   - output PNG filename (written to the current working directory)
# Returns: the ggplot object, invisibly.
.plotQcSeries <-
  function(siteData, dataCol, qfCol, plotTitle, yLabel, ymin, ymax, outFile) {
    missingQF <- siteData[is.na(siteData[[qfCol]]), ]
    hasQF <- siteData[!is.na(siteData[[qfCol]]), ]
    flaggedData <- hasQF[which(hasQF[[qfCol]] == 1), ]
    goodData <- hasQF[which(hasQF[[qfCol]] == 0), ]
    n <- nrow(siteData)
    # Percentages are rounded to whole percent (2 digits of the fraction).
    percentNaN <- round(sum(is.na(siteData[[dataCol]])) / n, digits = 2) * 100
    percentMissingQF <- round(nrow(missingQF) / n, digits = 2) * 100
    percentFlagged <- round(nrow(flaggedData) / n, digits = 2) * 100
    percentGood <- round(nrow(goodData) / n, digits = 2) * 100
    cols <-
      c("noQf" = "gray",
        "qfFnl_1" = "#f04546",
        "qfFnl_0" = "black")
    p <-
      ggplot(missingQF, aes(x = timeB, y = missingQF[[dataCol]])) +
      geom_point(color = "gray") +
      theme_bw() +
      xlab("Date") +
      ylab(yLabel) +
      ylim(ymin, ymax) +
      xlim(min(siteData$timeB), max(siteData$timeB)) +
      ggtitle(
        plotTitle,
        subtitle = paste0(percentNaN,
                          "%NaN, ",
                          percentMissingQF,
                          "%noQF, ",
                          percentFlagged,
                          "%raisedQF, ",
                          percentGood,
                          "%goodData")
      )
    # Only add the flagged/good layers when they have rows; extracting the
    # data column from an empty data frame would break the aes() mapping.
    if (nrow(flaggedData) > 0) {
      p <- p + geom_point(
        data = flaggedData,
        mapping = aes(x = timeB, y = flaggedData[[dataCol]], color = "qfFnl_1")
      )
    }
    if (nrow(goodData) > 0) {
      p <- p + geom_point(
        data = goodData,
        mapping = aes(x = timeB, y = goodData[[dataCol]], color = "qfFnl_0")
      )
    }
    p <- p + scale_colour_manual(
      name = "",
      values = cols,
      guide = guide_legend(override.aes = aes(fill = NA))
    )
    # Save the fully assembled plot explicitly. The original chained
    # `+ ggsave(...)`, which saves last_plot() -- a plot that was never
    # printed with the colour scale attached -- so the written PNG could
    # miss the legend colours.
    ggsave(outFile, plot = p, width = 10, height = 6)
    invisible(p)
  }

# Plot NEON surface-atmosphere exchange (DP4.00200.001) data for one site,
# colouring each half-hourly point by its final quality flag, and save the
# figure as "<site>_<exchange>_<dataproduct>.png".
#
# Args:
#   download    - if TRUE, download the monthly H5 zips via zipsByProduct()
#                 into file.path(DirIn, site) before plotting
#   DirIn       - base directory holding one sub-directory per site; the
#                 working directory is restored to DirIn on exit
#   DirOut      - directory for the output PNG; if NULL the PNG is written
#                 into the site directory
#   site        - NEON site code (used to index the stackEddy() result)
#   dataproduct - dp04: flux column name (e.g. "fluxCo2");
#                 dp01: variable name ("rtioMoleDryCo2", "rtioMoleDryH2o",
#                 "dlta13CCo2", "dlta18OH2o")
#   exchange    - exchange term, e.g. "nsae", "turb", "stor"
#   level       - "dp04" (fluxes) or "dp01" (30-min statistics)
#   ymin, ymax  - y-axis limits for the plot
# Returns: invisible(NULL); called for its side effects (PNG on disk).
plotBySite <-
  function(download = TRUE,
           DirIn,
           DirOut,
           site,
           dataproduct,
           exchange = "nsae",
           level = "dp04",
           ymin = -50,
           ymax = 50
  ) {
    library(neonUtilities)
    library(rhdf5)
    library(ggplot2)
    # NOTE(review): the original body referenced a global `Dir` as the base
    # directory even though the function parameter is `DirIn` and the
    # working directory is restored to `DirIn` at the end. `DirIn` is
    # assumed to be that base directory -- confirm against callers.
    siteDir <- file.path(DirIn, site)
    if (isTRUE(download)) {
      if (!dir.exists(siteDir)) {
        dir.create(siteDir)
      }
      zipsByProduct(
        dpID = "DP4.00200.001",
        package = "basic",
        site = site,
        startdate = "2010-01",
        enddate = "2020-01",
        savepath = siteDir,
        check.size = FALSE
      )
    }
    if (!level %in% c("dp01", "dp04")) {
      # The original silently did nothing for other levels; warn instead.
      warning("Unsupported level '", level, "'; nothing plotted.",
              call. = FALSE)
      return(invisible(NULL))
    }
    print(paste0("plotting data for ", site))
    # Cache the stacked HDF5 output as RDS so repeat runs skip stackEddy().
    setwd(siteDir)
    cacheFile <- paste0(site, "_data_", level, ".rds")
    if (!file.exists(cacheFile)) {
      fluxData <-
        stackEddy(filepath = file.path(siteDir, "filesToStack00200"),
                  level = level, avg = 30)
      saveRDS(fluxData, cacheFile)
    } else {
      fluxData <- readRDS(cacheFile)
    }
    siteData <- fluxData[[site]]
    # timeBgn ends in a ".000Z"-style 4-character tail; strip it and parse
    # the remainder as a GMT POSIXct for plotting.
    timeB <-
      substring(siteData$timeBgn, 1, nchar(siteData$timeBgn) - 4)
    timeB <-
      as.POSIXct(strptime(timeB, format = "%Y-%m-%dT%H:%M:%S", tz = "GMT"))
    siteData <- cbind(timeB, fluxData[[site]])
    if (level == "dp04") {
      dataCol <- paste0("data.", dataproduct, ".", exchange, ".flux")
      qfCol <- paste0("qfqm.", dataproduct, ".", exchange, ".qfFinl")
      # CO2 flux is labelled in umol m-2 s-1; other fluxes in W m-2.
      if (dataproduct == "fluxCo2") {
        yLabel <- paste0(dataproduct, "_", "_umolCo2 m-2 s-1_", exchange)
      } else {
        yLabel <- paste0(dataproduct, "_", "_W m-2_", exchange)
      }
      plotTitle <- paste0(site, "_", exchange)
      # validity: TODO -- true validity can only be computed for the period
      # with non-missing quality flags (turb/nsae flags can be absent).
    } else {
      ## Map the dp01 data product / exchange pair onto its data type.
      ## In the future change dataType to dataproduct and dataproduct
      ## to datastream.
      if (dataproduct == "rtioMoleDryCo2" && exchange == "turb") {
        dataType <- "co2Turb"
      } else if (dataproduct == "rtioMoleDryH2o" && exchange == "turb") {
        dataType <- "h2oTurb"
      } else if (dataproduct == "rtioMoleDryCo2" && exchange == "stor") {
        dataType <- "co2Stor"
      } else if (dataproduct == "rtioMoleDryH2o" && exchange == "stor") {
        dataType <- "h2oStor"
      } else if (dataproduct == "dlta13CCo2") {
        dataType <- "isoCo2"
      } else if (dataproduct == "dlta18OH2o") {
        dataType <- "isoH2o"
      } else {
        # Original left `dataType` undefined here and failed later with an
        # obscure "object not found"; fail fast with a clear message.
        stop("No dp01 data type known for dataproduct '", dataproduct,
             "' with exchange '", exchange, "'", call. = FALSE)
      }
      # TODO: should add subsetting by measurement level (horver).
      dataCol <- paste0("data.", dataType, ".", dataproduct, ".mean")
      qfCol <- paste0("qfqm.", dataType, ".", dataproduct, ".qfFinl")
      yLabel <- dataproduct
      plotTitle <- paste0(site, "_", dataproduct, "_", exchange)
    }
    # Write the PNG into DirOut when given; otherwise it lands in the site
    # directory (the dp04 branch previously errored on a NULL DirOut while
    # the dp01 branch checked -- now consistent).
    if (!is.null(DirOut)) {
      setwd(DirOut)
    }
    .plotQcSeries(
      siteData, dataCol, qfCol, plotTitle, yLabel, ymin, ymax,
      outFile = paste0(site, "_", exchange, "_", dataproduct, ".png")
    )
    # Restore the caller-facing working directory.
    setwd(DirIn)
    invisible(NULL)
  }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.