content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
\name{data.sim.mfr}
\alias{data.sim.mfr}
\alias{data.sim.facets}
\docType{data}
\title{
Simulated Multifaceted Data
}
\description{
Simulated data from multiple facets.
}
%% ## DON'T FORGET TO SET SEEDS BEFORE SIMULATING ACTUAL SIM-DATA
%% ## (NOT DONE YET)
%% ## Safe simulation script in "inst"-folder
\usage{
data(data.sim.mfr)
data(data.sim.facets)
}
\format{
The format of \code{data.sim.mfr} is: \cr
\code{ num [1:100, 1:5] 3 2 1 1 0 1 0 1 0 0 ...} \cr
\code{ - attr(*, "dimnames")=List of 2} \cr
\code{ ..$ : chr [1:100] "V1" "V1.1" "V1.2" "V1.3" ...} \cr
\code{ ..$ : NULL}
The format of \code{data.sim.facets} is: \cr
\code{'data.frame': 100 obs. of 3 variables:} \cr
\code{ $ rater : num 1 2 3 4 5 1 2 3 4 5 ...} \cr
\code{ $ topic : num 3 1 3 1 3 2 3 2 2 1 ...} \cr
\code{ $ female: num 2 2 1 2 1 1 2 1 2 1 ...} \cr
}
%\details{
%% ...
% }
\source{
Simulated
}
%\references{
%% none
%}
\examples{
#######
# sim multi faceted Rasch model
data(data.sim.mfr)
data(data.sim.facets)
# 1: A-matrix test_rater
test_1_items <- .A.matrix( data.sim.mfr, formulaA = ~rater,
facets = data.sim.facets, constraint = "items" )
test_1_cases <- .A.matrix( data.sim.mfr, formulaA = ~rater,
facets = data.sim.facets, constraint = "cases" )
# 2: test_item+rater
test_2_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+rater,
facets = data.sim.facets, constraint = "cases" )
# 3: test_item+rater+topic+ratertopic
test_3_items <- .A.matrix( data.sim.mfr, formulaA = ~item+rater*topic,
facets = data.sim.facets, constraint = "items" )
# conquest uses a different way of ordering the rows
# these are the first few rows of the conquest design matrix
# test_3_items$A[grep("item1([[:print:]])*topic1", rownames(test_3_items)),]
# 4: test_item+step
test_4_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+step,
facets = data.sim.facets, constraint = "cases" )
# 5: test_item+item:step
test_5_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+item:step,
facets = data.sim.facets, constraint = "cases" )
test_5_cases$A[, grep("item1", colnames(test_5_cases)) ]
# 5+x: more
# => 6: is this even well defined in the conquest-design output
# (see test_item+topicstep_cases.cqc / .des)
# regardless of the meaning of such a formula;
# currently .A.matrix throws a warning
# test_6_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+topic:step,
# facets = data.sim.facets, constraint = "cases" )
test_7_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+topic+topic:step,
facets = data.sim.facets, constraint = "cases" )
\dontrun{
# => 8: same as with 6
test_8_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+rater+item:rater:step,
facets = data.sim.facets, constraint = "cases" )
## [1] "Can't proceed the estimation: Lower-order term is missing."
test_9_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+step+rater+item:step+item:rater,
facets = data.sim.facets, constraint = "cases" )
test_10_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+female+item:female,
facets = data.sim.facets, constraint = "cases" )
### All Design matrices
test_1_cases <- designMatrices.mfr( data.sim.mfr, formulaA = ~rater,
facets = data.sim.facets, constraint = "cases" )
test_4_cases <- designMatrices.mfr( data.sim.mfr, formulaA = ~item+item:step,
facets = data.sim.facets, constraint = "cases" )
### TAM
test_4_cases <- tam.mml.mfr( data.sim.mfr, formulaA = ~item+item:step )
test_tam <- tam.mml( data.sim.mfr )
test_1_cases <- tam.mml.mfr( data.sim.mfr, formulaA = ~rater,
facets = data.sim.facets, constraint = "cases" )
test_2_cases <- tam.mml.mfr( data.sim.mfr, formulaA = ~item+rater,
facets = data.sim.facets, constraint = "cases" )}
}
\keyword{datasets}
| /man/data.sim.mfr.Rd | no_license | ayoakinphd/TAM | R | false | false | 4,117 | rd | \name{data.sim.mfr}
\alias{data.sim.mfr}
\alias{data.sim.facets}
\docType{data}
\title{
Simulated Multifaceted Data
}
\description{
Simulated data from multiple facets.
}
%% ## DON'T FORGET TO SET SEEDS BEFORE SIMULATING ACTUAL SIM-DATA
%% ## (NOT DONE YET)
%% ## Safe simulation script in "inst"-folder
\usage{
data(data.sim.mfr)
data(data.sim.facets)
}
\format{
The format of \code{data.sim.mfr} is: \cr
\code{ num [1:100, 1:5] 3 2 1 1 0 1 0 1 0 0 ...} \cr
\code{ - attr(*, "dimnames")=List of 2} \cr
\code{ ..$ : chr [1:100] "V1" "V1.1" "V1.2" "V1.3" ...} \cr
\code{ ..$ : NULL}
The format of \code{data.sim.facets} is: \cr
\code{'data.frame': 100 obs. of 3 variables:} \cr
\code{ $ rater : num 1 2 3 4 5 1 2 3 4 5 ...} \cr
\code{ $ topic : num 3 1 3 1 3 2 3 2 2 1 ...} \cr
\code{ $ female: num 2 2 1 2 1 1 2 1 2 1 ...} \cr
}
%\details{
%% ...
% }
\source{
Simulated
}
%\references{
%% none
%}
\examples{
#######
# sim multi faceted Rasch model
data(data.sim.mfr)
data(data.sim.facets)
# 1: A-matrix test_rater
test_1_items <- .A.matrix( data.sim.mfr, formulaA = ~rater,
facets = data.sim.facets, constraint = "items" )
test_1_cases <- .A.matrix( data.sim.mfr, formulaA = ~rater,
facets = data.sim.facets, constraint = "cases" )
# 2: test_item+rater
test_2_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+rater,
facets = data.sim.facets, constraint = "cases" )
# 3: test_item+rater+topic+ratertopic
test_3_items <- .A.matrix( data.sim.mfr, formulaA = ~item+rater*topic,
facets = data.sim.facets, constraint = "items" )
# conquest uses a different way of ordering the rows
# these are the first few rows of the conquest design matrix
# test_3_items$A[grep("item1([[:print:]])*topic1", rownames(test_3_items)),]
# 4: test_item+step
test_4_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+step,
facets = data.sim.facets, constraint = "cases" )
# 5: test_item+item:step
test_5_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+item:step,
facets = data.sim.facets, constraint = "cases" )
test_5_cases$A[, grep("item1", colnames(test_5_cases)) ]
# 5+x: more
# => 6: is this even well defined in the conquest-design output
# (see test_item+topicstep_cases.cqc / .des)
# regardless of the meaning of such a formula;
# currently .A.matrix throws a warning
# test_6_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+topic:step,
# facets = data.sim.facets, constraint = "cases" )
test_7_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+topic+topic:step,
facets = data.sim.facets, constraint = "cases" )
\dontrun{
# => 8: same as with 6
test_8_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+rater+item:rater:step,
facets = data.sim.facets, constraint = "cases" )
## [1] "Can't proceed the estimation: Lower-order term is missing."
test_9_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+step+rater+item:step+item:rater,
facets = data.sim.facets, constraint = "cases" )
test_10_cases <- .A.matrix( data.sim.mfr, formulaA = ~item+female+item:female,
facets = data.sim.facets, constraint = "cases" )
### All Design matrices
test_1_cases <- designMatrices.mfr( data.sim.mfr, formulaA = ~rater,
facets = data.sim.facets, constraint = "cases" )
test_4_cases <- designMatrices.mfr( data.sim.mfr, formulaA = ~item+item:step,
facets = data.sim.facets, constraint = "cases" )
### TAM
test_4_cases <- tam.mml.mfr( data.sim.mfr, formulaA = ~item+item:step )
test_tam <- tam.mml( data.sim.mfr )
test_1_cases <- tam.mml.mfr( data.sim.mfr, formulaA = ~rater,
facets = data.sim.facets, constraint = "cases" )
test_2_cases <- tam.mml.mfr( data.sim.mfr, formulaA = ~item+rater,
facets = data.sim.facets, constraint = "cases" )}
}
\keyword{datasets}
|
# Fit model 12 (heteroscedastic residual variance, fixed quadratic age term)
# with brms for the L. delicata DA growth dataset, using a SNP-derived
# relatedness matrix (G) as the covariance of the genotype random effect.

#Load libraries
library(dplyr)
library(magrittr)
library(brms)
##Read in data
# Dataset: growth records for the DA analysis set (one row per measurement)
data_DA <- read.csv("data/growth/processed/analysis/Ldeli_quangen_growth_DA.csv", stringsAsFactors = F)
str(data_DA)
dim(data_DA)
# Convert grouping columns to factors (treatment, lizard ID, dam ID);
# note: despite the original comment, no date column is formatted here
data_DA %<>% mutate(treatment = as.factor(treatment),
liz_id = as.factor(liz_id),
dam_id = as.factor(dam_id))
# G matrix: genomic (SNP-based) relatedness matrix, passed below via
# gr(F1_Genotype, cov = G_VCV) as the random-effect covariance structure
G_VCV <- read.csv("output/G/Ga_SNPready.csv", row.names = 1) %>% as.matrix()
# Set some priors (weakly informative: intercept, slopes, and group SDs)
priors <- c(prior(normal(0, 5), "Intercept"),
prior(normal(0, 10), "b"),
prior(student_t(3, 0, 10), class = "sd"))
# The model: log mass over age (z_* presumably z-scored -- confirm) with a
# quadratic term; random intercepts + age slopes by genotype (cov = G_VCV)
# and by dam; residual SD (sigma) modelled as a function of age
brm_12_het_mod <-
bf(lnMass ~ treatment * z_days_since_hatch + z_days_since_hatch_I2 +
(1 + z_days_since_hatch + z_days_since_hatch_I2 | gr(F1_Genotype, cov = G_VCV)) +
(1 + z_days_since_hatch + z_days_since_hatch_I2 | dam_id),
sigma ~ z_days_since_hatch)
# 4 chains x 6000 iter (1000 warmup, thin 10); save_pars(all = TRUE)
# retains all parameters (needed e.g. for later bridge sampling)
brm_12_het_fixed2 <- brm(brm_12_het_mod,
family = gaussian(),
prior = priors,
data2 = list(G_VCV = G_VCV),
data = data_DA,
chains = 4, cores = 4, iter = 6000, warmup = 1000, thin = 10,
control = list(adapt_delta = 0.98, max_treedepth=12), save_pars = save_pars(all = TRUE))
# Persist the fitted model object for downstream analysis
saveRDS(brm_12_het_fixed2, "output/rds/brm_12_het_fixed2")
| /Katana/brms_mod.12.DA_het_fixed2.R | no_license | fontikar/ldeli_growth | R | false | false | 1,383 | r | #Load libraries
library(dplyr)
library(magrittr)
library(brms)
##Read in data
# Dataset
data_DA <- read.csv("data/growth/processed/analysis/Ldeli_quangen_growth_DA.csv", stringsAsFactors = F)
str(data_DA)
dim(data_DA)
#Format date and treatment
data_DA %<>% mutate(treatment = as.factor(treatment),
liz_id = as.factor(liz_id),
dam_id = as.factor(dam_id))
#G matrix
G_VCV <- read.csv("output/G/Ga_SNPready.csv", row.names = 1) %>% as.matrix()
# Set some prirors
priors <- c(prior(normal(0, 5), "Intercept"),
prior(normal(0, 10), "b"),
prior(student_t(3, 0, 10), class = "sd"))
#The model
brm_12_het_mod <-
bf(lnMass ~ treatment * z_days_since_hatch + z_days_since_hatch_I2 +
(1 + z_days_since_hatch + z_days_since_hatch_I2 | gr(F1_Genotype, cov = G_VCV)) +
(1 + z_days_since_hatch + z_days_since_hatch_I2 | dam_id),
sigma ~ z_days_since_hatch)
brm_12_het_fixed2 <- brm(brm_12_het_mod,
family = gaussian(),
prior = priors,
data2 = list(G_VCV = G_VCV),
data = data_DA,
chains = 4, cores = 4, iter = 6000, warmup = 1000, thin = 10,
control = list(adapt_delta = 0.98, max_treedepth=12), save_pars = save_pars(all = TRUE))
saveRDS(brm_12_het_fixed2, "output/rds/brm_12_het_fixed2")
|
# Smoke tests for the coloured console-list helpers (printlines, printstamp,
# print_runtime). These mainly verify the calls run without error; the
# rendered console output is inspected manually.
context("Test color console lists")
test_that("basic print", {
# Example bullet items to render in the list body
litems <- list(
"Sets key using surgID, procDate, and argument cohortVars",
"Executes funs ApplySurgFilter, ApplyCohortFilter, IndexCohortCases",
"Funs above are applied successively on the dataset",
"Summarize cohort experience profile by surgeon",
"Utilize Surg-Cohort stat table to filter (surg, cohort) obs",
"return list containing data and surg-cohort stat table"
)
# Render an unordered coloured list with a highlighted keyword and "*" bullets
printlines(liheader = "Data Processing Steps:",
litems = litems,
likeyword = "DATA",
lileadcol = "white",
libgfill = "blue",
likwtxtcol = "black",
likwbgfill = "yellow",
symcol = "blue",
sym = "*",
is_ordered = FALSE)
# Stamp banners: default symbol, then a custom single-character symbol
printstamp("message")
printstamp("message", ">")
printstamp("message", "AB") ### bug: author-flagged -- a two-character stamp symbol appears unsupported; TODO confirm/fix in printstamp
# Runtime reporting relative to a start timestamp
ts <- Sys.time()
print_runtime(ts)
})
| /tests/testthat/test-basic.R | no_license | bfatemi/consolenotes | R | false | false | 938 | r | context("Test color console lists")
test_that("basic print", {
litems <- list(
"Sets key using surgID, procDate, and argument cohortVars",
"Executes funs ApplySurgFilter, ApplyCohortFilter, IndexCohortCases",
"Funs above are applied successively on the dataset",
"Summarize cohort experience profile by surgeon",
"Utilize Surg-Cohort stat table to filter (surg, cohort) obs",
"return list containing data and surg-cohort stat table"
)
printlines(liheader = "Data Processing Steps:",
litems = litems,
likeyword = "DATA",
lileadcol = "white",
libgfill = "blue",
likwtxtcol = "black",
likwbgfill = "yellow",
symcol = "blue",
sym = "*",
is_ordered = FALSE)
printstamp("message")
printstamp("message", ">")
printstamp("message", "AB") ### bug
ts <- Sys.time()
print_runtime(ts)
})
|
##################################################################################
###### Table A.9: First stage regression of destruction on barrack distance ######
##################################################################################
# Replication script: regresses whether an Uzbek respondent was affected by
# destruction on (inverted) distance to the closest barrack, at the
# individual level and aggregated to the PSU level, then prints a LaTeX table.
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is discouraged; run in a fresh session instead
# load required libraries
library(readstata13)
library(stargazer)
# read data
data <- read.dta13("./kyrgyzstan.dta")
# recode variables: 2-level factor -> integer codes (1/2) -> 0/1 indicator
data$affected <- as.integer(data$affected)
data$affected <- data$affected - 1
# subset data set according to ethnic groups (analysis uses Uzbeks only)
data_uzbek <- data[which(data$ethnicity=="Uzbek"),]
### First stage
# Invert the distance measure -- presumably so larger values mean closer
# proximity to a barrack; confirm the scale/units of apc_min_distance
data_uzbek$distance <- 1-data_uzbek$apc_min_distance
# PSU-level means of the outcome and regressor (for the aggregated model)
dataAgg <- aggregate(data_uzbek[,c("affected", "distance")],
list(data_uzbek$id_psu),
mean)
# run regressions: individual-level and PSU-level first stages
first_stage_ind <- lm(affected ~ distance, data=data_uzbek)
#summary(first_stage_ind)
first_stage_psu <- lm(affected ~ distance, data=dataAgg)
#summary(first_stage_psu)
# table output
# NOTE(review): "barack" in covariate.labels looks like a typo for
# "barrack", but it is part of the rendered table string -- confirm against
# the published table before changing
stargazer(first_stage_ind, first_stage_psu, covariate.labels = c("Distance to closest barack"),
star.char = c("*", "**", "***"),
title = "Table A.9: First stage regression of destruction on barrack distance",
star.cutoffs = c(0.05, 0.01, 0.001))
| /Original Paper and Code/Original Code/TableA9.R | no_license | CianStryker/Prosocial_Behavior | R | false | false | 1,268 | r | ##################################################################################
###### Table A.9: First stage regression of destruction on barrack distance ######
##################################################################################
rm(list=ls())
# load required libraries
library(readstata13)
library(stargazer)
# read data
data <- read.dta13("./kyrgyzstan.dta")
# recode variables
data$affected <- as.integer(data$affected)
data$affected <- data$affected - 1
# subset data set according to ethnic groups
data_uzbek <- data[which(data$ethnicity=="Uzbek"),]
### First stage
data_uzbek$distance <- 1-data_uzbek$apc_min_distance
dataAgg <- aggregate(data_uzbek[,c("affected", "distance")],
list(data_uzbek$id_psu),
mean)
# run regressions
first_stage_ind <- lm(affected ~ distance, data=data_uzbek)
#summary(first_stage_ind)
first_stage_psu <- lm(affected ~ distance, data=dataAgg)
#summary(first_stage_psu)
# table output
stargazer(first_stage_ind, first_stage_psu, covariate.labels = c("Distance to closest barack"),
star.char = c("*", "**", "***"),
title = "Table A.9: First stage regression of destruction on barrack distance",
star.cutoffs = c(0.05, 0.01, 0.001))
|
#' @title Cohen's Kappa
#' @description Bayesian alternative to Cohen's kappa
#' @param x predictor variable(s), Default: NULL
#' @param x.names optional names for predictor variable(s), Default: NULL
#' @param DF data to analyze
#' @param params define parameters to observe, Default: NULL
#' @param initial.list initial values for analysis, Default: list()
#' @param ... further arguments passed to or from other methods
#' @examples
#' # Simulate rater data
#' Rater1 <- c(rep(0,20),rep(1,80))
#' set.seed(100)
#' Rater2 <- c(rbinom(20,1,0.1), rbinom(80,1,0.9))
#' data <- data.frame(Rater1,Rater2)
#'
#' \donttest{
#' mcmc <- bfw(project.data = data,
#' x = "Rater1,Rater2",
#' saved.steps = 50000,
#' jags.model = "kappa",
#' jags.seed = 100,
#' silent = TRUE)
#'
#' }
#' # Print frequentist and Bayesian kappa
#' library(psych)
#' psych::cohen.kappa(data)$confid[1,]
#' # lower estimate upper
#' # 0.6137906 0.7593583 0.9049260
#' #' \donttest{ mcmc$summary.MCMC }
#' # Mean Median Mode ESS HDIlo HDIhi n
#' # Kappa[1]: 0.739176 0.7472905 0.7634503 50657 0.578132 0.886647 100
#' @seealso
#' \code{\link[stats]{complete.cases}}
#' @rdname StatsKappa
#' @export
#' @importFrom stats complete.cases
StatsKappa <- function(x = NULL,
                       x.names = NULL,
                       DF,
                       params = NULL,
                       initial.list = list(),
                       ...
) {
  # Prepare data and settings for the Bayesian (JAGS) kappa model:
  # returns the parameters to monitor, the JAGS data list, naming info,
  # the observed cross-table, and pass-through initial values.

  # Fetch x parameters (comma-separated string of rater column names)
  x <- TrimSplit(x)
  # Exclude observations with a missing rating from any selected rater
  DF <- DF[stats::complete.cases(DF[, x]), x]
  # Cross-table of the raters' categories (returned for reporting)
  n.data <- as.data.frame(table(DF[, x]))
  # Label used when naming contrasts in the output
  job.names <- paste(x.names, collapse = " vs. ")
  # Ratings matrix: one row per observation, one column per rater
  rater <- as.matrix(DF[, x])
  # Agreement indicator per observation: 1 if all raters agree, else 0.
  # (Lambda argument renamed so it no longer shadows the outer `x`.)
  equal <- apply(rater, 1, function(obs) as.numeric(length(unique(obs)) == 1))
  # Number of raters
  n.raters <- length(rater[1, ])
  # Number of distinct rating categories.
  # NOTE(review): derived from the first rater only -- a category used
  # exclusively by another rater would be missed; confirm intended.
  n.categories <- length(unique(rater[, 1]))
  # Number of observations
  n <- length(rater[, 1])
  # Parameter(s) of interest; default to the kappa coefficient.
  # (Explicit length test instead of `if (length(params))` truthiness.)
  params <- if (length(params) > 0) TrimSplit(params) else c("Kappa")
  # Data passed to JAGS; `alpha` is a flat Dirichlet prior over categories
  data.list <- list(
    rater = rater,
    alpha = rep(1, n.categories),
    equal = equal,
    n.raters = n.raters,
    n.categories = n.categories,
    n = n
  )
  # Names consumed by the downstream bfw machinery
  name.list <- list(
    job.names = job.names
  )

  return(list(
    params = params,
    data.list = data.list,
    name.list = name.list,
    n.data = n.data,
    initial.list = initial.list
  ))
}
| /R/stats_kappa.R | permissive | anhnguyendepocen/bfw | R | false | false | 2,702 | r | #' @title Cohen's Kappa
#' @description Bayesian alternative to Cohen's kappa
#' @param x predictor variable(s), Default: NULL
#' @param x.names optional names for predictor variable(s), Default: NULL
#' @param DF data to analyze
#' @param params define parameters to observe, Default: NULL
#' @param initial.list initial values for analysis, Default: list()
#' @param ... further arguments passed to or from other methods
#' @examples
#' # Simulate rater data
#' Rater1 <- c(rep(0,20),rep(1,80))
#' set.seed(100)
#' Rater2 <- c(rbinom(20,1,0.1), rbinom(80,1,0.9))
#' data <- data.frame(Rater1,Rater2)
#'
#' \donttest{
#' mcmc <- bfw(project.data = data,
#' x = "Rater1,Rater2",
#' saved.steps = 50000,
#' jags.model = "kappa",
#' jags.seed = 100,
#' silent = TRUE)
#'
#' }
#' # Print frequentist and Bayesian kappa
#' library(psych)
#' psych::cohen.kappa(data)$confid[1,]
#' # lower estimate upper
#' # 0.6137906 0.7593583 0.9049260
#' #' \donttest{ mcmc$summary.MCMC }
#' # Mean Median Mode ESS HDIlo HDIhi n
#' # Kappa[1]: 0.739176 0.7472905 0.7634503 50657 0.578132 0.886647 100
#' @seealso
#' \code{\link[stats]{complete.cases}}
#' @rdname StatsKappa
#' @export
#' @importFrom stats complete.cases
StatsKappa <- function(x = NULL,
x.names = NULL,
DF,
params = NULL,
initial.list = list(),
...
) {
# Fetch x parameters
x <- TrimSplit(x)
# Exclude noncomplete observations
DF <- DF[stats::complete.cases(DF[, x]), x]
# Create crosstable for x parameters
n.data <- as.data.frame(table(DF[, x]))
# name.contrasts for creating contrasts
job.names <- paste(x.names,collapse = " vs. ")
# Select raters
rater <- as.matrix(DF[, x])
# Determine which observations are equal across raters
equal <- apply(rater, 1, function(x) if (length(unique(x)) > 1) 0 else 1)
# Number of raters (2)
n.raters <- length(rater[1, ])
# Number of categories
n.categories <- length(unique(rater[,1]))
# Number of observations
n <- length(rater[, 1])
# Paramter(s) of interest
params <- if(length(params)) TrimSplit(params) else c("Kappa")
# Create data for Jags
data.list <- list(
rater = rater,
alpha = rep(1,n.categories),
equal = equal,
n.raters = n.raters,
n.categories = n.categories,
n = n
)
# Create name list
name.list <- list(
job.names = job.names
)
return ( list (
params = params,
data.list = data.list,
name.list = name.list,
n.data = n.data,
initial.list = initial.list
))
}
|
# Shiny UI for a proliferation-assay plotting app: the user names the
# experiment and gene, sets the table dimensions (replicates x time points),
# fills in an editable data table, and downloads the data and the plot.
library(shiny)
library(shinyTable)
shinyUI(pageWithSidebar(
# Application title
headerPanel("Proliferation Assay Plot"),
# Sidebar inputs controlling labels and the editable table's dimensions
sidebarPanel(
textInput('experiment',label = h5("Enter experiment name"), value = "Experiment"),
textInput('gene',label = h5("Enter gene name"), value = "Gene"),
numericInput('replicate',label = h5("Enter number of replicates"), value = 3),
numericInput('timepoint',label = h5("Enter number of time points"), value = 4),
actionButton("submit", "Update Table")
),
# Main panel: editable table (shinyTable::htable), assay plot, and
# download buttons for both; click-selection widgets are left disabled
mainPanel(
helpText(HTML("Insert data")),
textOutput("ogene"),
htable("tbl", colHeaders="provided"), # clickId="tblClick",
downloadButton('downloadData', 'Download Data'),
plotOutput("assay"),
downloadButton('downloadPlot', 'Download Plot')
# h3("Current Selection"),
# verbatimTextOutput("clickText")
)
))
| /old/ui.R | permissive | matteocereda/proliferation_assay | R | false | false | 913 | r | library(shiny)
library(shinyTable)
shinyUI(pageWithSidebar(
# Application title
headerPanel("Proliferation Assay Plot"),
sidebarPanel(
textInput('experiment',label = h5("Enter experiment name"), value = "Experiment"),
textInput('gene',label = h5("Enter gene name"), value = "Gene"),
numericInput('replicate',label = h5("Enter number of replicates"), value = 3),
numericInput('timepoint',label = h5("Enter number of time points"), value = 4),
actionButton("submit", "Update Table")
),
# Show the simple table
mainPanel(
helpText(HTML("Insert data")),
textOutput("ogene"),
htable("tbl", colHeaders="provided"), # clickId="tblClick",
downloadButton('downloadData', 'Download Data'),
plotOutput("assay"),
downloadButton('downloadPlot', 'Download Plot')
# h3("Current Selection"),
# verbatimTextOutput("clickText")
)
))
|
# Simulation study setup: draw 20,000 "phenology" observations from normal
# distributions with increasing spread (SD = 10, 20, 40; mean 200), then
# build 30 replicate samples of 10/20/50 observations from each population
# for downstream onset/offset (Weibull / Pearse) estimation.
library(ggplot2)
library(parallel)
library(phest)
library(dplyr)
nn <- 20000  # NOTE(review): defined but unused -- the rnorm() calls below hard-code 20000
#10 SD
set.seed(10)
obs_10sd <- rnorm(20000, mean = 200, sd = 10)
to_10sd <- as.data.frame(obs_10sd)
ggplot(data= to_10sd, aes(x = obs_10sd)) + geom_histogram(bins = 150)
min(to_10sd) ## 162.6818
max(to_10sd) # 199.89258 -- NOTE(review): recorded value equals the median below and is below the mean; looks like a paste error (max should be well above 200) -- rerun to confirm
quantile(obs_10sd, probs = c(0,0.1,0.5,0.9,1)) #10% -186.99 50% - 199.89
# Replicate samples without replacement: each column of rep_* is one
# replicate; split() turns the matrix into a list of 30 sample vectors
set.seed(1)
rep_10obs_10sd <- replicate(n = 30, expr = sample(obs_10sd, size = 10, replace = FALSE))
list_10obs_10sd <- split(rep_10obs_10sd, rep(1:ncol(rep_10obs_10sd), each = nrow(rep_10obs_10sd)))
set.seed(2)
rep_20obs_10sd <- replicate(n = 30, expr = sample(obs_10sd, size = 20, replace = FALSE))
list_20obs_10sd <- split(rep_20obs_10sd, rep(1:ncol(rep_20obs_10sd), each = nrow(rep_20obs_10sd)))
set.seed(5)
rep_50obs_10sd <- replicate(n = 30, expr = sample(obs_10sd, size = 50, replace = FALSE))
list_50obs_10sd <- split(rep_50obs_10sd, rep(1:ncol(rep_50obs_10sd), each = nrow(rep_50obs_10sd)))
#20 SD
set.seed(365)
obs_20sd <- rnorm(20000, mean = 200, sd = 20)
to_20sd <- as.data.frame(obs_20sd)
ggplot(data= to_20sd, aes(x = obs_20sd)) + geom_histogram(bins = 150)
min(to_20sd) # 119.9182
max(to_20sd) # 279.3462
quantile(obs_20sd, probs = c(0,0.1,0.5,0.9,1)) #10% -174.82 50% - 200.18
set.seed(1)
rep_10obs_20sd <- replicate(n = 30, expr = sample(obs_20sd, size = 10, replace = FALSE))
list_10obs_20sd <- split(rep_10obs_20sd, rep(1:ncol(rep_10obs_20sd), each = nrow(rep_10obs_20sd)))
set.seed(2)
rep_20obs_20sd <- replicate(n = 30, expr = sample(obs_20sd, size = 20, replace = FALSE))
list_20obs_20sd <- split(rep_20obs_20sd, rep(1:ncol(rep_20obs_20sd), each = nrow(rep_20obs_20sd)))
set.seed(5)
rep_50obs_20sd <- replicate(n = 30, expr = sample(obs_20sd, size = 50, replace = FALSE))
list_50obs_20sd <- split(rep_50obs_20sd, rep(1:ncol(rep_50obs_20sd), each = nrow(rep_50obs_20sd)))
#40 SD
set.seed(40)
obs_40sd <- rnorm(20000, mean = 200, sd = 40)
to_40sd <- as.data.frame(obs_40sd)
ggplot(data= to_40sd, aes(x = obs_40sd)) + geom_histogram(bins = 150)
min(to_40sd) # 35.88112 (used below as the "true onset")
max(to_40sd) #361.3789 (used below as the "true offset")
quantile(obs_40sd, probs = c(0,0.1,0.5,0.9,1)) #10% -149.30459 50% - 199.80307
set.seed(1)
rep_10obs_40sd <- replicate(n = 30, expr = sample(obs_40sd, size = 10, replace = FALSE))
list_10obs_40sd <- split(rep_10obs_40sd, rep(1:ncol(rep_10obs_40sd), each = nrow(rep_10obs_40sd)))
set.seed(2)
rep_20obs_40sd <- replicate(n = 30, expr = sample(obs_40sd, size = 20, replace = FALSE))
list_20obs_40sd <- split(rep_20obs_40sd, rep(1:ncol(rep_20obs_40sd), each = nrow(rep_20obs_40sd)))
set.seed(5)
rep_50obs_40sd <- replicate(n = 30, expr = sample(obs_40sd, size = 50, replace = FALSE))
list_50obs_40sd <- split(rep_50obs_40sd, rep(1:ncol(rep_50obs_40sd), each = nrow(rep_50obs_40sd)))
# lapply() wrapper: Weibull-limit estimate of the lower endpoint ("onset")
# for one replicate sample of observation dates.
pearse_onsetestimator <- function(x) {
  weib.limit(x = x, upper = FALSE)
}
# lapply() wrapper: Weibull-limit estimate of the upper endpoint ("offset")
# for one replicate sample of observation dates.
pearse_offsetestimator <- function(x) {
  weib.limit(x = x, upper = TRUE)
}
# Post-process the 40-SD onset/offset estimates: each weib.limit() call is
# assumed to return 3 values per replicate (estimate, lower CI, upper CI),
# so split(x, 1:3) interleaves the unlisted vector back into 3 columns.
# NOTE(review): confirm the 3-value return/order of phest::weib.limit.
# 20 obs 40 sd
pearse_onset20obs_40sd <- unlist(lapply(list_20obs_40sd, FUN = pearse_onsetestimator))
pearse_onset20obs_40sd_df <- as.data.frame(split(pearse_onset20obs_40sd, 1:3))
# 50 obs 40 sd
pearse_onset50obs_40sd <- unlist(lapply(list_50obs_40sd, FUN = pearse_onsetestimator))
pearse_onset50obs_40sd_df <- as.data.frame(split(pearse_onset50obs_40sd, 1:3))
# Annotate onset results: coverage of the true minimum (35.88, see the
# recorded min(to_40sd) above), bias, design labels, and CI width
pearse_onset50obs_40sd_df <- pearse_onset50obs_40sd_df %>%
rename(estimate = X1, lowci = X2, highci = X3) %>%
mutate(true_onset = 35.88) %>%
mutate(pass = true_onset > lowci & true_onset < highci) %>%
mutate(distance = estimate - true_onset) %>%
mutate(obs = 50, sd = 40) %>%
mutate(estimator = "pearse") %>%
mutate(ci = highci - lowci)
pearse_onset20obs_40sd_df <- pearse_onset20obs_40sd_df %>%
rename(estimate = X1, lowci = X2, highci = X3) %>%
mutate(true_onset = 35.88) %>%
mutate(pass = true_onset > lowci & true_onset < highci) %>%
mutate(distance = estimate - true_onset) %>%
mutate(obs = 20, sd = 40) %>%
mutate(estimator = "pearse") %>%
mutate(ci = highci - lowci)
## offset fixes
pearse_offset20obs_40sd <- unlist(lapply(list_20obs_40sd, FUN = pearse_offsetestimator))
pearse_offset20obs_40sd_df <- as.data.frame(split(pearse_offset20obs_40sd, 1:3))
# 50 obs 40 sd
pearse_offset50obs_40sd <- unlist(lapply(list_50obs_40sd, FUN = pearse_offsetestimator))
pearse_offset50obs_40sd_df <- as.data.frame(split(pearse_offset50obs_40sd, 1:3))
# Annotate offset results: coverage of the true maximum (361.38, see the
# recorded max(to_40sd) above), bias, design labels, and CI width
pearse_offset50obs_40sd_df <- pearse_offset50obs_40sd_df %>%
rename(estimate = X1, lowci = X2, highci = X3) %>%
mutate(true_offset = 361.38) %>%
mutate(pass = true_offset > lowci & true_offset < highci) %>%
mutate(distance = estimate - true_offset) %>%
mutate(obs = 50, sd = 40) %>%
mutate(estimator = "pearse") %>%
mutate(ci = highci - lowci)
pearse_offset20obs_40sd_df <- pearse_offset20obs_40sd_df %>%
rename(estimate = X1, lowci = X2, highci = X3) %>%
mutate(true_offset = 361.38) %>%
mutate(pass = true_offset > lowci & true_offset < highci) %>%
mutate(distance = estimate - true_offset) %>%
mutate(obs = 20, sd = 40) %>%
mutate(estimator = "pearse") %>%
mutate(ci = highci - lowci)
| /scripts/summerscripts/pearse_40sd_scripts.R | no_license | mbelitz/Pollard-Incidental_Comparisons | R | false | false | 5,243 | r | library(ggplot2)
library(parallel)
library(phest)
library(dplyr)
nn <- 20000
#10 SD
set.seed(10)
obs_10sd <- rnorm(20000, mean = 200, sd = 10)
to_10sd <- as.data.frame(obs_10sd)
ggplot(data= to_10sd, aes(x = obs_10sd)) + geom_histogram(bins = 150)
min(to_10sd) ## 162.6818
max(to_10sd) # 199.89258
quantile(obs_10sd, probs = c(0,0.1,0.5,0.9,1)) #10% -186.99 50% - 199.89
set.seed(1)
rep_10obs_10sd <- replicate(n = 30, expr = sample(obs_10sd, size = 10, replace = FALSE))
list_10obs_10sd <- split(rep_10obs_10sd, rep(1:ncol(rep_10obs_10sd), each = nrow(rep_10obs_10sd)))
set.seed(2)
rep_20obs_10sd <- replicate(n = 30, expr = sample(obs_10sd, size = 20, replace = FALSE))
list_20obs_10sd <- split(rep_20obs_10sd, rep(1:ncol(rep_20obs_10sd), each = nrow(rep_20obs_10sd)))
set.seed(5)
rep_50obs_10sd <- replicate(n = 30, expr = sample(obs_10sd, size = 50, replace = FALSE))
list_50obs_10sd <- split(rep_50obs_10sd, rep(1:ncol(rep_50obs_10sd), each = nrow(rep_50obs_10sd)))
#20 SD
set.seed(365)
obs_20sd <- rnorm(20000, mean = 200, sd = 20)
to_20sd <- as.data.frame(obs_20sd)
ggplot(data= to_20sd, aes(x = obs_20sd)) + geom_histogram(bins = 150)
min(to_20sd) # 119.9182
max(to_20sd) # 279.3462
quantile(obs_20sd, probs = c(0,0.1,0.5,0.9,1)) #10% -174.82 50% - 200.18
set.seed(1)
rep_10obs_20sd <- replicate(n = 30, expr = sample(obs_20sd, size = 10, replace = FALSE))
list_10obs_20sd <- split(rep_10obs_20sd, rep(1:ncol(rep_10obs_20sd), each = nrow(rep_10obs_20sd)))
set.seed(2)
rep_20obs_20sd <- replicate(n = 30, expr = sample(obs_20sd, size = 20, replace = FALSE))
list_20obs_20sd <- split(rep_20obs_20sd, rep(1:ncol(rep_20obs_20sd), each = nrow(rep_20obs_20sd)))
set.seed(5)
rep_50obs_20sd <- replicate(n = 30, expr = sample(obs_20sd, size = 50, replace = FALSE))
list_50obs_20sd <- split(rep_50obs_20sd, rep(1:ncol(rep_50obs_20sd), each = nrow(rep_50obs_20sd)))
#40 SD
set.seed(40)
obs_40sd <- rnorm(20000, mean = 200, sd = 40)
to_40sd <- as.data.frame(obs_40sd)
ggplot(data= to_40sd, aes(x = obs_40sd)) + geom_histogram(bins = 150)
min(to_40sd) # 35.88112
max(to_40sd) #361.3789
quantile(obs_40sd, probs = c(0,0.1,0.5,0.9,1)) #10% -149.30459 50% - 199.80307
set.seed(1)
rep_10obs_40sd <- replicate(n = 30, expr = sample(obs_40sd, size = 10, replace = FALSE))
list_10obs_40sd <- split(rep_10obs_40sd, rep(1:ncol(rep_10obs_40sd), each = nrow(rep_10obs_40sd)))
set.seed(2)
rep_20obs_40sd <- replicate(n = 30, expr = sample(obs_40sd, size = 20, replace = FALSE))
list_20obs_40sd <- split(rep_20obs_40sd, rep(1:ncol(rep_20obs_40sd), each = nrow(rep_20obs_40sd)))
set.seed(5)
rep_50obs_40sd <- replicate(n = 30, expr = sample(obs_40sd, size = 50, replace = FALSE))
list_50obs_40sd <- split(rep_50obs_40sd, rep(1:ncol(rep_50obs_40sd), each = nrow(rep_50obs_40sd)))
# lapply functions
# Estimate the phenological onset (left/lower limit) of a vector of
# observation dates using the Weibull-limit estimator from 'phest'.
# Returns whatever weib.limit() returns (estimate plus CI bounds).
pearse_onsetestimator <- function(x) {
  weib.limit(x = x, upper = FALSE)
}
# lapply functions
# Estimate the phenological offset (right/upper limit) of a vector of
# observation dates using the Weibull-limit estimator from 'phest'.
# Returns whatever weib.limit() returns (estimate plus CI bounds).
pearse_offsetestimator <- function(x) {
  weib.limit(x = x, upper = TRUE)
}
# 20 obs 40 sd
pearse_onset20obs_40sd <- unlist(lapply(list_20obs_40sd, FUN = pearse_onsetestimator))
pearse_onset20obs_40sd_df <- as.data.frame(split(pearse_onset20obs_40sd, 1:3))
# 50 obs 40 sd
pearse_onset50obs_40sd <- unlist(lapply(list_50obs_40sd, FUN = pearse_onsetestimator))
pearse_onset50obs_40sd_df <- as.data.frame(split(pearse_onset50obs_40sd, 1:3))
pearse_onset50obs_40sd_df <- pearse_onset50obs_40sd_df %>%
rename(estimate = X1, lowci = X2, highci = X3) %>%
mutate(true_onset = 35.88) %>%
mutate(pass = true_onset > lowci & true_onset < highci) %>%
mutate(distance = estimate - true_onset) %>%
mutate(obs = 50, sd = 40) %>%
mutate(estimator = "pearse") %>%
mutate(ci = highci - lowci)
pearse_onset20obs_40sd_df <- pearse_onset20obs_40sd_df %>%
rename(estimate = X1, lowci = X2, highci = X3) %>%
mutate(true_onset = 35.88) %>%
mutate(pass = true_onset > lowci & true_onset < highci) %>%
mutate(distance = estimate - true_onset) %>%
mutate(obs = 20, sd = 40) %>%
mutate(estimator = "pearse") %>%
mutate(ci = highci - lowci)
## offset fixes
pearse_offset20obs_40sd <- unlist(lapply(list_20obs_40sd, FUN = pearse_offsetestimator))
pearse_offset20obs_40sd_df <- as.data.frame(split(pearse_offset20obs_40sd, 1:3))
# 50 obs 40 sd
pearse_offset50obs_40sd <- unlist(lapply(list_50obs_40sd, FUN = pearse_offsetestimator))
pearse_offset50obs_40sd_df <- as.data.frame(split(pearse_offset50obs_40sd, 1:3))
pearse_offset50obs_40sd_df <- pearse_offset50obs_40sd_df %>%
rename(estimate = X1, lowci = X2, highci = X3) %>%
mutate(true_offset = 361.38) %>%
mutate(pass = true_offset > lowci & true_offset < highci) %>%
mutate(distance = estimate - true_offset) %>%
mutate(obs = 50, sd = 40) %>%
mutate(estimator = "pearse") %>%
mutate(ci = highci - lowci)
pearse_offset20obs_40sd_df <- pearse_offset20obs_40sd_df %>%
rename(estimate = X1, lowci = X2, highci = X3) %>%
mutate(true_offset = 361.38) %>%
mutate(pass = true_offset > lowci & true_offset < highci) %>%
mutate(distance = estimate - true_offset) %>%
mutate(obs = 20, sd = 40) %>%
mutate(estimator = "pearse") %>%
mutate(ci = highci - lowci)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/orcid_fundings.R
\name{orcid_fundings}
\alias{orcid_fundings}
\title{Get funding information for a person}
\usage{
orcid_fundings(orcid, put_code = NULL, format = "application/json",
summary = FALSE, ...)
}
\arguments{
\item{orcid}{(character) Orcid identifier(s), of the form
XXXX-XXXX-XXXX-XXXX. required.}
\item{put_code}{(character/integer) one or more put codes. up to
50. optional}
\item{format}{(character) Name of the content-type format. One of
"application/vnd.orcid+xml; qs=5", "application/orcid+xml; qs=3",
"application/xml", "application/vnd.orcid+json; qs=4",
"application/orcid+json; qs=2", "application/json",
"application/vnd.citationstyles.csl+json". optional}
\item{summary}{(logical) get funding summary for a put code.
Default: \code{FALSE}}
\item{...}{Curl options passed on to \code{\link[crul:HttpClient]{crul::HttpClient()}}}
}
\value{
A list of results for each Orcid ID passed in, with each element
named by the Orcid ID
}
\description{
Get funding information for a person
}
\details{
This function is vectorized, so you can pass in many ORCID's, and
there's an element returned for each ORCID you put in.
}
\examples{
\dontrun{
# all funding data
res <- orcid_fundings(orcid = "0000-0002-1642-628X")
res$`0000-0002-1642-628X`
names(res$`0000-0002-1642-628X`)
res$`0000-0002-1642-628X`$`group`
# individual funding records
orcid_fundings(orcid = "0000-0002-1642-628X", 385627)
# funding summary information
orcid_fundings(orcid = "0000-0002-1642-628X", 385627, summary = TRUE)
}
}
| /man/orcid_fundings.Rd | permissive | awconway/rorcid | R | false | true | 1,595 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/orcid_fundings.R
\name{orcid_fundings}
\alias{orcid_fundings}
\title{Get funding information for a person}
\usage{
orcid_fundings(orcid, put_code = NULL, format = "application/json",
summary = FALSE, ...)
}
\arguments{
\item{orcid}{(character) Orcid identifier(s), of the form
XXXX-XXXX-XXXX-XXXX. required.}
\item{put_code}{(character/integer) one or more put codes. up to
50. optional}
\item{format}{(character) Name of the content-type format. One of
"application/vnd.orcid+xml; qs=5", "application/orcid+xml; qs=3",
"application/xml", "application/vnd.orcid+json; qs=4",
"application/orcid+json; qs=2", "application/json",
"application/vnd.citationstyles.csl+json". optional}
\item{summary}{(logical) get funding summary for a put code.
Default: \code{FALSE}}
\item{...}{Curl options passed on to \code{\link[crul:HttpClient]{crul::HttpClient()}}}
}
\value{
A list of results for each Orcid ID passed in, with each element
named by the Orcid ID
}
\description{
Get funding information for a person
}
\details{
This function is vectorized, so you can pass in many ORCID's, and
there's an element returned for each ORCID you put in.
}
\examples{
\dontrun{
# all funding data
res <- orcid_fundings(orcid = "0000-0002-1642-628X")
res$`0000-0002-1642-628X`
names(res$`0000-0002-1642-628X`)
res$`0000-0002-1642-628X`$`group`
# individual funding records
orcid_fundings(orcid = "0000-0002-1642-628X", 385627)
# funding summary information
orcid_fundings(orcid = "0000-0002-1642-628X", 385627, summary = TRUE)
}
}
|
# TRUE when running under R older than 3.6.0, where the RNG sampling
# algorithm differs and snapshot files need a separate variant.
new_rng_snapshots <- utils::compareVersion("3.6.0", as.character(getRversion())) > 0
# R >= 4.3.0 added a `rankdeficient` argument to predict.lm(), which changes
# some snapshot output; detect it directly from the function's formals.
rankdeficient_version <- "rankdeficient" %in% names(formals("predict.lm"))
# Fixture generator for the tune test suite. Returns a named list with:
#   rec_tune_1    - recipe: normalize predictors + PCA with tunable num_comp
#   rec_no_tune_1 - the same recipe without the tunable PCA step
#   lm_mod        - plain linear regression spec ("lm" engine)
#   svm_mod       - RBF SVM spec with tunable cost ("kernlab" engine)
helper_objects_tune <- function() {
  plain_rec <- recipes::step_normalize(
    recipes::recipe(mpg ~ ., data = mtcars),
    recipes::all_predictors()
  )
  # Same preprocessing, plus a PCA step whose component count is tunable.
  tunable_rec <- recipes::step_pca(
    plain_rec,
    recipes::all_predictors(),
    num_comp = tune()
  )
  linear_spec <- parsnip::set_engine(parsnip::linear_reg(), "lm")
  svm_spec <- parsnip::set_engine(
    parsnip::svm_rbf(mode = "regression", cost = tune()),
    "kernlab"
  )
  list(
    rec_tune_1 = tunable_rec,
    rec_no_tune_1 = plain_rec,
    lm_mod = linear_spec,
    svm_mod = svm_spec
  )
}
| /tests/testthat/helper-tune-package.R | permissive | tidymodels/tune | R | false | false | 872 | r | new_rng_snapshots <- utils::compareVersion("3.6.0", as.character(getRversion())) > 0
# New (as of 4.3.0) a new option generates different snapshots
rankdeficient_version <- any(names(formals("predict.lm")) == "rankdeficient")
helper_objects_tune <- function() {
rec_tune_1 <-
recipes::recipe(mpg ~ ., data = mtcars) %>%
recipes::step_normalize(recipes::all_predictors()) %>%
recipes::step_pca(recipes::all_predictors(), num_comp = tune())
rec_no_tune_1 <-
recipes::recipe(mpg ~ ., data = mtcars) %>%
recipes::step_normalize(recipes::all_predictors())
lm_mod <- parsnip::linear_reg() %>% parsnip::set_engine("lm")
svm_mod <- parsnip::svm_rbf(mode = "regression", cost = tune()) %>%
parsnip::set_engine("kernlab")
list(
rec_tune_1 = rec_tune_1,
rec_no_tune_1 = rec_no_tune_1,
lm_mod = lm_mod,
svm_mod = svm_mod
)
}
|
###############################################################################
# parametros
orden_stam = TRUE
zoom = F
unidad_par_t = 'tiempo'
#unidad_par_t = 'epocas'
usar.log = F
# tamano de la ventana analizada, nombre raro para evitar confusiones
dur.chunk = 1
archivo.excel = '/pvalores_Hurst.xlsx'
###############################################################################
# directorios de trabajo
#
# gral : de uso general
# info : detalles de los participantes
# scripts : sub-rutinas, en caso de haberlas
# res_pre : resultados previos, solo para analizar y/o graficar
# epocas : epocas para resaltar, por ahora solo MOR
# graf : donde guardar los graficos, en caso de producirse
dir_gral = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa'
dir_info = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa'
dir_scripts = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa/scripts'
dir_res_pre = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa/Hurst'
dir_epocas = 'C:/Users/EQUIPO 1/Desktop/julio/epocas_dfa_10'
dir_graf = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa/graf_def'
###############################################################################
# librerias
Sys.setenv(JAVA_HOME='C:\\Program Files\\Java\\jdk1.8.0_65')
require('readxl')
#require('xlsx')
require('ggplot2')
require('ggpubr')
require('Rmisc')
require('reshape')
# sub-rutinas que acortan el codigo
source(paste0(dir_scripts,'/utileria.R'))
###############################################################################
# datos generales
info = read_excel(paste0(dir_info,'/info_tecnico.xlsx'))
kanales = read_excel(paste0(dir_info,'/info_canales.xlsx'))
if(orden_stam){
kanales = read_excel(paste0(dir_info,'/info_canales_alterno.xlsx'))
}
n.canales = length(kanales$Etiqueta)
canales.arch = kanales$Nombre_archivo
###############################################################################
# cargar los datos
raw = read_excel(paste0(dir_res_pre,'/dfa_asdataframe.xlsx'),sheet='uno')
raw = as.data.frame(raw)
Hurst.MOR = melt(raw,id=c('Sujeto','Grupo','Edad',
'MMSE','Neuropsi',
'MORn','Epoca','Etapa'))
colnames(Hurst.MOR) = c('Sujeto','Grupo','Edad','MMSE','Neuropsi',
'MORn','Epoca','Etapa','Canal_var','Hurst')
Hurst.MOR = Hurst.MOR[!is.na(Hurst.MOR$Hurst),]
Hurst.MOR$Canal_var = as.numeric(Hurst.MOR$Canal_var)
Hurst.MOR$Sujeto_n = factor(Hurst.MOR$Sujeto,labels = info$Nombre[1:10])
Hurst.MOR$Etapa = rep(1,length(Hurst.MOR$Sujeto))
# Per-subject summary: for each of the 10 participants, take the median of
# every column within each channel ('Canal_var'), then stack the results.
Hurst.MOR.promedio = c()
for(suj in 1:10){
  # NOTE(review): grep() does partial matching on subject names — assumes no
  # participant name is a substring of another; confirm against the info sheet.
  tmp = Hurst.MOR[grep(info$Nombre[suj],Hurst.MOR$Sujeto_n),]
  tmp$Sujeto_n = tmp$Sujeto
  # Median of every column by channel; non-numeric columns will warn from
  # median() — the numeric ones (notably Hurst) carry the result.
  promedios = aggregate(tmp,by=list(tmp$Canal_var),median)
  Hurst.MOR.promedio = rbind(Hurst.MOR.promedio,promedios)
}
Hurst.MOR.promedio$Sujeto_n = info$Nombre[Hurst.MOR.promedio$Sujeto]
# Optionally analyze log-transformed Hurst exponents.
if(usar.log){
  Hurst.MOR$Hurst = log(Hurst.MOR$Hurst)
  # BUG FIX: 'Hurst.promedio' is never defined in this script; the
  # per-subject medians built just above live in 'Hurst.MOR.promedio'.
  # With usar.log = TRUE the original line errored on an undefined object.
  Hurst.MOR.promedio$Hurst = log(Hurst.MOR.promedio$Hurst)
}
# problemas con etiquetas
Hurst.MOR$Canal_var = factor(Hurst.MOR$Canal_var,
labels=kanales$Etiqueta)
Hurst.MOR$Grupo = factor(Hurst.MOR$Grupo,
labels=c('CTRL','PMCI'))
Hurst.MOR.promedio$Canal_var = factor(Hurst.MOR.promedio$Canal_var,
labels=kanales$Etiqueta)
Hurst.MOR.promedio$Grupo = factor(Hurst.MOR.promedio$Grupo,
labels=c('CTRL','PMCI'))
Hurst.MOR.promedio = as.data.frame(Hurst.MOR.promedio)
promedios.MOR = summarySE(Hurst.MOR,measurevar='Hurst',
groupvars=c('Grupo','Canal_var'),na.rm=T)
###############################################################################
# lo mismo para NMOR
raw = read_excel(paste0(dir_res_pre,'/dfa_asdataframe.xlsx'),sheet='uno_pre')
raw = as.data.frame(raw)
Hurst.NMOR = melt(raw,id=c('Sujeto','Grupo','Edad',
'MMSE','Neuropsi',
'MORn','Epoca','Etapa'))
colnames(Hurst.NMOR) = c('Sujeto','Grupo','Edad','MMSE','Neuropsi',
'MORn','Epoca','Etapa','Canal_var','Hurst')
Hurst.NMOR = Hurst.NMOR[!is.na(Hurst.NMOR$Hurst),]
Hurst.NMOR$Canal_var = as.numeric(Hurst.NMOR$Canal_var)
Hurst.NMOR$Sujeto_n = factor(Hurst.NMOR$Sujeto,labels = info$Nombre[1:10])
Hurst.NMOR$Etapa = rep(0,length(Hurst.NMOR$Sujeto))
Hurst.NMOR.promedio = c()
for(suj in 1:10){
tmp = Hurst.NMOR[grep(info$Nombre[suj],Hurst.NMOR$Sujeto_n),]
tmp$Sujeto_n = tmp$Sujeto
promedios = aggregate(tmp,by=list(tmp$Canal_var),median)
Hurst.NMOR.promedio = rbind(Hurst.NMOR.promedio,promedios)
}
Hurst.NMOR.promedio$Sujeto_n = info$Nombre[Hurst.NMOR.promedio$Sujeto]
# Optionally analyze log-transformed Hurst exponents (NREM branch).
if(usar.log){
  Hurst.NMOR$Hurst = log(Hurst.NMOR$Hurst)
  # BUG FIX: 'Hurst.promedio' is never defined in this script; the NREM
  # per-subject medians live in 'Hurst.NMOR.promedio'. With usar.log = TRUE
  # the original line errored on an undefined object.
  Hurst.NMOR.promedio$Hurst = log(Hurst.NMOR.promedio$Hurst)
}
# problemas con etiquetas
Hurst.NMOR$Canal_var = factor(Hurst.NMOR$Canal_var,
labels=kanales$Etiqueta)
Hurst.NMOR$Grupo = factor(Hurst.NMOR$Grupo,
labels=c('CTRL','PMCI'))
Hurst.NMOR.promedio$Canal_var = factor(Hurst.NMOR.promedio$Canal_var,
labels=kanales$Etiqueta)
Hurst.NMOR.promedio$Grupo = factor(Hurst.NMOR.promedio$Grupo,
labels=c('CTRL','PMCI'))
Hurst.NMOR.promedio = as.data.frame(Hurst.NMOR.promedio)
promedios.NMOR = summarySE(Hurst.NMOR,measurevar='Hurst',
groupvars=c('Grupo','Canal_var'),na.rm=T)
Hurst.MOR$Etapa = rep('REM',length(Hurst.MOR$Etapa))
Hurst.NMOR$Etapa = rep('NREM',length(Hurst.NMOR$Etapa))
Hurst.todo = rbind(Hurst.MOR,Hurst.NMOR)
promedios.MOR$Etapa = rep('REM',length(promedios.MOR$Grupo))
promedios.NMOR$Etapa = rep('NREM',length(promedios.NMOR$Grupo))
promedios.todo = rbind(promedios.MOR,promedios.NMOR)
####################################################################33
colnames(Hurst.MOR.promedio)[1] = 'Canal'
Hurst.MOR.promedio$Canal = factor(Hurst.MOR.promedio$Canal,
labels = kanales$Etiqueta)
colnames(Hurst.NMOR.promedio)[1] = 'Canal'
Hurst.NMOR.promedio$Canal = factor(Hurst.NMOR.promedio$Canal,
labels = kanales$Etiqueta)
Hurst.todo.promedio = rbind(Hurst.MOR.promedio,
Hurst.NMOR.promedio)
Hurst.todo.promedio$Etapa = factor(Hurst.todo.promedio$Etapa,
labels = c('NREM','REM'))
####################################################################33
# analisis ANOVA
big.m.Grupo = c()
big.m.Etapa = c()
big.m.Inter = c()
big.s.Grupo = c()
big.s.Etapa = c()
big.s.Inter = c()
big.summary = c()
for(ch in 1:22){
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
#aov
k = summary(aov)
k = k[[1]]
print(k)
#pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
#ph_etapa = pos_hoc[['Etapa']]
#ph_grupo = pos_hoc[['Grupo']]
#ph_inter = pos_hoc[['Grupo:Etapa']]
p = ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
theme_classic2() +
labs(linetype=ch.actual)+
geom_line(aes(group=Etapa))+
geom_point()
print(p)
#q = model.tables(aov.EEG,'means')
#q = model.tables(aov,'means',se=T)
#qt = q[["tables"]]
#qs = q[["se"]]
#qt.Grupo = qt$`factor(Grupo)`
#qt.Etapa = qt$`factor(Etapa)`
#qt.Inter = qt$`factor(Grupo:Etapa)`
#big.m.Grupo = rbind(big.m.Grupo,qt.Grupo)
#big.m.Etapa = rbind(big.m.Etapa,qt.Etapa)
#big.m.Inter = rbind(big.m.Inter,qt.Inter)
qs = summarySE(data=tmp,groupvars=c('Grupo','Etapa'),
measurevar='Hurst')
qs2 = unlist(t(qs))
qs2 = as.list((qs2))
qs2 = unlist(t(qs2))
big.summary = rbind(big.summary,qs2)
invisible(readline(prompt="Presion [enter] para continuar"))
}
#big.Grupo = as.data.frame(big.Grupo)
#big.Etapa = as.data.frame(big.Etapa)
#big.Inter = as.data.frame(big.Inter)
#big.Grupo$Canal = kanales$Etiqueta
#big.Etapa$Canal = kanales$Etiqueta
#big.Inter$Canal = kanales$Etiqueta
##stop()
biggr = c()
ch = 21
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
k = summary(aov.EEG)
k = k[[1]]
#plot(aov.EEG)
q = model.tables(aov.EEG)
qt = q[["tables"]]
q.Grupo = qt$`factor(Grupo)`
q.Etapa = qt$`factor(Etapa)`
q.Inter = qt$`factor(Grupo:Etapa)`
#pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
#ph_etapa = pos_hoc[['Etapa']]
#ph_grupo = pos_hoc[['Grupo']]
#ph_inter = pos_hoc[['Grupo:Etapa']]
ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
labs(title=ch.actual)+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
#coord_cartesian(ylim=c(1.2,1.5))+
theme(legend.position=c(1,1),legend.direction = 'horizontal',
legend.justification=c(1,0))+
geom_line(aes(group=Etapa))+
geom_point()
ggsave(filename = 'ANOVAS_1.png',device='png',height = 4,width = 4,
path=dir_graf)
biggr = rbind(biggr,tmp.m)
ch = 20
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
print(summary(aov.EEG))
#plot(aov.EEG)
model.tables(aov.EEG)
pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
ph_etapa = pos_hoc[['Etapa']]
ph_grupo = pos_hoc[['Grupo']]
ph_inter = pos_hoc[['Grupo:Etapa']]
ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
labs(title=ch.actual)+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
#coord_cartesian(ylim=c(1.2,1.5))+
theme(legend.position=c(1,1),legend.direction = 'horizontal',
legend.justification=c(1,0))+
geom_line(aes(group=Etapa))+
geom_point()
ggsave(filename = 'ANOVAS_2.png',device='png',height = 4,width = 4,
path=dir_graf)
biggr = rbind(biggr,tmp.m)
###############################################################################
###############################################################################
# INTERCANALES
###############################################################################
###############################################################################
kanales = read_excel(paste0(dir_info,'/info_intercanales.xlsx'))
if(orden_stam){
kanales = read_excel(paste0(dir_info,'/info_intercanales_alterno.xlsx'))
}
n.canales = length(kanales$Etiqueta)
canales.arch = kanales$Nombre_archivo
# cargar los datos
raw = read_excel(paste0(dir_res_pre,'/dfa_asdataframe.xlsx'),sheet='multi')
raw = as.data.frame(raw)
Hurst.MOR = melt(raw,id=c('Sujeto','Grupo','Edad',
'MMSE','Neuropsi',
'MORn','Epoca','Etapa'))
colnames(Hurst.MOR) = c('Sujeto','Grupo','Edad','MMSE','Neuropsi',
'MORn','Epoca','Etapa','Canal_var','Hurst')
Hurst.MOR = Hurst.MOR[!is.na(Hurst.MOR$Hurst),]
Hurst.MOR$Canal_var = as.numeric(Hurst.MOR$Canal_var)
Hurst.MOR$Sujeto_n = factor(Hurst.MOR$Sujeto,labels = info$Nombre[1:10])
Hurst.MOR$Etapa = rep(1,length(Hurst.MOR$Sujeto))
Hurst.MOR.promedio = c()
for(suj in 1:10){
tmp = Hurst.MOR[grep(info$Nombre[suj],Hurst.MOR$Sujeto_n),]
tmp$Sujeto_n = tmp$Sujeto
promedios = aggregate(tmp,by=list(tmp$Canal_var),median)
Hurst.MOR.promedio = rbind(Hurst.MOR.promedio,promedios)
}
Hurst.MOR.promedio$Sujeto_n = info$Nombre[Hurst.MOR.promedio$Sujeto]
# Optionally analyze log-transformed Hurst exponents (inter-channel, REM).
if(usar.log){
  Hurst.MOR$Hurst = log(Hurst.MOR$Hurst)
  # BUG FIX: 'Hurst.promedio' is never defined in this script; the
  # inter-channel REM per-subject medians live in 'Hurst.MOR.promedio'.
  Hurst.MOR.promedio$Hurst = log(Hurst.MOR.promedio$Hurst)
}
# problemas con etiquetas
Hurst.MOR$Canal_var = factor(Hurst.MOR$Canal_var,
labels=kanales$Etiqueta)
Hurst.MOR$Grupo = factor(Hurst.MOR$Grupo,
labels=c('CTRL','PMCI'))
Hurst.MOR.promedio$Canal_var = factor(Hurst.MOR.promedio$Canal_var,
labels=kanales$Etiqueta)
Hurst.MOR.promedio$Grupo = factor(Hurst.MOR.promedio$Grupo,
labels=c('CTRL','PMCI'))
Hurst.MOR.promedio = as.data.frame(Hurst.MOR.promedio)
promedios.MOR = summarySE(Hurst.MOR,measurevar='Hurst',
groupvars=c('Grupo','Canal_var'),na.rm=T)
###############################################################################
# lo mismo para NMOR
raw = read_excel(paste0(dir_res_pre,'/dfa_asdataframe.xlsx'),sheet='multi_pre')
raw = as.data.frame(raw)
Hurst.NMOR = melt(raw,id=c('Sujeto','Grupo','Edad',
'MMSE','Neuropsi',
'MORn','Epoca','Etapa'))
colnames(Hurst.NMOR) = c('Sujeto','Grupo','Edad','MMSE','Neuropsi',
'MORn','Epoca','Etapa','Canal_var','Hurst')
Hurst.NMOR = Hurst.NMOR[!is.na(Hurst.NMOR$Hurst),]
Hurst.NMOR$Canal_var = as.numeric(Hurst.NMOR$Canal_var)
Hurst.NMOR$Sujeto_n = factor(Hurst.NMOR$Sujeto,labels = info$Nombre[1:10])
Hurst.NMOR$Etapa = rep(0,length(Hurst.NMOR$Sujeto))
Hurst.NMOR.promedio = c()
for(suj in 1:10){
tmp = Hurst.NMOR[grep(info$Nombre[suj],Hurst.NMOR$Sujeto_n),]
tmp$Sujeto_n = tmp$Sujeto
promedios = aggregate(tmp,by=list(tmp$Canal_var),median)
Hurst.NMOR.promedio = rbind(Hurst.NMOR.promedio,promedios)
}
Hurst.NMOR.promedio$Sujeto_n = info$Nombre[Hurst.NMOR.promedio$Sujeto]
# Optionally analyze log-transformed Hurst exponents (inter-channel, NREM).
if(usar.log){
  Hurst.NMOR$Hurst = log(Hurst.NMOR$Hurst)
  # BUG FIX: 'Hurst.promedio' is never defined in this script; the
  # inter-channel NREM per-subject medians live in 'Hurst.NMOR.promedio'.
  Hurst.NMOR.promedio$Hurst = log(Hurst.NMOR.promedio$Hurst)
}
# problemas con etiquetas
Hurst.NMOR$Canal_var = factor(Hurst.NMOR$Canal_var,
labels=kanales$Etiqueta)
Hurst.NMOR$Grupo = factor(Hurst.NMOR$Grupo,
labels=c('CTRL','PMCI'))
Hurst.NMOR.promedio$Canal_var = factor(Hurst.NMOR.promedio$Canal_var,
labels=kanales$Etiqueta)
Hurst.NMOR.promedio$Grupo = factor(Hurst.NMOR.promedio$Grupo,
labels=c('CTRL','PMCI'))
Hurst.NMOR.promedio = as.data.frame(Hurst.NMOR.promedio)
promedios.NMOR = summarySE(Hurst.NMOR,measurevar='Hurst',
groupvars=c('Grupo','Canal_var'),na.rm=T)
###############################################################################
# Intercanales
Hurst.MOR$Etapa = rep('REM',length(Hurst.MOR$Etapa))
Hurst.NMOR$Etapa = rep('NREM',length(Hurst.NMOR$Etapa))
Hurst.todo = rbind(Hurst.MOR,Hurst.NMOR)
promedios.MOR$Etapa = rep('REM',length(promedios.MOR$Grupo))
promedios.NMOR$Etapa = rep('NREM',length(promedios.NMOR$Grupo))
promedios.todo = rbind(promedios.MOR,promedios.NMOR)
colnames(Hurst.MOR.promedio)[1] = 'Canal'
Hurst.MOR.promedio$Canal = factor(Hurst.MOR.promedio$Canal,
labels = kanales$Etiqueta)
colnames(Hurst.NMOR.promedio)[1] = 'Canal'
Hurst.NMOR.promedio$Canal = factor(Hurst.NMOR.promedio$Canal,
labels = kanales$Etiqueta)
Hurst.todo.promedio = rbind(Hurst.MOR.promedio,
Hurst.NMOR.promedio)
Hurst.todo.promedio$Etapa = factor(Hurst.todo.promedio$Etapa,
labels = c('NREM','REM'))
####################################################################33
# analisis ANOVA
if(FALSE){
emg = is.element(Hurst.todo.promedio$Canal_var,c('EMG'))
eog = is.element(Hurst.todo.promedio$Canal_var,c('LOG','ROG'))
eeg = !is.element(Hurst.todo.promedio$Canal_var,c('EMG','LOG','ROG'))
Hurst.m.EEG = Hurst.todo.promedio[eeg,]
Hurst.m.EOG = Hurst.todo.promedio[eog,]
Hurst.m.EMG = Hurst.todo.promedio[emg,]
ch = 1
for(ch in 1:22){
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
print(summary(aov.EEG))
#plot(aov.EEG)
model.tables(aov.EEG)
pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
ph_etapa = pos_hoc[['Etapa']]
ph_grupo = pos_hoc[['Grupo']]
ph_inter = pos_hoc[['Grupo:Etapa']]
p = ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
theme_classic2() +
labs(linetype=ch.actual)+
geom_line(aes(group=Etapa))+
geom_point()
print(p)
invisible(readline(prompt="Presion [enter] para continuar"))
}
}
big.m.Grupo = c()
big.m.Etapa = c()
big.m.Inter = c()
big.s.Grupo = c()
big.s.Etapa = c()
big.s.Inter = c()
big.summary = c()
for(ch in 1:n.canales){
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
#aov
k = summary(aov)
k = k[[1]]
#pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
#ph_etapa = pos_hoc[['Etapa']]
#ph_grupo = pos_hoc[['Grupo']]
#ph_inter = pos_hoc[['Grupo:Etapa']]
#p = ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
# theme_classic2() +
# labs(linetype=ch.actual)+
# geom_line(aes(group=Etapa))+
# geom_point()
#print(p)
#q = model.tables(aov.EEG,'means')
#q = model.tables(aov,'means',se=T)
#qt = q[["tables"]]
#qs = q[["se"]]
#qt.Grupo = qt$`factor(Grupo)`
#qt.Etapa = qt$`factor(Etapa)`
#qt.Inter = qt$`factor(Grupo:Etapa)`
#big.m.Grupo = rbind(big.m.Grupo,qt.Grupo)
#big.m.Etapa = rbind(big.m.Etapa,qt.Etapa)
#big.m.Inter = rbind(big.m.Inter,qt.Inter)
qs = summarySE(data=tmp,groupvars=c('Grupo','Etapa'),
measurevar='Hurst')
qs2 = unlist(t(qs))
qs2 = as.list((qs2))
qs2 = unlist(t(qs2))
big.summary = rbind(big.summary,qs2)
#invisible(readline(prompt="Presion [enter] para continuar"))
}
ch = 9
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
print(summary(aov.EEG))
#plot(aov.EEG)
model.tables(aov.EEG)
pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
ph_etapa = pos_hoc[['Etapa']]
ph_grupo = pos_hoc[['Grupo']]
ph_inter = pos_hoc[['Grupo:Etapa']]
ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
labs(title=ch.actual)+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
#coord_cartesian(ylim=c(1.2,1.5))+
theme(legend.position=c(1,1),legend.direction = 'horizontal',
legend.justification=c(1,0))+
geom_line(aes(group=Etapa))+
geom_point()
ggsave(filename = 'ANOVAS_3.png',device='png',height = 4,width = 4,
path=dir_graf)
biggr = rbind(biggr,tmp.m)
#stop()
ggplot(biggr,aes(x=Grupo,y=Hurst,linetype=Etapa))+
#labs(title=ch.actual)+
#labs(title=' ')+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
coord_cartesian(ylim=c(1.2,1.6))+
#theme(legend.position=c(1,1),legend.direction = 'horizontal',
# legend.justification=c(1,0))+
theme(legend.position = 'top')+
facet_grid(.~Canal_var) +
theme(strip.background = element_blank())+
geom_line(aes(group=Etapa))+
geom_point()
ggsave(filename = 'ANOVAS_ojos.png',device='png',height = 4,width = 6,
path=dir_graf,dpi=400)
#ggplot(biggr,aes(x=Etapa,y=Hurst,linetype=Grupo))+
# #labs(title=ch.actual)+
# #labs(title=' ')+
# theme_classic2() +
# ylab('Hurst Exponent') + xlab(NULL)+
# labs(linetype=NULL)+
# coord_cartesian(ylim=c(1.2,1.6))+
# #theme(legend.position=c(1,1),legend.direction = 'horizontal',
# # legend.justification=c(1,0))+
# theme(legend.position = 'top')+
# facet_grid(.~Canal_var) +
# theme(strip.background = element_blank())+
# geom_line(aes(group=Grupo))+
# geom_point()
#ggsave(filename = 'ANOVAS_ojos_2.png',device='png',height = 4,width = 6,
# path=dir_graf,dpi=400)
ggplot(biggr,aes(x=Etapa,y=Hurst))+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
coord_cartesian(ylim=c(1.2,1.6))+
theme(legend.position = 'top')+
facet_grid(.~Canal_var) +
theme(strip.background = element_blank())+
geom_line(aes(group=Grupo,linetype=Grupo))+
geom_errorbar(aes(ymin=Hurst-se,ymax=Hurst+se),width=.1,
color='grey40') +
geom_point()
ggsave(filename = 'Fig03_ANOVAS.png',device='png',height = 6,width = 8,
unit='cm',
path=dir_graf,dpi=400,scale=2)
#stop()
big.Grupo = c()
big.Etapa = c()
big.Inter = c()
ch = 1
for(ch in 1:length(kanales$Etiqueta)){
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
#print(summary(aov.EEG))
#plot(aov.EEG)
model.tables(aov.EEG)
k = summary(aov.EEG)
k = k[[1]]
pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
ph_etapa = pos_hoc[['Etapa']]
ph_grupo = pos_hoc[['Grupo']]
ph_inter = pos_hoc[['Grupo:Etapa']]
p = ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
theme_classic2() +
labs(linetype=ch.actual)+
geom_line(aes(group=Etapa))+
geom_point()
#print(p)
q = model.tables(aov.EEG,'means')
qt = q[["tables"]]
q.Grupo = qt$`factor(Grupo)`
q.Etapa = qt$`factor(Etapa)`
q.Inter = qt$`factor(Grupo:Etapa)`
big.Grupo = rbind(big.Grupo,q.Grupo)
big.Etapa = rbind(big.Etapa,q.Etapa)
big.Inter = rbind(big.Inter,q.Inter)
invisible(readline(prompt="Presion [enter] para continuar"))
}
big.Grupo = as.data.frame(big.Grupo)
big.Etapa = as.data.frame(big.Etapa)
big.Inter = as.data.frame(big.Inter)
big.Grupo$Canal = kanales$Etiqueta
big.Etapa$Canal = kanales$Etiqueta
big.Inter$Canal = kanales$Etiqueta
| /articulo_dfa/scripts/graf_hurst08_win_median.R | no_license | amoneta/TESIS_JULIO | R | false | false | 22,777 | r | ###############################################################################
# Parameters ------------------------------------------------------------------
orden_stam = TRUE        # use the alternate (Stam) channel-ordering files below
zoom = F                 # NOTE(review): prefer TRUE/FALSE over T/F (T/F are reassignable)
unidad_par_t = 'tiempo'  # time-axis unit: 'tiempo' (time)
#unidad_par_t = 'epocas' # alternative unit: epochs
usar.log = F             # if TRUE, Hurst exponents are log-transformed below
# size of the analysed window; odd name on purpose to avoid confusion
dur.chunk = 1
archivo.excel = '/pvalores_Hurst.xlsx'  # workbook name for Hurst p-values (not referenced in this section)
###############################################################################
# working directories
#
# gral : general use
# info : participant details
# scripts : sub-routines, if any
# res_pre : previous results, only to analyse and/or plot
# epocas : epochs to highlight, for now only REM (MOR)
# graf : where plots are saved, if any are produced
dir_gral = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa'
dir_info = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa'
dir_scripts = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa/scripts'
dir_res_pre = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa/Hurst'
dir_epocas = 'C:/Users/EQUIPO 1/Desktop/julio/epocas_dfa_10'
dir_graf = 'C:/Users/EQUIPO 1/Desktop/julio/TESIS/articulo_dfa/graf_def'
###############################################################################
# libraries
Sys.setenv(JAVA_HOME='C:\\Program Files\\Java\\jdk1.8.0_65')  # JVM path, presumably for the (commented) xlsx package -- confirm
require('readxl')   # NOTE(review): library() is preferred over require() for hard dependencies
#require('xlsx')
require('ggplot2')
require('ggpubr')
require('Rmisc')    # provides summarySE(), used below
require('reshape')  # provides melt(), used below
# sub-routines that shorten the code
source(paste0(dir_scripts,'/utileria.R'))
###############################################################################
# General metadata: technical/participant info and the channel table
# (labels + file names); the alternate (Stam) ordering replaces the default
# when orden_stam is TRUE.
info = read_excel(paste0(dir_info,'/info_tecnico.xlsx'))
kanales = read_excel(paste0(dir_info,'/info_canales.xlsx'))
if(orden_stam){
kanales = read_excel(paste0(dir_info,'/info_canales_alterno.xlsx'))
}
n.canales = length(kanales$Etiqueta)   # number of channels
canales.arch = kanales$Nombre_archivo  # per-channel file names
###############################################################################
# Load REM (MOR) Hurst results (sheet 'uno') and reshape to long format:
# one row per (subject, epoch, channel) with the Hurst value in column `Hurst`.
raw = read_excel(paste0(dir_res_pre,'/dfa_asdataframe.xlsx'),sheet='uno')
raw = as.data.frame(raw)
Hurst.MOR = melt(raw,id=c('Sujeto','Grupo','Edad',
'MMSE','Neuropsi',
'MORn','Epoca','Etapa'))
colnames(Hurst.MOR) = c('Sujeto','Grupo','Edad','MMSE','Neuropsi',
'MORn','Epoca','Etapa','Canal_var','Hurst')
Hurst.MOR = Hurst.MOR[!is.na(Hurst.MOR$Hurst),]        # drop missing Hurst values
Hurst.MOR$Canal_var = as.numeric(Hurst.MOR$Canal_var)  # melted factor -> channel index
Hurst.MOR$Sujeto_n = factor(Hurst.MOR$Sujeto,labels = info$Nombre[1:10])  # subject names (assumes exactly 10 subjects -- confirm)
Hurst.MOR$Etapa = rep(1,length(Hurst.MOR$Sujeto))      # stage flag: 1 marks REM rows here
Hurst.MOR.promedio = c()
for(suj in 1:10){
tmp = Hurst.MOR[grep(info$Nombre[suj],Hurst.MOR$Sujeto_n),]
tmp$Sujeto_n = tmp$Sujeto
promedios = aggregate(tmp,by=list(tmp$Canal_var),median)
Hurst.MOR.promedio = rbind(Hurst.MOR.promedio,promedios)
}
Hurst.MOR.promedio$Sujeto_n = info$Nombre[Hurst.MOR.promedio$Sujeto]
# Optional log-transform of the Hurst exponents (controlled by `usar.log`).
# BUG FIX: the original referenced `Hurst.promedio`, an object that does not
# exist at this point; with usar.log = TRUE the script stopped with
# "object 'Hurst.promedio' not found". The per-subject median table built
# just above is `Hurst.MOR.promedio`.
if(usar.log){
  Hurst.MOR$Hurst = log(Hurst.MOR$Hurst)
  Hurst.MOR.promedio$Hurst = log(Hurst.MOR.promedio$Hurst)
}
# problemas con etiquetas
Hurst.MOR$Canal_var = factor(Hurst.MOR$Canal_var,
labels=kanales$Etiqueta)
Hurst.MOR$Grupo = factor(Hurst.MOR$Grupo,
labels=c('CTRL','PMCI'))
Hurst.MOR.promedio$Canal_var = factor(Hurst.MOR.promedio$Canal_var,
labels=kanales$Etiqueta)
Hurst.MOR.promedio$Grupo = factor(Hurst.MOR.promedio$Grupo,
labels=c('CTRL','PMCI'))
Hurst.MOR.promedio = as.data.frame(Hurst.MOR.promedio)
promedios.MOR = summarySE(Hurst.MOR,measurevar='Hurst',
groupvars=c('Grupo','Canal_var'),na.rm=T)
###############################################################################
# lo mismo para NMOR
raw = read_excel(paste0(dir_res_pre,'/dfa_asdataframe.xlsx'),sheet='uno_pre')
raw = as.data.frame(raw)
Hurst.NMOR = melt(raw,id=c('Sujeto','Grupo','Edad',
'MMSE','Neuropsi',
'MORn','Epoca','Etapa'))
colnames(Hurst.NMOR) = c('Sujeto','Grupo','Edad','MMSE','Neuropsi',
'MORn','Epoca','Etapa','Canal_var','Hurst')
Hurst.NMOR = Hurst.NMOR[!is.na(Hurst.NMOR$Hurst),]
Hurst.NMOR$Canal_var = as.numeric(Hurst.NMOR$Canal_var)
Hurst.NMOR$Sujeto_n = factor(Hurst.NMOR$Sujeto,labels = info$Nombre[1:10])
Hurst.NMOR$Etapa = rep(0,length(Hurst.NMOR$Sujeto))
Hurst.NMOR.promedio = c()
for(suj in 1:10){
tmp = Hurst.NMOR[grep(info$Nombre[suj],Hurst.NMOR$Sujeto_n),]
tmp$Sujeto_n = tmp$Sujeto
promedios = aggregate(tmp,by=list(tmp$Canal_var),median)
Hurst.NMOR.promedio = rbind(Hurst.NMOR.promedio,promedios)
}
Hurst.NMOR.promedio$Sujeto_n = info$Nombre[Hurst.NMOR.promedio$Sujeto]
# Optional log-transform of the NREM Hurst exponents (controlled by `usar.log`).
# BUG FIX: the original referenced the nonexistent `Hurst.promedio`; the
# per-subject median table built just above is `Hurst.NMOR.promedio`.
if(usar.log){
  Hurst.NMOR$Hurst = log(Hurst.NMOR$Hurst)
  Hurst.NMOR.promedio$Hurst = log(Hurst.NMOR.promedio$Hurst)
}
# problemas con etiquetas
Hurst.NMOR$Canal_var = factor(Hurst.NMOR$Canal_var,
labels=kanales$Etiqueta)
Hurst.NMOR$Grupo = factor(Hurst.NMOR$Grupo,
labels=c('CTRL','PMCI'))
Hurst.NMOR.promedio$Canal_var = factor(Hurst.NMOR.promedio$Canal_var,
labels=kanales$Etiqueta)
Hurst.NMOR.promedio$Grupo = factor(Hurst.NMOR.promedio$Grupo,
labels=c('CTRL','PMCI'))
Hurst.NMOR.promedio = as.data.frame(Hurst.NMOR.promedio)
promedios.NMOR = summarySE(Hurst.NMOR,measurevar='Hurst',
groupvars=c('Grupo','Canal_var'),na.rm=T)
Hurst.MOR$Etapa = rep('REM',length(Hurst.MOR$Etapa))
Hurst.NMOR$Etapa = rep('NREM',length(Hurst.NMOR$Etapa))
Hurst.todo = rbind(Hurst.MOR,Hurst.NMOR)
promedios.MOR$Etapa = rep('REM',length(promedios.MOR$Grupo))
promedios.NMOR$Etapa = rep('NREM',length(promedios.NMOR$Grupo))
promedios.todo = rbind(promedios.MOR,promedios.NMOR)
####################################################################33
colnames(Hurst.MOR.promedio)[1] = 'Canal'
Hurst.MOR.promedio$Canal = factor(Hurst.MOR.promedio$Canal,
labels = kanales$Etiqueta)
colnames(Hurst.NMOR.promedio)[1] = 'Canal'
Hurst.NMOR.promedio$Canal = factor(Hurst.NMOR.promedio$Canal,
labels = kanales$Etiqueta)
Hurst.todo.promedio = rbind(Hurst.MOR.promedio,
Hurst.NMOR.promedio)
Hurst.todo.promedio$Etapa = factor(Hurst.todo.promedio$Etapa,
labels = c('NREM','REM'))
####################################################################33
# analisis ANOVA
big.m.Grupo = c()
big.m.Etapa = c()
big.m.Inter = c()
big.s.Grupo = c()
big.s.Etapa = c()
big.s.Inter = c()
big.summary = c()
for(ch in 1:22){
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
# Two-way ANOVA of Hurst on Group, Stage and their interaction for this channel.
# The fit is stored as `aov_fit` instead of `aov`, so a data object no longer
# shadows stats::aov() (the call still worked only because R skips non-function
# bindings when resolving a function name, but the shadowing confuses readers
# and static tools).
aov_fit = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
              data=tmp)
#aov_fit
k = summary(aov_fit)  # summary.aov returns a list; keep its single ANOVA table
k = k[[1]]
print(k)
#pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
#ph_etapa = pos_hoc[['Etapa']]
#ph_grupo = pos_hoc[['Grupo']]
#ph_inter = pos_hoc[['Grupo:Etapa']]
p = ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
theme_classic2() +
labs(linetype=ch.actual)+
geom_line(aes(group=Etapa))+
geom_point()
print(p)
#q = model.tables(aov.EEG,'means')
#q = model.tables(aov,'means',se=T)
#qt = q[["tables"]]
#qs = q[["se"]]
#qt.Grupo = qt$`factor(Grupo)`
#qt.Etapa = qt$`factor(Etapa)`
#qt.Inter = qt$`factor(Grupo:Etapa)`
#big.m.Grupo = rbind(big.m.Grupo,qt.Grupo)
#big.m.Etapa = rbind(big.m.Etapa,qt.Etapa)
#big.m.Inter = rbind(big.m.Inter,qt.Inter)
qs = summarySE(data=tmp,groupvars=c('Grupo','Etapa'),
measurevar='Hurst')
qs2 = unlist(t(qs))
qs2 = as.list((qs2))
qs2 = unlist(t(qs2))
big.summary = rbind(big.summary,qs2)
invisible(readline(prompt="Presion [enter] para continuar"))
}
#big.Grupo = as.data.frame(big.Grupo)
#big.Etapa = as.data.frame(big.Etapa)
#big.Inter = as.data.frame(big.Inter)
#big.Grupo$Canal = kanales$Etiqueta
#big.Etapa$Canal = kanales$Etiqueta
#big.Inter$Canal = kanales$Etiqueta
##stop()
biggr = c()
ch = 21
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
k = summary(aov.EEG)
k = k[[1]]
#plot(aov.EEG)
q = model.tables(aov.EEG)
qt = q[["tables"]]
q.Grupo = qt$`factor(Grupo)`
q.Etapa = qt$`factor(Etapa)`
q.Inter = qt$`factor(Grupo:Etapa)`
#pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
#ph_etapa = pos_hoc[['Etapa']]
#ph_grupo = pos_hoc[['Grupo']]
#ph_inter = pos_hoc[['Grupo:Etapa']]
ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
labs(title=ch.actual)+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
#coord_cartesian(ylim=c(1.2,1.5))+
theme(legend.position=c(1,1),legend.direction = 'horizontal',
legend.justification=c(1,0))+
geom_line(aes(group=Etapa))+
geom_point()
ggsave(filename = 'ANOVAS_1.png',device='png',height = 4,width = 4,
path=dir_graf)
biggr = rbind(biggr,tmp.m)
ch = 20
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
print(summary(aov.EEG))
#plot(aov.EEG)
model.tables(aov.EEG)
pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
ph_etapa = pos_hoc[['Etapa']]
ph_grupo = pos_hoc[['Grupo']]
ph_inter = pos_hoc[['Grupo:Etapa']]
ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
labs(title=ch.actual)+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
#coord_cartesian(ylim=c(1.2,1.5))+
theme(legend.position=c(1,1),legend.direction = 'horizontal',
legend.justification=c(1,0))+
geom_line(aes(group=Etapa))+
geom_point()
ggsave(filename = 'ANOVAS_2.png',device='png',height = 4,width = 4,
path=dir_graf)
biggr = rbind(biggr,tmp.m)
###############################################################################
###############################################################################
# INTERCANALES
###############################################################################
###############################################################################
kanales = read_excel(paste0(dir_info,'/info_intercanales.xlsx'))
if(orden_stam){
kanales = read_excel(paste0(dir_info,'/info_intercanales_alterno.xlsx'))
}
n.canales = length(kanales$Etiqueta)
canales.arch = kanales$Nombre_archivo
# cargar los datos
raw = read_excel(paste0(dir_res_pre,'/dfa_asdataframe.xlsx'),sheet='multi')
raw = as.data.frame(raw)
Hurst.MOR = melt(raw,id=c('Sujeto','Grupo','Edad',
'MMSE','Neuropsi',
'MORn','Epoca','Etapa'))
colnames(Hurst.MOR) = c('Sujeto','Grupo','Edad','MMSE','Neuropsi',
'MORn','Epoca','Etapa','Canal_var','Hurst')
Hurst.MOR = Hurst.MOR[!is.na(Hurst.MOR$Hurst),]
Hurst.MOR$Canal_var = as.numeric(Hurst.MOR$Canal_var)
Hurst.MOR$Sujeto_n = factor(Hurst.MOR$Sujeto,labels = info$Nombre[1:10])
Hurst.MOR$Etapa = rep(1,length(Hurst.MOR$Sujeto))
Hurst.MOR.promedio = c()
for(suj in 1:10){
tmp = Hurst.MOR[grep(info$Nombre[suj],Hurst.MOR$Sujeto_n),]
tmp$Sujeto_n = tmp$Sujeto
promedios = aggregate(tmp,by=list(tmp$Canal_var),median)
Hurst.MOR.promedio = rbind(Hurst.MOR.promedio,promedios)
}
Hurst.MOR.promedio$Sujeto_n = info$Nombre[Hurst.MOR.promedio$Sujeto]
# Optional log-transform of the inter-channel REM Hurst exponents.
# BUG FIX: the original referenced the nonexistent `Hurst.promedio`; the
# per-subject median table built just above is `Hurst.MOR.promedio`.
if(usar.log){
  Hurst.MOR$Hurst = log(Hurst.MOR$Hurst)
  Hurst.MOR.promedio$Hurst = log(Hurst.MOR.promedio$Hurst)
}
# problemas con etiquetas
Hurst.MOR$Canal_var = factor(Hurst.MOR$Canal_var,
labels=kanales$Etiqueta)
Hurst.MOR$Grupo = factor(Hurst.MOR$Grupo,
labels=c('CTRL','PMCI'))
Hurst.MOR.promedio$Canal_var = factor(Hurst.MOR.promedio$Canal_var,
labels=kanales$Etiqueta)
Hurst.MOR.promedio$Grupo = factor(Hurst.MOR.promedio$Grupo,
labels=c('CTRL','PMCI'))
Hurst.MOR.promedio = as.data.frame(Hurst.MOR.promedio)
promedios.MOR = summarySE(Hurst.MOR,measurevar='Hurst',
groupvars=c('Grupo','Canal_var'),na.rm=T)
###############################################################################
# lo mismo para NMOR
raw = read_excel(paste0(dir_res_pre,'/dfa_asdataframe.xlsx'),sheet='multi_pre')
raw = as.data.frame(raw)
Hurst.NMOR = melt(raw,id=c('Sujeto','Grupo','Edad',
'MMSE','Neuropsi',
'MORn','Epoca','Etapa'))
colnames(Hurst.NMOR) = c('Sujeto','Grupo','Edad','MMSE','Neuropsi',
'MORn','Epoca','Etapa','Canal_var','Hurst')
Hurst.NMOR = Hurst.NMOR[!is.na(Hurst.NMOR$Hurst),]
Hurst.NMOR$Canal_var = as.numeric(Hurst.NMOR$Canal_var)
Hurst.NMOR$Sujeto_n = factor(Hurst.NMOR$Sujeto,labels = info$Nombre[1:10])
Hurst.NMOR$Etapa = rep(0,length(Hurst.NMOR$Sujeto))
Hurst.NMOR.promedio = c()
for(suj in 1:10){
tmp = Hurst.NMOR[grep(info$Nombre[suj],Hurst.NMOR$Sujeto_n),]
tmp$Sujeto_n = tmp$Sujeto
promedios = aggregate(tmp,by=list(tmp$Canal_var),median)
Hurst.NMOR.promedio = rbind(Hurst.NMOR.promedio,promedios)
}
Hurst.NMOR.promedio$Sujeto_n = info$Nombre[Hurst.NMOR.promedio$Sujeto]
# Optional log-transform of the inter-channel NREM Hurst exponents.
# BUG FIX: the original referenced the nonexistent `Hurst.promedio`; the
# per-subject median table built just above is `Hurst.NMOR.promedio`.
if(usar.log){
  Hurst.NMOR$Hurst = log(Hurst.NMOR$Hurst)
  Hurst.NMOR.promedio$Hurst = log(Hurst.NMOR.promedio$Hurst)
}
# problemas con etiquetas
Hurst.NMOR$Canal_var = factor(Hurst.NMOR$Canal_var,
labels=kanales$Etiqueta)
Hurst.NMOR$Grupo = factor(Hurst.NMOR$Grupo,
labels=c('CTRL','PMCI'))
Hurst.NMOR.promedio$Canal_var = factor(Hurst.NMOR.promedio$Canal_var,
labels=kanales$Etiqueta)
Hurst.NMOR.promedio$Grupo = factor(Hurst.NMOR.promedio$Grupo,
labels=c('CTRL','PMCI'))
Hurst.NMOR.promedio = as.data.frame(Hurst.NMOR.promedio)
promedios.NMOR = summarySE(Hurst.NMOR,measurevar='Hurst',
groupvars=c('Grupo','Canal_var'),na.rm=T)
###############################################################################
# Intercanales
Hurst.MOR$Etapa = rep('REM',length(Hurst.MOR$Etapa))
Hurst.NMOR$Etapa = rep('NREM',length(Hurst.NMOR$Etapa))
Hurst.todo = rbind(Hurst.MOR,Hurst.NMOR)
promedios.MOR$Etapa = rep('REM',length(promedios.MOR$Grupo))
promedios.NMOR$Etapa = rep('NREM',length(promedios.NMOR$Grupo))
promedios.todo = rbind(promedios.MOR,promedios.NMOR)
colnames(Hurst.MOR.promedio)[1] = 'Canal'
Hurst.MOR.promedio$Canal = factor(Hurst.MOR.promedio$Canal,
labels = kanales$Etiqueta)
colnames(Hurst.NMOR.promedio)[1] = 'Canal'
Hurst.NMOR.promedio$Canal = factor(Hurst.NMOR.promedio$Canal,
labels = kanales$Etiqueta)
Hurst.todo.promedio = rbind(Hurst.MOR.promedio,
Hurst.NMOR.promedio)
Hurst.todo.promedio$Etapa = factor(Hurst.todo.promedio$Etapa,
labels = c('NREM','REM'))
####################################################################33
# analisis ANOVA
if(FALSE){
emg = is.element(Hurst.todo.promedio$Canal_var,c('EMG'))
eog = is.element(Hurst.todo.promedio$Canal_var,c('LOG','ROG'))
eeg = !is.element(Hurst.todo.promedio$Canal_var,c('EMG','LOG','ROG'))
Hurst.m.EEG = Hurst.todo.promedio[eeg,]
Hurst.m.EOG = Hurst.todo.promedio[eog,]
Hurst.m.EMG = Hurst.todo.promedio[emg,]
ch = 1
for(ch in 1:22){
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
print(summary(aov.EEG))
#plot(aov.EEG)
model.tables(aov.EEG)
pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
ph_etapa = pos_hoc[['Etapa']]
ph_grupo = pos_hoc[['Grupo']]
ph_inter = pos_hoc[['Grupo:Etapa']]
p = ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
theme_classic2() +
labs(linetype=ch.actual)+
geom_line(aes(group=Etapa))+
geom_point()
print(p)
invisible(readline(prompt="Presion [enter] para continuar"))
}
}
big.m.Grupo = c()
big.m.Etapa = c()
big.m.Inter = c()
big.s.Grupo = c()
big.s.Etapa = c()
big.s.Inter = c()
big.summary = c()
for(ch in 1:n.canales){
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
# Two-way ANOVA of Hurst on Group, Stage and their interaction for this channel.
# Stored as `aov_fit` rather than `aov` so the data object does not shadow
# stats::aov() (same rename as in the first ANOVA loop, for consistency).
aov_fit = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
              data=tmp)
#aov_fit
k = summary(aov_fit)  # single-element list holding the ANOVA table
k = k[[1]]
#pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
#ph_etapa = pos_hoc[['Etapa']]
#ph_grupo = pos_hoc[['Grupo']]
#ph_inter = pos_hoc[['Grupo:Etapa']]
#p = ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
# theme_classic2() +
# labs(linetype=ch.actual)+
# geom_line(aes(group=Etapa))+
# geom_point()
#print(p)
#q = model.tables(aov.EEG,'means')
#q = model.tables(aov,'means',se=T)
#qt = q[["tables"]]
#qs = q[["se"]]
#qt.Grupo = qt$`factor(Grupo)`
#qt.Etapa = qt$`factor(Etapa)`
#qt.Inter = qt$`factor(Grupo:Etapa)`
#big.m.Grupo = rbind(big.m.Grupo,qt.Grupo)
#big.m.Etapa = rbind(big.m.Etapa,qt.Etapa)
#big.m.Inter = rbind(big.m.Inter,qt.Inter)
qs = summarySE(data=tmp,groupvars=c('Grupo','Etapa'),
measurevar='Hurst')
qs2 = unlist(t(qs))
qs2 = as.list((qs2))
qs2 = unlist(t(qs2))
big.summary = rbind(big.summary,qs2)
#invisible(readline(prompt="Presion [enter] para continuar"))
}
ch = 9
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
print(summary(aov.EEG))
#plot(aov.EEG)
model.tables(aov.EEG)
pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
ph_etapa = pos_hoc[['Etapa']]
ph_grupo = pos_hoc[['Grupo']]
ph_inter = pos_hoc[['Grupo:Etapa']]
ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
labs(title=ch.actual)+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
#coord_cartesian(ylim=c(1.2,1.5))+
theme(legend.position=c(1,1),legend.direction = 'horizontal',
legend.justification=c(1,0))+
geom_line(aes(group=Etapa))+
geom_point()
ggsave(filename = 'ANOVAS_3.png',device='png',height = 4,width = 4,
path=dir_graf)
biggr = rbind(biggr,tmp.m)
#stop()
ggplot(biggr,aes(x=Grupo,y=Hurst,linetype=Etapa))+
#labs(title=ch.actual)+
#labs(title=' ')+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
coord_cartesian(ylim=c(1.2,1.6))+
#theme(legend.position=c(1,1),legend.direction = 'horizontal',
# legend.justification=c(1,0))+
theme(legend.position = 'top')+
facet_grid(.~Canal_var) +
theme(strip.background = element_blank())+
geom_line(aes(group=Etapa))+
geom_point()
ggsave(filename = 'ANOVAS_ojos.png',device='png',height = 4,width = 6,
path=dir_graf,dpi=400)
#ggplot(biggr,aes(x=Etapa,y=Hurst,linetype=Grupo))+
# #labs(title=ch.actual)+
# #labs(title=' ')+
# theme_classic2() +
# ylab('Hurst Exponent') + xlab(NULL)+
# labs(linetype=NULL)+
# coord_cartesian(ylim=c(1.2,1.6))+
# #theme(legend.position=c(1,1),legend.direction = 'horizontal',
# # legend.justification=c(1,0))+
# theme(legend.position = 'top')+
# facet_grid(.~Canal_var) +
# theme(strip.background = element_blank())+
# geom_line(aes(group=Grupo))+
# geom_point()
#ggsave(filename = 'ANOVAS_ojos_2.png',device='png',height = 4,width = 6,
# path=dir_graf,dpi=400)
ggplot(biggr,aes(x=Etapa,y=Hurst))+
theme_classic2() +
ylab('Hurst Exponent') + xlab(NULL)+
labs(linetype=NULL)+
coord_cartesian(ylim=c(1.2,1.6))+
theme(legend.position = 'top')+
facet_grid(.~Canal_var) +
theme(strip.background = element_blank())+
geom_line(aes(group=Grupo,linetype=Grupo))+
geom_errorbar(aes(ymin=Hurst-se,ymax=Hurst+se),width=.1,
color='grey40') +
geom_point()
ggsave(filename = 'Fig03_ANOVAS.png',device='png',height = 6,width = 8,
unit='cm',
path=dir_graf,dpi=400,scale=2)
#stop()
big.Grupo = c()
big.Etapa = c()
big.Inter = c()
ch = 1
for(ch in 1:length(kanales$Etiqueta)){
ch.actual = kanales$Etiqueta[ch]
print(ch.actual)
tmp = Hurst.todo.promedio[grep(ch.actual,Hurst.todo.promedio$Canal),]
tmp.m = promedios.todo[grep(ch.actual,promedios.todo$Canal),]
aov.EEG = aov(Hurst ~ factor(Grupo) + factor(Etapa) + factor(Grupo:Etapa),
data=tmp)
aov.EEG
#print(summary(aov.EEG))
#plot(aov.EEG)
model.tables(aov.EEG)
k = summary(aov.EEG)
k = k[[1]]
pos_hoc = TukeyHSD(x=aov.EEG,conf.level=0.95)
ph_etapa = pos_hoc[['Etapa']]
ph_grupo = pos_hoc[['Grupo']]
ph_inter = pos_hoc[['Grupo:Etapa']]
p = ggplot(tmp.m,aes(x=Grupo,y=Hurst,linetype=Etapa))+
theme_classic2() +
labs(linetype=ch.actual)+
geom_line(aes(group=Etapa))+
geom_point()
#print(p)
q = model.tables(aov.EEG,'means')
qt = q[["tables"]]
q.Grupo = qt$`factor(Grupo)`
q.Etapa = qt$`factor(Etapa)`
q.Inter = qt$`factor(Grupo:Etapa)`
big.Grupo = rbind(big.Grupo,q.Grupo)
big.Etapa = rbind(big.Etapa,q.Etapa)
big.Inter = rbind(big.Inter,q.Inter)
invisible(readline(prompt="Presion [enter] para continuar"))
}
big.Grupo = as.data.frame(big.Grupo)
big.Etapa = as.data.frame(big.Etapa)
big.Inter = as.data.frame(big.Inter)
big.Grupo$Canal = kanales$Etiqueta
big.Etapa$Canal = kanales$Etiqueta
big.Inter$Canal = kanales$Etiqueta
|
# data import and cleaning
library(rstudioapi)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) # set working directory to this script's folder (works inside RStudio only)
raw_df <- read.csv("../data/week3.csv")
raw_df$timeStart <- as.POSIXct(raw_df$timeStart)  # parse response start timestamps
raw_df$timeEnd <- as.POSIXct(raw_df$timeEnd)      # parse response end timestamps
clean_df <- raw_df[raw_df$timeStart >= as.POSIXct("2017-07-01"),] # keep responses started on/after 2017-07-01 (earlier attempt: clean_df <- raw_df$timeStart[grep("2017-06-", raw_df$timeStart)])
clean_df <- clean_df[clean_df$q6 == "1",]  # keep only rows where q6 == "1" (screening item? -- confirm with the codebook)
# Analysis
clean_df$timeSpent <- difftime(clean_df$timeEnd,clean_df$timeStart, units="secs") # completion time in seconds; ALTERNATIVELY: clean_df$timeSpent <- (unclass(clean_df$timeEnd)) - (unclass(clean_df$timeStart))
hist(as.numeric(clean_df$timeSpent))  # distribution of completion times
frequency_tables_list <- lapply (clean_df[,5:14], table)  # one frequency table per question column (columns 5-14)
lapply (frequency_tables_list, barplot)                   # bar plot for each frequency table
sum ((clean_df$q1 >= clean_df$q2) & (clean_df$q2 != clean_df$q3))  # count rows with q1 >= q2 and q2 != q3
| /Module_3/R/Module_3.R | no_license | WillEddy/Data-Science-for-Social-Scientists | R | false | false | 884 | r | # data import and cleaning
# data import and cleaning
library(rstudioapi)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) # set working directory to this script's folder (works inside RStudio only)
raw_df <- read.csv("../data/week3.csv")
raw_df$timeStart <- as.POSIXct(raw_df$timeStart)  # parse response start timestamps
raw_df$timeEnd <- as.POSIXct(raw_df$timeEnd)      # parse response end timestamps
clean_df <- raw_df[raw_df$timeStart >= as.POSIXct("2017-07-01"),] # keep responses started on/after 2017-07-01 (earlier attempt: clean_df <- raw_df$timeStart[grep("2017-06-", raw_df$timeStart)])
clean_df <- clean_df[clean_df$q6 == "1",]  # keep only rows where q6 == "1" (screening item? -- confirm with the codebook)
# Analysis
clean_df$timeSpent <- difftime(clean_df$timeEnd,clean_df$timeStart, units="secs") # completion time in seconds; ALTERNATIVELY: clean_df$timeSpent <- (unclass(clean_df$timeEnd)) - (unclass(clean_df$timeStart))
hist(as.numeric(clean_df$timeSpent))  # distribution of completion times
frequency_tables_list <- lapply (clean_df[,5:14], table)  # one frequency table per question column (columns 5-14)
lapply (frequency_tables_list, barplot)                   # bar plot for each frequency table
sum ((clean_df$q1 >= clean_df$q2) & (clean_df$q2 != clean_df$q3))  # count rows with q1 >= q2 and q2 != q3
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/facet-wrap.r
\name{facet_wrap}
\alias{facet_wrap}
\title{Wrap a 1d ribbon of panels into 2d.}
\usage{
facet_wrap(facets, nrow = NULL, ncol = NULL, scales = "fixed",
shrink = TRUE, labeller = "label_value", as.table = TRUE,
switch = NULL, drop = TRUE)
}
\arguments{
\item{facets}{Either a formula or character vector. Use either a
one sided formula, \code{~a + b}, or a character vector, \code{c("a", "b")}.}
\item{nrow,ncol}{Number of rows and columns.}
\item{scales}{Should scales be fixed (\code{"fixed"}, the default),
free (\code{"free"}), or free in one dimension (\code{"free_x"},
\code{"free_y"}).}
\item{shrink}{If \code{TRUE}, will shrink scales to fit output of
statistics, not raw data. If \code{FALSE}, will be range of raw data
before statistical summary.}
\item{labeller}{A function that takes one data frame of labels and
returns a list or data frame of character vectors. Each input
column corresponds to one factor. Thus there will be more than
one with formulae of the type \code{~cyl + am}. Each output
column gets displayed as one separate line in the strip
label. This function should inherit from the "labeller" S3 class
for compatibility with \code{\link{labeller}()}. See
\code{\link{label_value}} for more details and pointers to other
options.}
\item{as.table}{If \code{TRUE}, the default, the facets are laid out like
a table with highest values at the bottom-right. If \code{FALSE}, the
facets are laid out like a plot with the highest value at the top-right.}
\item{switch}{By default, the labels are displayed on the top of
the plot. If \code{switch} is \code{"x"}, they will be displayed
to the bottom. If \code{"y"}, they will be displayed to the
left, near the y axis.}
\item{drop}{If \code{TRUE}, the default, all factor levels not used in the
data will automatically be dropped. If \code{FALSE}, all factor levels
will be shown, regardless of whether or not they appear in the data.}
}
\description{
Most displays are roughly rectangular, so if you have a categorical
variable with many levels, it doesn't make sense to try and display them
all in one row (or one column). To solve this dilemma, \code{facet_wrap}
wraps a 1d sequence of panels into 2d, making best use of screen real estate.
}
\examples{
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class)
# Control the number of rows and columns with nrow and ncol
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class, nrow = 4)
# You can facet by multiple variables
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~ cyl + drv)
# Or use a character vector:
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(c("cyl", "drv"))
# Use the `labeller` option to control how labels are printed:
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(c("cyl", "drv"), labeller = "label_both")
# To change the order in which the panels appear, change the levels
# of the underlying factor.
mpg$class2 <- reorder(mpg$class, mpg$displ)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class2)
# By default, the same scales are used for all panels. You can allow
# scales to vary across the panels with the `scales` argument.
# Free scales make it easier to see patterns within each panel, but
# harder to compare across panels.
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class, scales = "free")
# To repeat the same data in every panel, simply construct a data frame
# that does not contain the faceting variable.
ggplot(mpg, aes(displ, hwy)) +
geom_point(data = transform(mpg, class = NULL), colour = "grey85") +
geom_point() +
facet_wrap(~class)
# Use `switch` to display the facet labels near an axis, acting as
# a subtitle for this axis. This is typically used with free scales
# and a theme without boxes around strip labels.
ggplot(economics_long, aes(date, value)) +
geom_line() +
facet_wrap(~variable, scales = "free_y", nrow = 2, switch = "x") +
theme(strip.background = element_blank())
}
| /man/facet_wrap.Rd | no_license | bbolker/ggplot2 | R | false | false | 4,087 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/facet-wrap.r
\name{facet_wrap}
\alias{facet_wrap}
\title{Wrap a 1d ribbon of panels into 2d.}
\usage{
facet_wrap(facets, nrow = NULL, ncol = NULL, scales = "fixed",
shrink = TRUE, labeller = "label_value", as.table = TRUE,
switch = NULL, drop = TRUE)
}
\arguments{
\item{facets}{Either a formula or character vector. Use either a
one sided formula, \code{~a + b}, or a character vector, \code{c("a", "b")}.}
\item{nrow,ncol}{Number of rows and columns.}
\item{scales}{Should scales be fixed (\code{"fixed"}, the default),
free (\code{"free"}), or free in one dimension (\code{"free_x"},
\code{"free_y"}).}
\item{shrink}{If \code{TRUE}, will shrink scales to fit output of
statistics, not raw data. If \code{FALSE}, will be range of raw data
before statistical summary.}
\item{labeller}{A function that takes one data frame of labels and
returns a list or data frame of character vectors. Each input
column corresponds to one factor. Thus there will be more than
one with formulae of the type \code{~cyl + am}. Each output
column gets displayed as one separate line in the strip
label. This function should inherit from the "labeller" S3 class
for compatibility with \code{\link{labeller}()}. See
\code{\link{label_value}} for more details and pointers to other
options.}
\item{as.table}{If \code{TRUE}, the default, the facets are laid out like
a table with highest values at the bottom-right. If \code{FALSE}, the
facets are laid out like a plot with the highest value at the top-right.}
\item{switch}{By default, the labels are displayed on the top of
the plot. If \code{switch} is \code{"x"}, they will be displayed
to the bottom. If \code{"y"}, they will be displayed to the
left, near the y axis.}
\item{drop}{If \code{TRUE}, the default, all factor levels not used in the
data will automatically be dropped. If \code{FALSE}, all factor levels
will be shown, regardless of whether or not they appear in the data.}
}
\description{
Most displays are roughly rectangular, so if you have a categorical
variable with many levels, it doesn't make sense to try and display them
all in one row (or one column). To solve this dilemma, \code{facet_wrap}
wraps a 1d sequence of panels into 2d, making best use of screen real estate.
}
\examples{
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class)
# Control the number of rows and columns with nrow and ncol
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class, nrow = 4)
# You can facet by multiple variables
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~ cyl + drv)
# Or use a character vector:
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(c("cyl", "drv"))
# Use the `labeller` option to control how labels are printed:
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(c("cyl", "drv"), labeller = "label_both")
# To change the order in which the panels appear, change the levels
# of the underlying factor.
mpg$class2 <- reorder(mpg$class, mpg$displ)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class2)
# By default, the same scales are used for all panels. You can allow
# scales to vary across the panels with the `scales` argument.
# Free scales make it easier to see patterns within each panel, but
# harder to compare across panels.
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class, scales = "free")
# To repeat the same data in every panel, simply construct a data frame
# that does not contain the faceting variable.
ggplot(mpg, aes(displ, hwy)) +
geom_point(data = transform(mpg, class = NULL), colour = "grey85") +
geom_point() +
facet_wrap(~class)
# Use `switch` to display the facet labels near an axis, acting as
# a subtitle for this axis. This is typically used with free scales
# and a theme without boxes around strip labels.
ggplot(economics_long, aes(date, value)) +
geom_line() +
facet_wrap(~variable, scales = "free_y", nrow = 2, switch = "x") +
theme(strip.background = element_blank())
}
|
#' @rdname run_gibbsflow_ais
#' @title Run Gibbs flow annealed importance sampler
#' @param prior list with keys:
#' \code{logdensity} evaluates log prior density,
#' \code{gradlogdensity} returns its gradient,
#' \code{rinit} samples from the prior distribution
#' @param likelihood list with keys:
#' \code{logdensity} evaluates log likelihood density,
#' \code{gradlogdensity} returns its gradient
#' @param nparticles number of particles
#' @param timegrid vector describing numerical integration times
#' @param lambda vector describing tempering schedule
#' @param derivative_lambda time derivative of tempering schedule
#' @param compute_gibbsflow function computing Gibbs flow
#' @param mcmc list with keys:
#' \code{choice} specifies type of MCMC method,
#' \code{parameters} specifies algorithmic tuning parameters,
#' \code{nmoves} specifies number of MCMC moves per temperature
#' @return list with keys:
#' \code{xtrajectory} trajectories,
#' \code{xparticles} particles at terminal time,
#' \code{ess} effective sample size,
#' \code{log_normconst} log normalizing constant,
#' \code{acceptprob} MCMC acceptance probabilities
#' @seealso \code{\link{run_gibbsflow_smc}} if resampling is desired
#' @export
run_gibbsflow_ais <- function(prior, likelihood, nparticles, timegrid, lambda, derivative_lambda, compute_gibbsflow, mcmc){
  # initialization: draw particles from the prior (the lambda = 0 target);
  # its log-density is the reference for the first incremental weight
  xparticles <- prior$rinit(nparticles)
  previous_logdensity <- prior$logdensity(xparticles)
  # pre-allocate
  dimension <- ncol(xparticles)
  nsteps <- length(lambda) # same length as timegrid
  stepsize <- diff(timegrid)
  xtrajectory <- array(dim = c(nparticles, dimension, nsteps))
  xtrajectory[ , , 1] <- xparticles
  logweights <- rep(0, nparticles)
  ess <- rep(0, nsteps)
  ess[1] <- nparticles  # all weights equal at the start
  log_normconst <- rep(0, nsteps)
  # row 1 = smallest, row 2 = largest acceptance rate over the MCMC moves
  acceptprob <- matrix(nrow = 2, ncol = nsteps)
  acceptprob[ , 1] <- c(1, 1)
  for (istep in 2:nsteps){
    # gibbs flow move: transport particles over [timegrid[istep-1], timegrid[istep]]
    output_flow <- compute_gibbsflow(stepsize[istep-1], lambda[istep-1], derivative_lambda[istep-1],
                                     xparticles, previous_logdensity)
    xparticles <- output_flow$xparticles
    log_jacobian_dets <- as.numeric(output_flow$log_jacobian_dets)
    # weight: new tempered log-density minus old one, plus log |Jacobian| of the flow map
    current_logdensity <- prior$logdensity(xparticles) + lambda[istep] * likelihood$logdensity(xparticles)
    logweights <- logweights + current_logdensity - previous_logdensity + log_jacobian_dets
    maxlogweights <- max(logweights)  # log-sum-exp stabilisation
    weights <- exp(logweights - maxlogweights)
    normweights <- weights / sum(weights)
    # compute effective sample size
    ess[istep] <- 1 / sum(normweights^2)
    # compute normalizing constant
    log_normconst[istep] <- log(mean(weights)) + maxlogweights
    # MCMC rejuvenation targeting the current tempered distribution
    current_logtarget <- function(x) prior$logdensity(x) + lambda[istep] * likelihood$logdensity(x)
    current_gradlogtarget <- function(x) prior$gradlogdensity(x) + lambda[istep] * likelihood$gradlogdensity(x)
    transition_kernel <- construct_kernel(current_logtarget, current_gradlogtarget, mcmc)
    current_acceptprob <- rep(0, mcmc$nmoves)
    for (imove in 1:mcmc$nmoves){
      mcmc_output <- transition_kernel(xparticles, current_logdensity)
      xparticles <- mcmc_output$x
      current_logdensity <- mcmc_output$logtarget_x
      current_acceptprob[imove] <- mcmc_output$acceptprob
    }
    acceptprob[ , istep] <- c(min(current_acceptprob), max(current_acceptprob))
    # store trajectory
    xtrajectory[ , , istep] <- xparticles
    previous_logdensity <- current_logdensity
  }
  return(list(xtrajectory = xtrajectory, xparticles = xparticles, ess = ess,
              log_normconst = log_normconst, acceptprob = acceptprob))
}
| /R/run_gibbsflow_ais.R | no_license | TorbenSell/GibbsFlow | R | false | false | 3,734 | r | #' @rdname run_gibbsflow_ais
#' @title Run Gibbs flow annealed importance sampler
#' @description Propagates particles along the Gibbs flow between consecutive
#' tempered targets, accumulating importance weights and applying MCMC
#' rejuvenation moves at every temperature (no resampling).
#' @param prior list with keys \code{logdensity} (evaluates log prior density),
#' \code{gradlogdensity} (its gradient) and \code{rinit} (prior sampler)
#' @param likelihood list with keys \code{logdensity} (evaluates log
#' likelihood density) and \code{gradlogdensity} (its gradient)
#' @param nparticles number of particles
#' @param timegrid vector describing numerical integration times
#' @param lambda vector describing tempering schedule
#' @param derivative_lambda time derivative of tempering schedule
#' @param compute_gibbsflow function computing Gibbs flow
#' @param mcmc list with keys \code{choice} (type of MCMC method),
#' \code{parameters} (algorithmic tuning parameters) and \code{nmoves}
#' (number of MCMC moves per temperature)
#' @return list with keys \code{xtrajectory} (trajectories),
#' \code{xparticles} (particles at terminal time), \code{ess} (effective
#' sample size), \code{log_normconst} (log normalizing constant) and
#' \code{acceptprob} (MCMC acceptance probabilities)
#' @seealso \code{\link{run_gibbsflow_smc}} if resampling is desired
#' @export
run_gibbsflow_ais <- function(prior, likelihood, nparticles, timegrid, lambda,
                              derivative_lambda, compute_gibbsflow, mcmc) {
  # Draw the initial particle cloud from the prior (the lambda = 0 target);
  # its log-density is the baseline for the first incremental weight.
  xparticles <- prior$rinit(nparticles)
  logdens_prev <- prior$logdensity(xparticles)
  # Allocate all outputs up front.
  dimension <- ncol(xparticles)
  nsteps <- length(lambda)  # one entry per point of timegrid
  stepsizes <- diff(timegrid)
  xtrajectory <- array(dim = c(nparticles, dimension, nsteps))
  xtrajectory[, , 1] <- xparticles
  logweights <- rep(0, nparticles)
  ess <- rep(0, nsteps)
  ess[1] <- nparticles
  log_normconst <- rep(0, nsteps)
  acceptprob <- matrix(nrow = 2, ncol = nsteps)  # rows: min / max acceptance
  acceptprob[, 1] <- c(1, 1)
  for (k in 2:nsteps) {
    # Transport particles along the Gibbs flow over [timegrid[k-1], timegrid[k]].
    flow <- compute_gibbsflow(stepsizes[k - 1], lambda[k - 1],
                              derivative_lambda[k - 1],
                              xparticles, logdens_prev)
    xparticles <- flow$xparticles
    logjac <- as.numeric(flow$log_jacobian_dets)
    # Importance weight update: new tempered log-density over the previous one,
    # corrected by the log Jacobian determinant of the flow map.
    logdens_curr <- prior$logdensity(xparticles) +
      lambda[k] * likelihood$logdensity(xparticles)
    logweights <- logweights + logdens_curr - logdens_prev + logjac
    lw_max <- max(logweights)  # log-sum-exp stabilisation
    w <- exp(logweights - lw_max)
    w_norm <- w / sum(w)
    ess[k] <- 1 / sum(w_norm^2)
    log_normconst[k] <- log(mean(w)) + lw_max
    # MCMC rejuvenation targeting the current tempered distribution.
    logtarget <- function(x) prior$logdensity(x) + lambda[k] * likelihood$logdensity(x)
    gradlogtarget <- function(x) prior$gradlogdensity(x) + lambda[k] * likelihood$gradlogdensity(x)
    kernel <- construct_kernel(logtarget, gradlogtarget, mcmc)
    move_rates <- rep(0, mcmc$nmoves)
    for (m in 1:mcmc$nmoves) {
      kicked <- kernel(xparticles, logdens_curr)
      xparticles <- kicked$x
      logdens_curr <- kicked$logtarget_x
      move_rates[m] <- kicked$acceptprob
    }
    acceptprob[, k] <- c(min(move_rates), max(move_rates))
    xtrajectory[, , k] <- xparticles
    logdens_prev <- logdens_curr
  }
  list(xtrajectory = xtrajectory, xparticles = xparticles, ess = ess,
       log_normconst = log_normconst, acceptprob = acceptprob)
}
|
### GENERIC TREE R SCRIPT ###
# Location of the shared R helper library; earlier machine-specific paths are
# kept commented out for reference.
#rdir = "/home/re1u06/researchfiles/SBSBINF/Tools/svn/libraries/r/"
#rdir = "/home/re1u06/Tools/libraries/r/"
#rdir = "/home/redwards/Serpentry/"
#rdir = "/data/ben/Serpentry/"
rdir = "/Users/redwards/Dropbox/_Repository_/slimsuite/libraries/r/"
rjesource = function(rfile){
source(paste(rdir,rfile,sep=""))
}
rjesource("rje_col.r")
rjesource("rje_misc.r")
### ~ COMMANDLINE ARGUMENTS ~ ###
args <- commandArgs(TRUE)
(file = args[1])
(outfile = args[2]) # "KCMA1.png"
if (length(args) > 2) { treetitle = args[3]; }
### ~ SETUP ~ ###
#library(Cairo) # Not for bioinf.soton.ac.uk
tree = read.csv(file)
ynum = length(tree$xpos)
x = 1 / (2*max(tree$xpos))
y = 1 / (ynum+1)
### Families and colours ###
fcolist = c(1:4,8:21)
tree$fcol = "black" # soton$col[1] #"black"
#flvl = levels(as.factor(tree[tree$family != "EHUX",]$family))
flvl = levels(as.factor(tree$family))
if(length(flvl) > 0){
for (f in 1:length(flvl)){ tree[tree$family == flvl[f],]$fcol = soton$col[fcolist[f]] }
}
#~special~#
#tree[tree$family == "EHUX",]$fcol = soton$col[5]
### ~ Setup Plot ~ ###
pngwidth = 1600
yex = 100 #38
ypx = 20 #50
mex = 2 #1
if (length(args) > 3) { pngwidth = as.integer(args[4]); }
png(filename = outfile, width = pngwidth, height = max(300,(ynum+2)*ypx), units = "px", pointsize=14)
#CairoPNG(filename=outfile, width = pngwidth, height = max(300,(ynum+2)*ypx), pointsize=25)
plot(0:1,0:1,type="n",axes=FALSE,ann=FALSE,mar=c(0,1,4,1))
if (length(args) > 2) { title(main=treetitle); }
### ~ Draw Tree ~ ###
for (i in 1:ynum){
data = tree[i,]
lines(c(data$xpos*x,data$ancx*x),c(1-data$ypos*y,1-data$ypos*y),col=data$fcol)
lines(c(data$ancx*x,data$ancx*x),c(1-data$ypos*y,1-data$ancy*y),col=data$fcol)
}
### ~ Add Text ~ ###
for (i in 1:ynum){
data = tree[i,]
if (data$nodenum <= ((ynum+1) / 2)){
#text((data$xpos)*x+0.01,1-data$ypos*y,data$name,adj=c(0,0.5),cex=min(mex,(yex+2)/ynum),col=data$fcol)
text((data$xpos)*x+0.01,1-data$ypos*y,data$name,adj=c(0,0.5),col=data$fcol)
}else{
#text((data$xpos)*x-0.01,1+(0.45/ynum)-data$ypos*y,data$boot,adj=c(1,0),cex=min(mex,yex/ynum),col=soton$col[5])
text((data$xpos)*x-0.01,1+(0.45/ynum)-data$ypos*y,data$boot,adj=c(1,0),cex=0.8,col=soton$col[5])
}
}
### ~ Add scale ~ ###
lines(c(0,0.1*x),c(0,0),col=soton$col[7])
lines(c(0,0),c(0,-0.005),col=soton$col[7])
lines(c(0.1*x,0.1*x),c(0,-0.005),col=soton$col[7])
#text(0,-0.01,"0",adj=c(0.5,1),cex=min(mex,yex/ynum),col=soton$col[7],xpd=NA)
#text(0.1*x,-0.01,"0.1",adj=c(0.5,1),cex=min(mex,yex/ynum),col=soton$col[7],xpd=NA)
text(0,-0.01,"0",adj=c(0.5,1),col=soton$col[7],xpd=NA)
text(0.1*x,-0.01,"0.1",adj=c(0.5,1),col=soton$col[7],xpd=NA)
dev.off()
| /libraries/r/rje_tree.r | no_license | NPalopoli/SLiMSuite | R | false | false | 2,731 | r | ### GENERIC TREE R SCRIPT ###
#rdir = "/home/re1u06/researchfiles/SBSBINF/Tools/svn/libraries/r/"
#rdir = "/home/re1u06/Tools/libraries/r/"
#rdir = "/home/redwards/Serpentry/"
#rdir = "/data/ben/Serpentry/"
rdir = "/Users/redwards/Dropbox/_Repository_/slimsuite/libraries/r/"
rjesource = function(rfile){
source(paste(rdir,rfile,sep=""))
}
rjesource("rje_col.r")
rjesource("rje_misc.r")
### ~ COMMANDLINE ARGUMENTS ~ ###
args <- commandArgs(TRUE)
(file = args[1])
(outfile = args[2]) # "KCMA1.png"
if (length(args) > 2) { treetitle = args[3]; }
### ~ SETUP ~ ###
#library(Cairo) # Not for bioinf.soton.ac.uk
tree = read.csv(file)
ynum = length(tree$xpos)
x = 1 / (2*max(tree$xpos))
y = 1 / (ynum+1)
### Families and colours ###
fcolist = c(1:4,8:21)
tree$fcol = "black" # soton$col[1] #"black"
#flvl = levels(as.factor(tree[tree$family != "EHUX",]$family))
flvl = levels(as.factor(tree$family))
if(length(flvl) > 0){
for (f in 1:length(flvl)){ tree[tree$family == flvl[f],]$fcol = soton$col[fcolist[f]] }
}
#~special~#
#tree[tree$family == "EHUX",]$fcol = soton$col[5]
### ~ Setup Plot ~ ###
pngwidth = 1600
yex = 100 #38
ypx = 20 #50
mex = 2 #1
if (length(args) > 3) { pngwidth = as.integer(args[4]); }
png(filename = outfile, width = pngwidth, height = max(300,(ynum+2)*ypx), units = "px", pointsize=14)
#CairoPNG(filename=outfile, width = pngwidth, height = max(300,(ynum+2)*ypx), pointsize=25)
plot(0:1,0:1,type="n",axes=FALSE,ann=FALSE,mar=c(0,1,4,1))
if (length(args) > 2) { title(main=treetitle); }
### ~ Draw Tree ~ ###
for (i in 1:ynum){
data = tree[i,]
lines(c(data$xpos*x,data$ancx*x),c(1-data$ypos*y,1-data$ypos*y),col=data$fcol)
lines(c(data$ancx*x,data$ancx*x),c(1-data$ypos*y,1-data$ancy*y),col=data$fcol)
}
### ~ Add Text ~ ###
for (i in 1:ynum){
data = tree[i,]
if (data$nodenum <= ((ynum+1) / 2)){
#text((data$xpos)*x+0.01,1-data$ypos*y,data$name,adj=c(0,0.5),cex=min(mex,(yex+2)/ynum),col=data$fcol)
text((data$xpos)*x+0.01,1-data$ypos*y,data$name,adj=c(0,0.5),col=data$fcol)
}else{
#text((data$xpos)*x-0.01,1+(0.45/ynum)-data$ypos*y,data$boot,adj=c(1,0),cex=min(mex,yex/ynum),col=soton$col[5])
text((data$xpos)*x-0.01,1+(0.45/ynum)-data$ypos*y,data$boot,adj=c(1,0),cex=0.8,col=soton$col[5])
}
}
### ~ Add scale ~ ###
lines(c(0,0.1*x),c(0,0),col=soton$col[7])
lines(c(0,0),c(0,-0.005),col=soton$col[7])
lines(c(0.1*x,0.1*x),c(0,-0.005),col=soton$col[7])
#text(0,-0.01,"0",adj=c(0.5,1),cex=min(mex,yex/ynum),col=soton$col[7],xpd=NA)
#text(0.1*x,-0.01,"0.1",adj=c(0.5,1),cex=min(mex,yex/ynum),col=soton$col[7],xpd=NA)
text(0,-0.01,"0",adj=c(0.5,1),col=soton$col[7],xpd=NA)
text(0.1*x,-0.01,"0.1",adj=c(0.5,1),col=soton$col[7],xpd=NA)
dev.off()
|
# Exploratory plot 1: total PM2.5 emissions from all sources, by year.
library(dplyr)

# Read the National Emissions Inventory data from the local RDS file.
NEI <- readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")

# Convert to a tibble for easier manipulation.
NEI <- as_tibble(NEI)

# Total PM2.5 emissions per year; print() echoes the table (as the original
# pipeline did) while returning it for assignment.
NEI_year <- print(summarise(group_by(NEI, year), sum(Emissions, na.rm = TRUE)))
colnames(NEI_year) <- c("year", "emission_sum")

# Bar plot of the yearly totals on the active graphics device.
with(NEI_year,
     barplot(emission_sum, names.arg = year, width = rep(5, 4), col = "light blue",
             ylab = "Total PM2.5 emission", xlab = "Year",
             ylim = c(0, max(emission_sum) + 10000)))
title(main = "Total PM2.5 emission from all sources \n by year")

# Copy the screen device to a PNG file (dev.off() follows in the script).
dev.copy(png, file = "plot1.png")
dev.off() | /plot1.R | no_license | ttc142/coursera_eda2 | R | false | false | 662 | r | library(dplyr)
# read data
NEI <- readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")
# convert to tibble for easier manipulation
NEI <- as_tibble(NEI)
# calculate total PM2.5 emission by year
NEI_year <- NEI %>%
group_by(year) %>%
summarise(sum(Emissions,na.rm=TRUE)) %>%
print
colnames(NEI_year) <- c("year","emission_sum")
# plot total PM2.5 emission by year
with(NEI_year,barplot(emission_sum, names.arg = year, width= rep(5,4),col = "light blue",
ylab ="Total PM2.5 emission", xlab = "Year", ylim = c(0,max(emission_sum)+10000)))
title(main = "Total PM2.5 emission from all sources \n by year")
dev.copy(png,file = "plot1.png")
dev.off() |
# Exploratory plot 2: Global Active Power over 2007-02-01 and 2007-02-02.
# Downloads and caches the UCI household power consumption data, parses it
# once per session, and plots the Global_active_power time series to a PNG.
library(dplyr)

# Download the zipped data set once and cache it locally.
localfile <- "./data/extdata-data-household_power_consumption.zip"
if(!file.exists(localfile)){
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", dest=localfile, mode="wb")
}

# Extract the archive unless the expected file is already present.
# BUG FIX: the original used `list.files(unzipdir) != unzip(...)$Name` inside
# `if`, which errors with "argument is of length zero" when the directory is
# empty/missing (logical(0) condition) and misbehaves for multi-file
# directories. identical() compares the whole character vectors safely.
unzipdir <- "./data/extdataproj"
if (!identical(list.files(unzipdir), unzip(localfile, list=TRUE)$Name)){
  unzip(localfile, exdir=unzipdir)
  print("unzipped")
}
filename <- unzip(localfile, list=TRUE)$Name

getrows <- -1  # read all rows (-1 means "no limit" for read.table's nrows)

# Parse the data only once per session; "?" marks missing values.
if (!exists("dnp")){
  dnp <- read.table(paste(unzipdir, filename, sep="/"), na.strings="?", sep=";", header=TRUE, nrows=getrows, stringsAsFactors=FALSE) ##fails so maybe need to fix this in the txt file
  dnp$DateDate <- as.Date(paste(dnp$Date), "%d/%m/%Y") # returns invalid date without formatting
  dnp$stptimeT <- as.POSIXct(strptime(paste(dnp$Date, dnp$Time, sep=" "), "%d/%m/%Y %H:%M:%S"))
  dnp$testdt <- paste(dnp$Date, dnp$Time, sep=" ")
  dnp <- as_tibble(dnp)  # tbl_df() is deprecated; as_tibble() is its replacement
  dnp <- dnp %>% filter(DateDate == as.Date("2007-02-01") | DateDate == as.Date("2007-02-02"))
}

# Use English day/month names for the time axis labels.
Sys.setlocale("LC_TIME", "English")
png("plot2.png",width=480,height=480,units="px")
with(dnp, plot(stptimeT, Global_active_power, type="l", col="black", ylab="Global Active Power (kilowatts)", xlab=""))
dev.off()
Sys.setlocale("LC_TIME", "Dutch") | /plot2.R | no_license | jazzfree/ExData_Plotting1 | R | false | false | 1,277 | r | library(dplyr)
localfile <- "./data/extdata-data-household_power_consumption.zip"
if(!file.exists(localfile)){
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", dest=localfile, mode="wb")
}
unzipdir <- "./data/extdataproj"
if (list.files(unzipdir) != unzip(localfile, list=TRUE)$Name){
unzip(localfile, exdir=unzipdir)
print("unzipped")
}
filename <- unzip(localfile, list=TRUE)$Name
getrows = -1
if (!exists("dnp")){
dnp <- read.table(paste(unzipdir, filename, sep="/"), na.strings="?", sep=";", header=TRUE, nrows=getrows, stringsAsFactors=FALSE) ##fails so maybe need to fix this in the txt file
dnp$DateDate <- as.Date(paste(dnp$Date), "%d/%m/%Y") # returns invalid date without formatting
dnp$stptimeT <- as.POSIXct(strptime(paste(dnp$Date, dnp$Time, sep=" "), "%d/%m/%Y %H:%M:%S"))
dnp$testdt <- paste(dnp$Date, dnp$Time, sep=" ")
dnp <- tbl_df(dnp)
dnp <- dnp %>% filter(DateDate == as.Date("2007-02-01") | DateDate == as.Date("2007-02-02"))
}
Sys.setlocale("LC_TIME", "English")
png("plot2.png",width=480,height=480,units="px")
with(dnp, plot(stptimeT, Global_active_power, type="l", col="black", ylab="Global Active Power (kilowatts)", xlab=""))
dev.off()
Sys.setlocale("LC_TIME", "Dutch") |
# Moth sex-ratio analysis: maximum-likelihood estimate of the proportion of
# female moths across 16 colonies sampled in an area of South Wales.
# Each moth is either female or not, so colony counts are modelled as
# binomial draws with a common success probability p.

# Counts of females and males observed in each of the 16 colonies.
Females <-c(18,31,34,33,27,33,28,23,33,12,19,25,14,4,22,7)
Males<-c(11,22,27,29,24,29,25,26,38,14,23,31,20,6,34,12)
Y<-Females
N<-Females+Males  # colony sizes (total moths per colony)

# Per-colony observed proportion of female moths.
Y/N
# Pooled proportion of female moths across all colonies.
sum(Y)/sum(N)

# Binomial log-likelihood of a common female proportion p across colonies.
# (TRUE is used instead of the reassignable shorthand T.)
logL<-function(p) sum(dbinom(Females,N,p,log=TRUE))

# Maximum-likelihood estimate of p on [0, 1]; it comes out near 50%.
optimize(logL,lower=0,upper=1,maximum=TRUE)

# Interactive plot of the log-likelihood curve with a movable reference line.
########################################################################
require(manipulate)
p.seq<-seq(0.01,0.99,0.01) # candidate proportions strictly inside (0, 1)
manipulate(
  plot(p.seq,sapply(p.seq,logL),type="l",ylab = "Log Likelyhood of Proportion(Moths)", xlab= "Proportion of Female Moths")+abline(v=Probability, col="red"),
  Probability=slider(0.0,1.0,step = 0.1,initial = 0.5) # slider to show a line at the selected probability
)
| /R_Files/AQM/Weekly Materials/Week 4/Loglikelyhood on gender populations.r | no_license | menquist/Michael_Enquist | R | false | false | 1,537 | r |
#This data is a collection of information about the number of male and female months in an area of South Wales
#There are 16 Colonies of moths and we are interested in estimating the True mean proportion of Female Moths in population
#The function of the Moths' genders is binomial where each moth is either Female or Not Female
#These are the numbers of females and males in each of the 16 colonies of Moths
Females <-c(18,31,34,33,27,33,28,23,33,12,19,25,14,4,22,7)
Males<-c(11,22,27,29,24,29,25,26,38,14,23,31,20,6,34,12)
Y<-Females
N<-Females+Males
# This is a vector containing the proportion of Female Moths for each colony
Y/N
# This is the total proportion of female moths across all colonies
sum(Y)/sum(N)
# Formula for calculating the loglikelyhood of a given proportion(p) of Female Moths
logL<-function(p) sum(dbinom(Females,N,p,log=T))
#Find the Proportion with the Maximum Likelyhood usingthe optimize function: It gives approx. 50%
optimize(logL,lower=0,upper=1,maximum=TRUE)
#Interactive Plot to illustrate that
########################################################################
require(manipulate)
p.seq<-seq(0.01,0.99,0.01) # Range of Probabilities with minimum of 0 and max of 1
manipulate(
plot(p.seq,sapply(p.seq,logL),type="l",ylab = "Log Likelyhood of Proportion(Moths)", xlab= "Proportion of Female Moths")+abline(v=Probability, col="red"),
Probability=slider(0.0,1.0,step = 0.1,initial = 0.5) # Slider T oshow a Line at the selected probability
)
|
# get_total_count
#' Count how often each subway station appears on the trips in `dat`.
#'
#' For every row of `dat`, the precomputed route between the departure and
#' arrival stations is looked up in the bundled `subway_route` list (trying
#' the "dep-arr" key first and falling back to "arr-dep"), and a counter is
#' incremented for every station on that route.
#'
#' @param dat data.frame of trips, one row per trip.
#' @param departure_name name of the column in `dat` holding departures.
#' @param arrival_name name of the column in `dat` holding arrivals.
#' @return A long-format data.frame with columns `station` and `count`.
get_total_count <- function(dat, departure_name, arrival_name){
  # Bundled data sets: `subway_route` (routes keyed by "dep-arr" strings,
  # each with a `station` component) and `seoul_station` (station names;
  # assumed to have length 468 to match the matrix below -- TODO confirm).
  data("subway_route")
  data("seoul_station")
  # One-row data.frame of per-station counters, all starting at zero.
  total <- data.frame(matrix(0,1,468))
  colnames(total) <- seoul_station
  departure <- dat[,departure_name]
  arrival <- dat[,arrival_name]
  time.started <- Sys.time()  # Store start time for the progress message
  cat(paste('Started at : ', time.started, ' / ...ing...', sep = ''))
  for(i in 1:nrow(dat)){
    # Route keys are direction-sensitive, so try "dep-arr" and then the
    # reversed "arr-dep" key.
    get <- paste0(departure[i], "-", arrival[i])
    get_2 <- paste0(arrival[i], "-", departure[i])
    result <- subway_route[[get]]
    if(is.null(result)){
      result <- subway_route[[get_2]]
    }
    # Increment the counter column of every station on this route.
    total[which(seoul_station%in%result$station)] <-
      total[which(seoul_station%in%result$station)] + 1
  }
  # Reshape the 1 x n counter row into long (station, count) pairs
  # (tidyr::gather).
  total_gather <- total%>%gather(key = "station", value = "count")
  time.finished <- Sys.time() # Store finished time
  time.elapsed <- time.finished - time.started # Calculate elapsed time
  cat(paste('Finished..! / elapsed time : ', time.elapsed, '\n\n', sep = ''))
  return(total_gather)
}
| /R/get_total_count.R | no_license | king4k1/seoulsubway | R | false | false | 1,013 | r | # get_total_count
get_total_count <- function(dat, departure_name, arrival_name){
data("subway_route")
data("seoul_station")
total <- data.frame(matrix(0,1,468))
colnames(total) <- seoul_station
departure <- dat[,departure_name]
arrival <- dat[,arrival_name]
time.started <- Sys.time()
cat(paste('Started at : ', time.started, ' / ...ing...', sep = ''))
for(i in 1:nrow(dat)){
get <- paste0(departure[i], "-", arrival[i])
get_2 <- paste0(arrival[i], "-", departure[i])
result <- subway_route[[get]]
if(is.null(result)){
result <- subway_route[[get_2]]
}
total[which(seoul_station%in%result$station)] <-
total[which(seoul_station%in%result$station)] + 1
}
total_gather <- total%>%gather(key = "station", value = "count")
time.finished <- Sys.time() # Store finished time
time.elapsed <- time.finished - time.started # Calculate elapsed time
cat(paste('Finished..! / elapsed time : ', time.elapsed, '\n\n', sep = ''))
return(total_gather)
}
|
#:# libraries
# Model-audit script: trains and cross-validates a naive Bayes classifier on
# the OpenML "sonar" data set and records results plus session info.
# (The `#:#` markers are section tags for the audit tooling -- kept as-is.)
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the cross-validation folds are reproducible.
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "sonar")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data are used as-is.
head(dataset$data)
#:# model
# Binary classification task predicting `Class`, with a naive Bayes learner
# (no Laplace smoothing) that outputs class probabilities.
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.naiveBayes", par.vals = list(laplace = 0), predict.type = "prob")
#:# hash
#:# 553e49e8e415d4dbeb679e29ebacf274
# Hash of the (task, learner) pair identifies this model configuration.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with several classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr  # aggregated performance over the folds
ACC
#:# session info
# Record package versions alongside the results for reproducibility.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_sonar/classification_Class/553e49e8e415d4dbeb679e29ebacf274/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 694 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "sonar")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.naiveBayes", par.vals = list(laplace = 0), predict.type = "prob")
#:# hash
#:# 553e49e8e415d4dbeb679e29ebacf274
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
\name{null.breadth}
\alias{null.breadth}
\title{
null.breadth
}
\description{
This function calculates the null expectation of host breadth if herbivores sample diet items randomly.
}
\usage{
null.breadth(dat, dist.method = "jaccard", rep = 100, quantiles = c(0.025, 0.975),
scaled = FALSE)
}
\arguments{
\item{dat}{
A matrix of diet associations. Rows are herbivores and columns are diet items.
}
\item{dist.method}{
Dissimilarity index passed on to \code{vegdist} in the 'vegan' package.
}
\item{rep}{
The number of permutations to generate a null distribution
}
\item{quantiles}{
A vector length of two indicating the lower and upper quantiles to report for the null distribution.
}
\item{scaled}{
A logical indicating whether to report the scaled ordinated host breadth.
}
}
\value{
An array showing the lower and upper quantiles of the null distribution for each level of taxonomic richness.
}
\references{
Fordyce, J.A., C.C. Nice, C.A. Hamm, & M.L. Forister. Quantifying diet breadth through ordination of host association. Ecology
}
\author{
James Fordyce
}
\examples{
testdata<-
c(
0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,
1,1,1,0,0,0,0,0,0,0,
0,0,0,0,1,1,0,1,0,1,
1,1,1,0,0,0,1,0,0,0,
1,1,0,0,1,0,1,0,0,0,
0,0,0,1,0,0,1,0,1,1,
1,0,1,0,1,1,0,0,0,1,
1,1,0,0,1,0,0,1,1,1,
1,1,1,0,1,1,0,1,1,1)
dat<-array(dim=c(10,10),data=testdata)
dat<-t(dat)
colnames(dat)<-paste("",LETTERS[1:10],sep="")
rownames(dat)<-paste("bug",1:10,sep="")
null.breadth(dat)
}
| /man/null.breadth.Rd | no_license | butterflyology/ordiBreadth | R | false | false | 1,498 | rd | \name{null.breadth}
\alias{null.breadth}
\title{
null.breadth
}
\description{
This function calculates the null expectation of host breadth if herbivores sample diet items randomly.
}
\usage{
null.breadth(dat, dist.method = "jaccard", rep = 100, quantiles = c(0.025, 0.975),
scaled = FALSE)
}
\arguments{
\item{dat}{
A matrix of diet associations. Rows are herbivores and columns are diet items.
}
\item{dist.method}{
Dissimilarity index passed on to \code{vegdist} in the 'vegan' package.
}
\item{rep}{
The number of permutations to generate a null distribution
}
\item{quantiles}{
A vector length of two indicating the lower and upper quantiles to report for the null distribution.
}
\item{scaled}{
A logical indicating whether to report the scaled ordinated host breadth.
}
}
\value{
An array show the lower and upper quantiles of the null distribution for each taxonomic richness
}
\references{
Fordyce, J.A., C.C. Nice, C.A. Hamm, & M.L. Forister. Quantifying diet breadth through ordination of host association. Ecology
}
\author{
James Fordyce
}
\examples{
testdata<-
c(
0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,
1,1,1,0,0,0,0,0,0,0,
0,0,0,0,1,1,0,1,0,1,
1,1,1,0,0,0,1,0,0,0,
1,1,0,0,1,0,1,0,0,0,
0,0,0,1,0,0,1,0,1,1,
1,0,1,0,1,1,0,0,0,1,
1,1,0,0,1,0,0,1,1,1,
1,1,1,0,1,1,0,1,1,1)
dat<-array(dim=c(10,10),data=testdata)
dat<-t(dat)
colnames(dat)<-paste("",LETTERS[1:10],sep="")
rownames(dat)<-paste("bug",1:10,sep="")
null.breadth(dat)
}
|
/script for PAM data and statistics of RLC parameters.R | no_license | ronenliberman/CC-disrupt-octocoral-reproductive-synchrony | R | false | false | 9,138 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_invcom_data.R
\name{get_invcom_data_list}
\alias{get_invcom_data_list}
\title{scrape Investing.com Finance data}
\usage{
get_invcom_data_list(pjs_session, ticker_tbl, start_date, end_date)
}
\arguments{
\item{pjs_session}{phantom.js session}
\item{ticker_tbl}{tbl_df with ticker, page and url}
\item{start_date}{start date for price data retrieval}
\item{end_date}{end date for price data retrieval}
}
\value{
object of class \code{list} named by each url specific to a ticker-type combination containing scraped data
}
\description{
scrape Investing.com Finance data
}
| /man/get_invcom_data_list.Rd | no_license | ces0491/companyDataScrapeR | R | false | true | 655 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_invcom_data.R
\name{get_invcom_data_list}
\alias{get_invcom_data_list}
\title{scrape Investing.com Finance data}
\usage{
get_invcom_data_list(pjs_session, ticker_tbl, start_date, end_date)
}
\arguments{
\item{pjs_session}{phantom.js session}
\item{ticker_tbl}{tbl_df with ticker, page and url}
\item{start_date}{start date for price data retrieval}
\item{end_date}{end date for price data retrieval}
}
\value{
object of class \code{list} named by each url specific to a ticker-type combination containing scraped data
}
\description{
scrape Investing.com Finance data
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internalHelpers.R
\name{getTimeConversionFactor}
\alias{getTimeConversionFactor}
\title{transform timeUnit to convert from seconds to specified unit}
\usage{
getTimeConversionFactor(timeUnit)
}
\description{
Computes the multiplicative factor used to convert a duration in seconds into the unit named by \code{timeUnit} (a character vector).
}
\keyword{internal}
| /man/getTimeConversionFactor.Rd | no_license | cran/spectralAnalysis | R | false | true | 340 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internalHelpers.R
\name{getTimeConversionFactor}
\alias{getTimeConversionFactor}
\title{transform timeUnit to convert from seconds to specified unit}
\usage{
getTimeConversionFactor(timeUnit)
}
\description{
@param timeUnit character vector
}
\keyword{internal}
|
#' Get radiality centrality scores
#' @description Get the radiality centrality
#' for all nodes in a graph. These scores describe
#' the ease to which nodes can reach other nodes.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @param direction using \code{all} (the default), the
#' search will ignore edge direction while traversing
#' through the graph. With \code{out}, measurements of
#' paths will be from a node whereas with \code{in},
#' measurements of paths will be to a node.
#' @return a data frame with radiality centrality
#' scores for each of the nodes.
#' @examples
#' # Create a random graph using the
#' # `add_gnm_graph()` function
#' graph <-
#' create_graph() %>%
#' add_gnm_graph(
#' n = 10,
#' m = 15,
#' set_seed = 23)
#'
#' # Get the radiality scores for nodes in the graph
#' get_radiality(graph)
#' #> id radiality
#' #> 1 1 2.3333
#' #> 2 2 3.0000
#' #> 3 3 2.6667
#' #> 4 4 2.8889
#' #> 5 5 2.5556
#' #> 6 6 2.4444
#' #> 7 7 2.6667
#' #> 8 8 2.7778
#' #> 9 9 2.1111
#' #> 10 10 2.3333
#'
#' # Add the radiality values
#' # to the graph as a node
#' # attribute
#' graph <-
#' graph %>%
#' join_node_attrs(
#' df = get_radiality(.))
#'
#' # Display the graph's node data frame
#' get_node_df(graph)
#' #> id type label radiality
#' #> 1 1 <NA> <NA> 2.3333
#' #> 2 2 <NA> <NA> 3.0000
#' #> 3 3 <NA> <NA> 2.6667
#' #> 4 4 <NA> <NA> 2.8889
#' #> 5 5 <NA> <NA> 2.5556
#' #> 6 6 <NA> <NA> 2.4444
#' #> 7 7 <NA> <NA> 2.6667
#' #> 8 8 <NA> <NA> 2.7778
#' #> 9 9 <NA> <NA> 2.1111
#' #> 10 10 <NA> <NA> 2.3333
#' @importFrom igraph distances diameter
#' @export get_radiality
get_radiality <- function(graph,
                          direction = "all") {

  # Validation: the graph object must be a valid `dgr_graph`
  # (plain negation instead of the `== FALSE` comparison)
  if (!graph_object_valid(graph)) {
    stop(
      "The graph object is not valid.",
      call. = FALSE)
  }

  # Ensure that the value provided for the
  # `direction` argument is one of the
  # valid options
  if (!(direction %in% c("all", "in", "out"))) {
    stop(
      "Valid options for `direction` are `all`, `in`, or `out`.",
      call. = FALSE)
  }

  # Get the number of nodes in the graph
  n_nodes <- count_nodes(graph)

  # Convert the graph to an igraph object
  ig_graph <- to_igraph(graph)

  # Get a matrix of shortest paths between all
  # pairs of nodes in the graph (unweighted)
  sp_matrix <-
    igraph::distances(
      graph = ig_graph,
      mode = direction,
      weights = NA)

  # Get the graph diameter; edge direction is respected unless
  # `direction` is "all" (a plain scalar comparison replaces the
  # `ifelse()` anti-pattern used previously)
  diam <-
    igraph::diameter(
      graph = ig_graph,
      directed = direction != "all")

  # Get the radiality value for each node: sum (diam + 1 - d) over all
  # reachable nodes and divide by (n_nodes - 1); nodes with no finite
  # distances (fully disconnected) score 0
  radiality_values <-
    apply(
      X = sp_matrix,
      MARGIN = 1,
      FUN = function(x) {
        if (all(x == Inf)) {
          return(0)
        }
        else {
          return(sum(diam + 1 - x[x != Inf])/(n_nodes - 1))
        }
      })

  # Create df with node IDs and radiality scores rounded to 4 places
  data.frame(
    id = radiality_values %>%
      names() %>%
      as.integer(),
    radiality = radiality_values %>% round(4),
    stringsAsFactors = FALSE)
}
| /R/get_radiality.R | permissive | wk1984/DiagrammeR | R | false | false | 3,223 | r | #' Get radiality centrality scores
#' @description Get the radiality centrality
#' for all nodes in a graph. These scores describe
#' the ease to which nodes can reach other nodes.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @param direction using \code{all} (the default), the
#' search will ignore edge direction while traversing
#' through the graph. With \code{out}, measurements of
#' paths will be from a node whereas with \code{in},
#' measurements of paths will be to a node.
#' @return a data frame with radiality centrality
#' scores for each of the nodes.
#' @examples
#' # Create a random graph using the
#' # `add_gnm_graph()` function
#' graph <-
#' create_graph() %>%
#' add_gnm_graph(
#' n = 10,
#' m = 15,
#' set_seed = 23)
#'
#' # Get the radiality scores for nodes in the graph
#' get_radiality(graph)
#' #> id radiality
#' #> 1 1 2.3333
#' #> 2 2 3.0000
#' #> 3 3 2.6667
#' #> 4 4 2.8889
#' #> 5 5 2.5556
#' #> 6 6 2.4444
#' #> 7 7 2.6667
#' #> 8 8 2.7778
#' #> 9 9 2.1111
#' #> 10 10 2.3333
#'
#' # Add the radiality values
#' # to the graph as a node
#' # attribute
#' graph <-
#' graph %>%
#' join_node_attrs(
#' df = get_radiality(.))
#'
#' # Display the graph's node data frame
#' get_node_df(graph)
#' #> id type label radiality
#' #> 1 1 <NA> <NA> 2.3333
#' #> 2 2 <NA> <NA> 3.0000
#' #> 3 3 <NA> <NA> 2.6667
#' #> 4 4 <NA> <NA> 2.8889
#' #> 5 5 <NA> <NA> 2.5556
#' #> 6 6 <NA> <NA> 2.4444
#' #> 7 7 <NA> <NA> 2.6667
#' #> 8 8 <NA> <NA> 2.7778
#' #> 9 9 <NA> <NA> 2.1111
#' #> 10 10 <NA> <NA> 2.3333
#' @importFrom igraph distances diameter
#' @export get_radiality
get_radiality <- function(graph,
                          direction = "all") {

  # Validation: the graph object must be a valid `dgr_graph`
  # (plain negation instead of the `== FALSE` comparison)
  if (!graph_object_valid(graph)) {
    stop(
      "The graph object is not valid.",
      call. = FALSE)
  }

  # Ensure that the value provided for the
  # `direction` argument is one of the
  # valid options
  if (!(direction %in% c("all", "in", "out"))) {
    stop(
      "Valid options for `direction` are `all`, `in`, or `out`.",
      call. = FALSE)
  }

  # Get the number of nodes in the graph
  n_nodes <- count_nodes(graph)

  # Convert the graph to an igraph object
  ig_graph <- to_igraph(graph)

  # Get a matrix of shortest paths between all
  # pairs of nodes in the graph (unweighted)
  sp_matrix <-
    igraph::distances(
      graph = ig_graph,
      mode = direction,
      weights = NA)

  # Get the graph diameter; edge direction is respected unless
  # `direction` is "all" (a plain scalar comparison replaces the
  # `ifelse()` anti-pattern used previously)
  diam <-
    igraph::diameter(
      graph = ig_graph,
      directed = direction != "all")

  # Get the radiality value for each node: sum (diam + 1 - d) over all
  # reachable nodes and divide by (n_nodes - 1); nodes with no finite
  # distances (fully disconnected) score 0
  radiality_values <-
    apply(
      X = sp_matrix,
      MARGIN = 1,
      FUN = function(x) {
        if (all(x == Inf)) {
          return(0)
        }
        else {
          return(sum(diam + 1 - x[x != Inf])/(n_nodes - 1))
        }
      })

  # Create df with node IDs and radiality scores rounded to 4 places
  data.frame(
    id = radiality_values %>%
      names() %>%
      as.integer(),
    radiality = radiality_values %>% round(4),
    stringsAsFactors = FALSE)
}
|
#################################################################
#################################################################
# Homework on principal component analysis and factor analysis  #
#################################################################
#################################################################
# 1. Load the required packages and custom functions.
# For example, you may need the following packages:
library(tidyverse) # for tidy code
library(GGally) # for ggcorr
library(corrr) # network_plot
library(ggcorrplot) # for ggcorrplot
library(FactoMineR) # multiple PCA functions
library(factoextra) # visualisation functions for PCA (e.g. fviz_pca_var)
library(paran) # for paran
library(psych) # for the mixedCor, cortest.bartlett, KMO, fa functions
library(GPArotation) # for the psych fa function to have the required rotation functionalities
library(MVN) # for mvn function
library(ICS) # for multivariate skew and kurtosis test
# Plot the loadings of one PCA component or EFA factor as a horizontal bar
# chart, with bar fill encoding the variable's correlation with that axis.
#
# `mod` may be either a FactoMineR::PCA result (detected via mod$call$call)
# or a psych::fa result (detected via mod$Call). `axes` selects which
# component/factor to plot. For fa models, loadings with absolute value
# below `loadings_above` are dropped (pass NULL to keep all loadings).
fviz_loadnings_with_cor <- function(mod, axes = 1, loadings_above = 0.4){
  require(factoextra)
  require(dplyr)
  require(ggplot2)
  # Branch 1: FactoMineR PCA objects carry their originating call in
  # mod$call$call.
  if(!is.na(as.character(mod$call$call)[1])){
    if(as.character(mod$call$call)[1] == "PCA"){
      # Stack the per-variable contribution and correlation matrices
      # row-wise and tag each row with its attribute type.
      contrib_and_cov = as.data.frame(rbind(mod[["var"]][["contrib"]], mod[["var"]][["cor"]]))
      vars = rownames(mod[["var"]][["contrib"]])
      attribute_type = rep(c("contribution","correlation"), each = length(vars))
      contrib_and_cov = cbind(contrib_and_cov, attribute_type)
      contrib_and_cov
      # Keep only the requested axis: one contribution and one correlation
      # value per variable.
      plot_data = cbind(as.data.frame(cbind(contrib_and_cov[contrib_and_cov[,"attribute_type"] == "contribution",axes], contrib_and_cov[contrib_and_cov[,"attribute_type"] == "correlation",axes])), vars)
      names(plot_data) = c("contribution", "correlation", "vars")
      plot_data = plot_data %>%
        mutate(correlation = round(correlation, 2))
      # Bars ordered by contribution; the dashed red line marks the mean
      # contribution and the fill gradient encodes the correlation.
      plot = plot_data %>%
        ggplot() +
        aes(x = reorder(vars, -contribution), y = contribution, gradient = correlation, label = correlation)+
        geom_col(aes(fill = correlation)) +
        geom_hline(yintercept = mean(plot_data$contribution), col = "red", lty = "dashed") + scale_fill_gradient2() +
        xlab("variable") +
        coord_flip() +
        geom_label(color = "black", fontface = "bold", position = position_dodge(0.5))
    }
  } else if(!is.na(as.character(mod$Call)[1])){
    if(as.character(mod$Call)[1] == "fa"){
      # Branch 2: psych::fa objects carry their call in mod$Call.
      # Convert the loadings matrix to a long table of
      # (variable, factor, loading) triples.
      loadings_table = mod$loadings %>%
        matrix(ncol = ncol(mod$loadings)) %>%
        as_tibble() %>%
        mutate(variable = mod$loadings %>% rownames()) %>%
        gather(factor, loading, -variable) %>%
        mutate(sign = if_else(loading >= 0, "positive", "negative"))
      # Drop loadings below the display threshold (if one was given).
      if(!is.null(loadings_above)){
        loadings_table[abs(loadings_table[,"loading"]) < loadings_above,"loading"] = NA
        loadings_table = loadings_table[!is.na(loadings_table[,"loading"]),]
      }
      # Restrict to the requested factor(s); gather() names them "V1", "V2", ...
      if(!is.null(axes)){
        loadings_table = loadings_table %>%
          filter(factor == paste0("V",axes))
      }
      # Bars of absolute loading strength, ordered by magnitude, with the
      # signed loading shown via fill and text labels.
      plot = loadings_table %>%
        ggplot() +
        aes(y = loading %>% abs(), x = reorder(variable, abs(loading)), fill = loading, label = round(loading, 2)) +
        geom_col(position = "dodge") +
        scale_fill_gradient2() +
        coord_flip() +
        geom_label(color = "black", fill = "white", fontface = "bold", position = position_dodge(0.5)) +
        facet_wrap(~factor) +
        labs(y = "Loading strength", x = "Variable")
    }
  }
  # NOTE(review): if `mod` is neither a PCA nor an fa object, `plot` is never
  # assigned and this errors with "object 'plot' not found".
  return(plot)
}
# 2. Load the Big Five Inventory (bfi) dataset.
#
# This dataset ships with the psych package and contains the answers of 2800
# respondents to the Big Five personality questionnaire. The first 25 columns
# hold the answers to the questionnaire items; the last three columns
# (gender, education and age) hold demographic information.
#
# Details about the individual items and the coding of the answers are
# available on the ?bfi help page.
?bfi
data(bfi)
my_data_bfi = bfi[,1:25]
# 3. Task: run an exploratory factor analysis on the data and identify the
# latent factors behind the items.
#
# First build the correlation matrix of the items (the polychoric correlation
# is used here because the questionnaire answers are on an ordinal scale).
# Save this correlation matrix into an object and run the factor analysis on
# that matrix.
bfi_mixedCor = mixedCor(my_data_bfi, c=NULL, p=1:25)
bfi_correl = bfi_mixedCor$rho
# 4. Visualise the correlations with at least one visualisation method.
ggcorrplot(bfi_correl, p.mat = cor_pmat(bfi_correl), hc.order=TRUE, type='lower')
ggcorr(bfi_correl)
network_plot(bfi_correl, min_cor=0.6)
# 5. Decide whether the data are factorable based on the Kaiser-Meyer-Olkin
# test. Justify the decision.
KMO(bfi_correl)
### Yes, because the overall KMO value is 0.85 and the per-item KMO values
### are all above 0.6.
# 6. Check whether the data follow a multivariate normal distribution.
# Based on the result, decide which factor extraction method to use.
result <- mvn(my_data_bfi, mvnTest = "hz")
result$multivariateNormality
mvnorm.kur.test(na.omit(my_data_bfi))
mvnorm.skew.test(na.omit(my_data_bfi))
### No, the normality assumption is violated, therefore principal axis
### factoring (paf) will be used as the extraction method.
# 7. Determine the ideal number of factors.
fa.parallel(bfi_correl, n.obs = nrow(my_data_bfi),
            fa = "fa", fm = "pa")
nfactors(bfi_correl, n.obs = nrow(my_data_bfi))
### Several solutions look plausible here; the indicators suggest 4-6 factors.
# 8. Run the factor extraction with the chosen method and number of factors.
# Build two different models: one with orthogonal and one with oblique
# factor rotation.
EFA_mod_ortogonal <- fa(bfi_correl, nfactors = 5, fm="pa", rotate = "varimax")
EFA_mod_oblique <- fa(bfi_correl, nfactors = 5, fm="pa", rotate = "oblimin")
# 9. Check the communalities. What is the average communality?
as.data.frame(sort(EFA_mod_ortogonal$communality, decreasing = TRUE))
mean(EFA_mod_ortogonal$communality)
# 10. Visualise the factor structure and the factor loadings produced by the
# two models; the ?bfi help page lists the exact question behind each
# variable.
# What is the difference between the two factor structures?
fa.diagram(EFA_mod_ortogonal)
fa.diagram(EFA_mod_oblique)
fviz_loadnings_with_cor(EFA_mod_ortogonal, axes = 1, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_oblique, axes = 1, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_ortogonal, axes = 2, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_oblique, axes = 2, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_ortogonal, axes = 3, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_oblique, axes = 3, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_ortogonal, axes = 4, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_oblique, axes = 4, loadings_above = 0.4)
### The factor structure obtained with varimax rotation is somewhat cleaner
### and easier to interpret, because the factors are uncorrelated. With the
### direct oblimin rotation the PA1 and PA5 factors are correlated.
| /seminar_11/Homework/Homework_S11_solution.R | permissive | VamosiDaniel/PSZB17-210-Data-analysis-seminar | R | false | false | 7,637 | r | #################################################################
#################################################################
# Hazi feladat a fokomponenselemzes es faktorelemzes temaban #
#################################################################
#################################################################
# 1. Toltsd be a szukseges package-eket, es sajat funkciokat.
# Az alabbi package-ekre peldaul szukseged lehet:
library(tidyverse) # for tidy code
library(GGally) # for ggcorr
library(corrr) # network_plot
library(ggcorrplot) # for ggcorrplot
library(FactoMineR) # multiple PCA functions
library(factoextra) # visualisation functions for PCA (e.g. fviz_pca_var)
library(paran) # for paran
library(psych) # for the mixedCor, cortest.bartlett, KMO, fa functions
library(GPArotation) # for the psych fa function to have the required rotation functionalities
library(MVN) # for mvn function
library(ICS) # for multivariate skew and kurtosis test
fviz_loadnings_with_cor <- function(mod, axes = 1, loadings_above = 0.4){
  # Plot per-variable contributions/loadings for a dimension-reduction model.
  # (Function name keeps the original "loadnings" spelling because callers
  # elsewhere use it.)
  #
  # mod            : a FactoMineR::PCA result (detected via mod$call$call) or
  #                  a psych::fa result (detected via mod$Call).
  # axes           : index of the component/factor to plot.
  # loadings_above : for fa models, loadings below this absolute value are
  #                  dropped from the plot.
  #
  # Returns a ggplot object, or NULL when 'mod' is not a recognised model
  # type. (Bug fix: previously no local 'plot' existed in that case, so
  # 'return(plot)' resolved to the base graphics::plot function via lexical
  # scoping and returned that instead.)
  require(factoextra)
  require(dplyr)
  require(ggplot2)
  plot <- NULL
  if (!is.na(as.character(mod$call$call)[1])) {
    if (as.character(mod$call$call)[1] == "PCA") {
      # FactoMineR PCA: stack per-variable contributions and correlations.
      contrib_and_cov <- as.data.frame(rbind(mod[["var"]][["contrib"]], mod[["var"]][["cor"]]))
      vars <- rownames(mod[["var"]][["contrib"]])
      attribute_type <- rep(c("contribution", "correlation"), each = length(vars))
      contrib_and_cov <- cbind(contrib_and_cov, attribute_type)
      # One row per variable: its contribution and correlation on 'axes'.
      plot_data <- cbind(as.data.frame(cbind(contrib_and_cov[contrib_and_cov[, "attribute_type"] == "contribution", axes], contrib_and_cov[contrib_and_cov[, "attribute_type"] == "correlation", axes])), vars)
      names(plot_data) <- c("contribution", "correlation", "vars")
      plot_data <- plot_data %>%
        mutate(correlation = round(correlation, 2))
      # Bars show contribution; fill and label show the correlation.
      plot <- plot_data %>%
        ggplot() +
        aes(x = reorder(vars, -contribution), y = contribution, gradient = correlation, label = correlation) +
        geom_col(aes(fill = correlation)) +
        geom_hline(yintercept = mean(plot_data$contribution), col = "red", lty = "dashed") +
        scale_fill_gradient2() +
        xlab("variable") +
        coord_flip() +
        geom_label(color = "black", fontface = "bold", position = position_dodge(0.5))
    }
  } else if (!is.na(as.character(mod$Call)[1])) {
    if (as.character(mod$Call)[1] == "fa") {
      # psych::fa: reshape the loadings matrix into long format.
      loadings_table <- mod$loadings %>%
        matrix(ncol = ncol(mod$loadings)) %>%
        as_tibble() %>%
        mutate(variable = mod$loadings %>% rownames()) %>%
        gather(factor, loading, -variable) %>%
        mutate(sign = if_else(loading >= 0, "positive", "negative"))
      # Drop loadings below the requested absolute threshold.
      if (!is.null(loadings_above)) {
        loadings_table[abs(loadings_table[, "loading"]) < loadings_above, "loading"] <- NA
        loadings_table <- loadings_table[!is.na(loadings_table[, "loading"]), ]
      }
      # Keep only the requested factor(s); fa names them V1, V2, ...
      if (!is.null(axes)) {
        loadings_table <- loadings_table %>%
          filter(factor == paste0("V", axes))
      }
      plot <- loadings_table %>%
        ggplot() +
        aes(y = loading %>% abs(), x = reorder(variable, abs(loading)), fill = loading, label = round(loading, 2)) +
        geom_col(position = "dodge") +
        scale_fill_gradient2() +
        coord_flip() +
        geom_label(color = "black", fill = "white", fontface = "bold", position = position_dodge(0.5)) +
        facet_wrap(~factor) +
        labs(y = "Loading strength", x = "Variable")
    }
  }
  return(plot)
}
# 2. Load the Big Five Inventory (bfi) dataset.
#
# This dataset ships with the psych package and contains the answers of 2800
# respondents to the Big Five personality questionnaire. The first 25 columns
# hold the answers to the questionnaire items; the last three columns
# (gender, education and age) hold demographic information.
#
# Details about the individual items and the coding of the answers are
# available on the ?bfi help page.
?bfi
data(bfi)
my_data_bfi = bfi[,1:25]
# 3. Task: run an exploratory factor analysis on the data and identify the
# latent factors behind the items.
#
# First build the correlation matrix of the items (the polychoric correlation
# is used here because the questionnaire answers are on an ordinal scale).
# Save this correlation matrix into an object and run the factor analysis on
# that matrix.
bfi_mixedCor = mixedCor(my_data_bfi, c=NULL, p=1:25)
bfi_correl = bfi_mixedCor$rho
# 4. Visualise the correlations with at least one visualisation method.
ggcorrplot(bfi_correl, p.mat = cor_pmat(bfi_correl), hc.order=TRUE, type='lower')
ggcorr(bfi_correl)
network_plot(bfi_correl, min_cor=0.6)
# 5. Decide whether the data are factorable based on the Kaiser-Meyer-Olkin
# test. Justify the decision.
KMO(bfi_correl)
### Yes, because the overall KMO value is 0.85 and the per-item KMO values
### are all above 0.6.
# 6. Check whether the data follow a multivariate normal distribution.
# Based on the result, decide which factor extraction method to use.
result <- mvn(my_data_bfi, mvnTest = "hz")
result$multivariateNormality
mvnorm.kur.test(na.omit(my_data_bfi))
mvnorm.skew.test(na.omit(my_data_bfi))
### No, the normality assumption is violated, therefore principal axis
### factoring (paf) will be used as the extraction method.
# 7. Determine the ideal number of factors.
fa.parallel(bfi_correl, n.obs = nrow(my_data_bfi),
            fa = "fa", fm = "pa")
nfactors(bfi_correl, n.obs = nrow(my_data_bfi))
### Several solutions look plausible here; the indicators suggest 4-6 factors.
# 8. Run the factor extraction with the chosen method and number of factors.
# Build two different models: one with orthogonal and one with oblique
# factor rotation.
EFA_mod_ortogonal <- fa(bfi_correl, nfactors = 5, fm="pa", rotate = "varimax")
EFA_mod_oblique <- fa(bfi_correl, nfactors = 5, fm="pa", rotate = "oblimin")
# 9. Check the communalities. What is the average communality?
as.data.frame(sort(EFA_mod_ortogonal$communality, decreasing = TRUE))
mean(EFA_mod_ortogonal$communality)
# 10. Visualise the factor structure and the factor loadings produced by the
# two models; the ?bfi help page lists the exact question behind each
# variable.
# What is the difference between the two factor structures?
fa.diagram(EFA_mod_ortogonal)
fa.diagram(EFA_mod_oblique)
fviz_loadnings_with_cor(EFA_mod_ortogonal, axes = 1, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_oblique, axes = 1, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_ortogonal, axes = 2, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_oblique, axes = 2, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_ortogonal, axes = 3, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_oblique, axes = 3, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_ortogonal, axes = 4, loadings_above = 0.4)
fviz_loadnings_with_cor(EFA_mod_oblique, axes = 4, loadings_above = 0.4)
### The factor structure obtained with varimax rotation is somewhat cleaner
### and easier to interpret, because the factors are uncorrelated. With the
### direct oblimin rotation the PA1 and PA5 factors are correlated.
|
#include "AEConfig.h"
#include "AE_EffectVers.h"
#ifndef AE_OS_WIN
#include "AE_General.r"
#endif
/* PiPL (Plug-In Property List) resource: describes the "HistoGrid" effect
   plug-in (entry points, versions, flags) to the After Effects host.
   NOTE(review): flag values and entry-point names must stay in sync with the
   plug-in's C/C++ source — confirm against the GlobalSetup code. */
resource 'PiPL' (16000) {
{ /* array properties: 12 elements */
/* [1] Plug-in kind: an After Effects effect. */
Kind {
AEEffect
},
/* [2] Plug-in display name. */
Name {
"HistoGrid"
},
/* [3] Effect category. */
Category {
"Sample Plug-ins"
},
/* [4]/[5] Platform-specific code entry points; which entries are emitted
   depends on the compile-time target (Windows x86/x64 or macOS). */
#ifdef AE_OS_WIN
#ifdef AE_PROC_INTELx64
CodeWin64X86 {"EntryPointFunc"},
#else
CodeWin32X86 {"EntryPointFunc"},
#endif
#else
#ifdef AE_OS_MAC
CodeMachOPowerPC {"EntryPointFunc"},
CodeMacIntel32 {"EntryPointFunc"},
CodeMacIntel64 {"EntryPointFunc"},
#endif
#endif
/* [6] PiPL specification version (major, minor). */
AE_PiPL_Version {
2,
0
},
/* [7] Effect API spec version, taken from AE_EffectVers.h. */
AE_Effect_Spec_Version {
PF_PLUG_IN_VERSION,
PF_PLUG_IN_SUBVERS
},
/* [8] Plug-in's own packed version number. */
AE_Effect_Version {
0x80001 /* 1.0 */
},
/* [9] Effect info flags (none set). */
AE_Effect_Info_Flags {
0
},
/* [10] Global out-flags bitmasks — presumably mirror the PF_OutFlags set at
   GlobalSetup; TODO confirm against the plug-in source. */
AE_Effect_Global_OutFlags {
33588288
},
AE_Effect_Global_OutFlags_2 {
16782336
},
/* [11] Match name identifying this effect in saved projects. */
AE_Effect_Match_Name {
"ADBE HistoGrid"
},
/* [12] Reserved. */
AE_Reserved_Info {
8
}
}
};
| /code/Examples/UI/HistoGrid/HistoGridPiPL.r | no_license | majthehero/glitchFXdev | R | false | false | 1,043 | r | #include "AEConfig.h"
#include "AE_EffectVers.h"
#ifndef AE_OS_WIN
#include "AE_General.r"
#endif
/* PiPL (Plug-In Property List) resource: describes the "HistoGrid" effect
   plug-in (entry points, versions, flags) to the After Effects host.
   NOTE(review): flag values and entry-point names must stay in sync with the
   plug-in's C/C++ source — confirm against the GlobalSetup code. */
resource 'PiPL' (16000) {
{ /* array properties: 12 elements */
/* [1] Plug-in kind: an After Effects effect. */
Kind {
AEEffect
},
/* [2] Plug-in display name. */
Name {
"HistoGrid"
},
/* [3] Effect category. */
Category {
"Sample Plug-ins"
},
/* [4]/[5] Platform-specific code entry points; which entries are emitted
   depends on the compile-time target (Windows x86/x64 or macOS). */
#ifdef AE_OS_WIN
#ifdef AE_PROC_INTELx64
CodeWin64X86 {"EntryPointFunc"},
#else
CodeWin32X86 {"EntryPointFunc"},
#endif
#else
#ifdef AE_OS_MAC
CodeMachOPowerPC {"EntryPointFunc"},
CodeMacIntel32 {"EntryPointFunc"},
CodeMacIntel64 {"EntryPointFunc"},
#endif
#endif
/* [6] PiPL specification version (major, minor). */
AE_PiPL_Version {
2,
0
},
/* [7] Effect API spec version, taken from AE_EffectVers.h. */
AE_Effect_Spec_Version {
PF_PLUG_IN_VERSION,
PF_PLUG_IN_SUBVERS
},
/* [8] Plug-in's own packed version number. */
AE_Effect_Version {
0x80001 /* 1.0 */
},
/* [9] Effect info flags (none set). */
AE_Effect_Info_Flags {
0
},
/* [10] Global out-flags bitmasks — presumably mirror the PF_OutFlags set at
   GlobalSetup; TODO confirm against the plug-in source. */
AE_Effect_Global_OutFlags {
33588288
},
AE_Effect_Global_OutFlags_2 {
16782336
},
/* [11] Match name identifying this effect in saved projects. */
AE_Effect_Match_Name {
"ADBE HistoGrid"
},
/* [12] Reserved. */
AE_Reserved_Info {
8
}
}
};
|
#' Use Make
#'
#' Add a (GNU-)Makefile(s) with special emphasis on the use of containers.
#' @param docker If true a setup is created that can partially send make commands to a Docker container
#' @param singularity If true a setup is created that can partially send make commands to a Singularity container (which requires the Dockerimage)
#' @param torque If true a setup is created that can partially send make commands to a TORQUE job scheduler. Especially useful in combination with a Singularity container.
#' @param use_docker If true `use_docker()` is called.
#' @param use_singularity If true `use_singularity()` is called.
#' @param dockerignore If true a .dockerignore file is created.
#' @param open Open the newly created file for editing? Happens in RStudio, if applicable, or via utils::file.edit() otherwise.
#' @name make
NULL
#' @rdname make
#' @export
use_make <- function(docker = FALSE, singularity = FALSE, torque = FALSE, open = TRUE){
  # Create a basic Makefile, optionally wired up so that make targets can be
  # delegated to a Docker and/or Singularity container.
  #
  # docker      : also create Makefile_Docker and enable the wrapper targets.
  # singularity : also create Makefile_Singularity (requires docker = TRUE).
  # torque      : only forwarded into the template data here; no TORQUE file
  #               is created by this function.
  # open        : open the newly created Makefile for editing.
  #
  # Start out with the simplest Makefile possible.
  template_data <- list(wrapper = FALSE,
                        docker = FALSE,
                        winpath = NULL,
                        singularity = FALSE,
                        torque = FALSE)
  # Add Docker & wrapper targets to the template.
  if (docker) use_make_docker()
  # Bug fix: use the scalar, short-circuiting '&&' instead of the vectorized
  # '&' in 'if' conditions (and skip the file-system check when not needed).
  if (docker && fs::file_exists("Makefile_Docker")) {
    template_data$wrapper <- TRUE
    template_data$docker <- TRUE
    # Example Windows path shown in the generated Makefile comments.
    template_data$winpath <- docker_windows_path(
      "C:/Users/someuser/Documents/myproject/"
    )
  }
  # Add Singularity (and implicitly Docker).
  if (singularity) use_make_singularity()
  if (singularity && fs::file_exists("Makefile_Singularity")) {
    if (!fs::file_exists("Dockerfile")) usethis::ui_stop("Singularity depends in this setup on Docker.\nSet {usethis::ui_code('docker = TRUE')} & {usethis::ui_code('singularity = TRUE')}")
    template_data$singularity <- TRUE
  }
  if (fs::file_exists("Makefile")) {
    usethis::ui_oops("Makefile already exists.")
  } else {
    usethis::use_template(
      "Makefile.txt",
      "Makefile",
      data = template_data,
      ignore = FALSE,
      open = open,
      package = "repro"
    )
    # If the project already contains Rmd files, recommend adding them to the
    # Makefile.
    rmds <- fs::path_rel(
      fs::dir_ls(usethis::proj_path(), glob = "*.Rmd", recurse = TRUE),
      usethis::proj_path()
    )
    if (length(rmds) > 0L) {
      usethis::ui_info("You probably want to add:\n{usethis::ui_value(rmds)}\nto the {usethis::ui_value('Makefile')}.\nHint: {usethis::ui_code('repro::use_make_rmd()')}")
    }
  }
}
#' @rdname make
#' @export
use_make_docker <- function(use_docker = TRUE, dockerignore = TRUE, open = FALSE){
  # Create Makefile_Docker from the package template; by default also create
  # a Dockerfile (if missing) and a .dockerignore.
  #
  # use_docker   : call use_docker() when no Dockerfile exists yet.
  # dockerignore : also create a .dockerignore file.
  # open         : open the created file(s) for editing.
  #
  # Bug fix: scalar '&&' (short-circuit) instead of vectorized '&' in the
  # 'if' condition; also check the cheap flag before touching the file system.
  if (use_docker && !fs::file_exists("Dockerfile")) use_docker()
  usethis::use_template(
    "Makefile_Docker",
    "Makefile_Docker",
    ignore = FALSE,
    open = open,
    package = "repro"
  )
  if (dockerignore) {
    usethis::use_template(
      "dockerignore",
      ".dockerignore",
      ignore = FALSE,
      open = open,
      package = "repro"
    )
  }
}
#' @rdname make
#' @export
use_make_singularity <- function(use_singularity = TRUE, open = FALSE){
  # Create Makefile_Singularity from the package template.
  #
  # use_singularity : reserved; the original placeholder ('1 + 2 #fixme')
  #                   did nothing with it. TODO: set up the Singularity
  #                   prerequisites when TRUE.
  # open            : open the created file for editing.
  usethis::use_template(
    "Makefile_Singularity",
    "Makefile_Singularity",
    ignore = FALSE,
    open = open, # bug fix: the 'open' argument was previously ignored
    package = "repro"
  )
}
#'
#' Add a (GNU-)Makefile(s) with special emphasis on the use of containers.
#' @param docker If true a setup is created that can partially send make commands to a Docker container
#' @param singularity If true a setup is created that can partially send make commands to a Singularity container (which requires the Dockerimage)
#' @param torque If true a setup is created that can partially send make commands to a TORQUE job scheduler. Especially useful in combination with a Singularity container.
#' @param use_docker If true `use_docker()` is called.
#' @param use_singularity If true `use_singularity()` is called.
#' @param dockerignore If true a .dockerignore file is created.
#' @param open Open the newly created file for editing? Happens in RStudio, if applicable, or via utils::file.edit() otherwise.
#' @name make
NULL
#' @rdname make
#' @export
use_make <- function(docker = FALSE, singularity = FALSE, torque = FALSE, open = TRUE){
  # Create a basic Makefile, optionally wired up so that make targets can be
  # delegated to a Docker and/or Singularity container.
  #
  # docker      : also create Makefile_Docker and enable the wrapper targets.
  # singularity : also create Makefile_Singularity (requires docker = TRUE).
  # torque      : only forwarded into the template data here; no TORQUE file
  #               is created by this function.
  # open        : open the newly created Makefile for editing.
  #
  # Start out with the simplest Makefile possible.
  template_data <- list(wrapper = FALSE,
                        docker = FALSE,
                        winpath = NULL,
                        singularity = FALSE,
                        torque = FALSE)
  # Add Docker & wrapper targets to the template.
  if (docker) use_make_docker()
  # Bug fix: use the scalar, short-circuiting '&&' instead of the vectorized
  # '&' in 'if' conditions (and skip the file-system check when not needed).
  if (docker && fs::file_exists("Makefile_Docker")) {
    template_data$wrapper <- TRUE
    template_data$docker <- TRUE
    # Example Windows path shown in the generated Makefile comments.
    template_data$winpath <- docker_windows_path(
      "C:/Users/someuser/Documents/myproject/"
    )
  }
  # Add Singularity (and implicitly Docker).
  if (singularity) use_make_singularity()
  if (singularity && fs::file_exists("Makefile_Singularity")) {
    if (!fs::file_exists("Dockerfile")) usethis::ui_stop("Singularity depends in this setup on Docker.\nSet {usethis::ui_code('docker = TRUE')} & {usethis::ui_code('singularity = TRUE')}")
    template_data$singularity <- TRUE
  }
  if (fs::file_exists("Makefile")) {
    usethis::ui_oops("Makefile already exists.")
  } else {
    usethis::use_template(
      "Makefile.txt",
      "Makefile",
      data = template_data,
      ignore = FALSE,
      open = open,
      package = "repro"
    )
    # If the project already contains Rmd files, recommend adding them to the
    # Makefile.
    rmds <- fs::path_rel(
      fs::dir_ls(usethis::proj_path(), glob = "*.Rmd", recurse = TRUE),
      usethis::proj_path()
    )
    if (length(rmds) > 0L) {
      usethis::ui_info("You probably want to add:\n{usethis::ui_value(rmds)}\nto the {usethis::ui_value('Makefile')}.\nHint: {usethis::ui_code('repro::use_make_rmd()')}")
    }
  }
}
#' @rdname make
#' @export
use_make_docker <- function(use_docker = TRUE, dockerignore = TRUE, open = FALSE){
  # Create Makefile_Docker from the package template; by default also create
  # a Dockerfile (if missing) and a .dockerignore.
  #
  # use_docker   : call use_docker() when no Dockerfile exists yet.
  # dockerignore : also create a .dockerignore file.
  # open         : open the created file(s) for editing.
  #
  # Bug fix: scalar '&&' (short-circuit) instead of vectorized '&' in the
  # 'if' condition; also check the cheap flag before touching the file system.
  if (use_docker && !fs::file_exists("Dockerfile")) use_docker()
  usethis::use_template(
    "Makefile_Docker",
    "Makefile_Docker",
    ignore = FALSE,
    open = open,
    package = "repro"
  )
  if (dockerignore) {
    usethis::use_template(
      "dockerignore",
      ".dockerignore",
      ignore = FALSE,
      open = open,
      package = "repro"
    )
  }
}
#' @rdname make
#' @export
use_make_singularity <- function(use_singularity = TRUE, open = FALSE){
  # Create Makefile_Singularity from the package template.
  #
  # use_singularity : reserved; the original placeholder ('1 + 2 #fixme')
  #                   did nothing with it. TODO: set up the Singularity
  #                   prerequisites when TRUE.
  # open            : open the created file for editing.
  usethis::use_template(
    "Makefile_Singularity",
    "Makefile_Singularity",
    ignore = FALSE,
    open = open, # bug fix: the 'open' argument was previously ignored
    package = "repro"
  )
}
PLS_beta <- function(dataY,dataX,nt=2,limQ2set=.0975,dataPredictY=dataX,modele="pls",family=NULL,typeVC="none",EstimXNA=FALSE,scaleX=TRUE,scaleY=NULL,pvals.expli=FALSE,alpha.pvals.expli=.05,MClassed=FALSE,tol_Xi=10^(-12),weights,method,sparse=FALSE,sparseStop=TRUE,naive=FALSE,link=NULL,link.phi=NULL,type="ML") {
##################################################
# #
# Initialization and formatting the inputs #
# #
##################################################
cat("____************************************************____\n")
if(any(apply(is.na(dataX),MARGIN=2,"all"))){return(vector("list",0)); cat("One of the columns of dataX is completely filled with missing data\n"); stop()}
if(any(apply(is.na(dataX),MARGIN=1,"all"))){return(vector("list",0)); cat("One of the rows of dataX is completely filled with missing data\n"); stop()}
if(identical(dataPredictY,dataX)){PredYisdataX <- TRUE} else {PredYisdataX <- FALSE}
if(!PredYisdataX){
if(any(apply(is.na(dataPredictY),MARGIN=2,"all"))){return(vector("list",0)); cat("One of the columns of dataPredictY is completely filled with missing data\n"); stop()}
if(any(apply(is.na(dataPredictY),MARGIN=1,"all"))){return(vector("list",0)); cat("One of the rows of dataPredictY is completely filled with missing data\n"); stop()}
}
if(missing(weights)){NoWeights=TRUE} else {if(all(weights==rep(1,length(dataY)))){NoWeights=TRUE} else {NoWeights=FALSE}}
if(missing(method)){method="logistic"}
if(missing(type)){method="ML"}
if(any(is.na(dataX))) {na.miss.X <- TRUE} else na.miss.X <- FALSE
if(any(is.na(dataY))) {na.miss.Y <- TRUE} else na.miss.Y <- FALSE
if(any(is.na(dataPredictY))) {na.miss.PredictY <- TRUE} else {na.miss.PredictY <- FALSE}
if(is.null(modele)){naive=FALSE} else {if(modele=="pls"){naive=FALSE} else {if(!missing(naive)){cat(paste("Only naive DoF can be used with PLS GLM or PLS BETA\n",sep=""))}; naive=TRUE}}
if(na.miss.X|na.miss.Y){naive=TRUE; cat(paste("Only naive DoF can be used with missing data\n",sep="")); if(!NoWeights){cat(paste("Weights cannot be used with missing data\n",sep=""))}}
if(!NoWeights){naive=TRUE; cat(paste("Only naive DoF can be used with weighted PLS\n",sep=""))} else {NoWeights=TRUE}
if(sparse){pvals.expli=TRUE}
if (!is.data.frame(dataX)) {dataX <- data.frame(dataX)}
if (is.null(modele) & !is.null(family)) {modele<-"pls-glm-family"}
if (!(modele %in% c("pls","pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson","pls-glm-polr","pls-beta"))) {print(modele);stop("'modele' not recognized")}
if (!(modele %in% "pls-glm-family") & !is.null(family)) {stop("Set 'modele=pls-glm-family' to use the family option")}
if (!(modele %in% "pls-beta") & !is.null(link)) {stop("Set 'modele=pls-beta' to use the link option")}
if (modele=="pls") {family<-NULL}
if (modele=="pls-beta") {family<-NULL}
if (modele=="pls-glm-Gamma") {family<-Gamma(link = "inverse")}
if (modele=="pls-glm-gaussian") {family<-gaussian(link = "identity")}
if (modele=="pls-glm-inverse.gaussian") {family<-inverse.gaussian(link = "1/mu^2")}
if (modele=="pls-glm-logistic") {family<-binomial(link = "logit")}
if (modele=="pls-glm-poisson") {family<-poisson(link = "log")}
if (modele=="pls-glm-polr") {family<-NULL}
if (!is.null(family)) {
if (is.character(family)) {family <- get(family, mode = "function", envir = parent.frame(n=sys.nframe()))}
if (is.function(family)) {family <- family()}
if (is.language(family)) {family <- eval(family)}
}
if (is.null(link)){link<-"logit"} else {if(!(link %in% c("logit", "probit", "cloglog", "cauchit", "log", "loglog")) & !is(link,"link-glm")) {link<-"logit"}}
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {print(family)}
if (modele %in% c("pls-glm-polr")) {cat("\nModel:", modele, "\n");cat("Method:", method, "\n\n")}
if (modele=="pls-beta") {cat("\nModel:", modele, "\n\n");cat("Link:", link, "\n\n");cat("Link.phi:", link.phi, "\n\n");cat("Type:", type, "\n\n")}
if (modele=="pls") {cat("\nModel:", modele, "\n\n")}
scaleY <- NULL
if (is.null(scaleY)) {
if (!(modele %in% c("pls"))) {scaleY <- FALSE} else {scaleY <- TRUE}
}
if (scaleY) {if(NoWeights){RepY <- scale(dataY)} else {meanY <- weighted.mean(dataY,weights); stdevY <- sqrt((length(dataY)-1)/length(dataY)*weighted.mean((dataY-meanY)^2,weights)); RepY <- (dataY-meanY)/stdevY; attr(RepY,"scaled:center") <- meanY ; attr(RepY,"scaled:scale") <- stdevY}}
else {
RepY <- dataY
attr(RepY,"scaled:center") <- 0
attr(RepY,"scaled:scale") <- 1
}
if (scaleX) {if(NoWeights){ExpliX <- scale(dataX)} else {meanX <- apply(dataX,2,weighted.mean,weights); stdevX <- sqrt((length(dataY)-1)/length(dataY)*apply((sweep(dataX,2,meanX))^2,2,weighted.mean,weights)); ExpliX <- sweep(sweep(dataX, 2, meanX), 2 ,stdevX, "/"); attr(ExpliX,"scaled:center") <- meanX ; attr(ExpliX,"scaled:scale") <- stdevX}
if(PredYisdataX){PredictY <- ExpliX} else {PredictY <- sweep(sweep(dataPredictY, 2, attr(ExpliX,"scaled:center")), 2 ,attr(ExpliX,"scaled:scale"), "/")}
}
else {
ExpliX <- dataX
attr(ExpliX,"scaled:center") <- rep(0,ncol(dataX))
attr(ExpliX,"scaled:scale") <- rep(1,ncol(dataX))
PredictY <- (dataPredictY)
}
if(is.null(colnames(ExpliX))){colnames(ExpliX)<-paste("X",1:ncol(ExpliX),sep=".")}
if(is.null(rownames(ExpliX))){rownames(ExpliX)<-1:nrow(ExpliX)}
XXNA <- !(is.na(ExpliX))
YNA <- !(is.na(RepY))
if(PredYisdataX){PredictYNA <- XXNA} else {PredictYNA <- !is.na(PredictY)}
ExpliXwotNA <- as.matrix(ExpliX)
ExpliXwotNA[!XXNA] <- 0
XXwotNA <- as.matrix(ExpliX)
XXwotNA[!XXNA] <- 0
dataXwotNA <- as.matrix(dataX)
dataXwotNA[!XXNA] <- 0
YwotNA <- as.matrix(RepY)
YwotNA[!YNA] <- 0
dataYwotNA <- as.matrix(dataY)
dataYwotNA[!YNA] <- 0
if(PredYisdataX){PredictYwotNA <- XXwotNA} else {
PredictYwotNA <- as.matrix(PredictY)
PredictYwotNA [is.na(PredictY)] <- 0
}
if (modele == "pls-glm-polr") {
dataY <- as.factor(dataY)
YwotNA <- as.factor(YwotNA)}
res <- list(nr=nrow(ExpliX),nc=ncol(ExpliX),nt=nt,ww=NULL,wwnorm=NULL,wwetoile=NULL,tt=NULL,pp=NULL,CoeffC=NULL,uscores=NULL,YChapeau=NULL,residYChapeau=NULL,RepY=RepY,na.miss.Y=na.miss.Y,YNA=YNA,residY=RepY,ExpliX=ExpliX,na.miss.X=na.miss.X,XXNA=XXNA,residXX=ExpliX,PredictY=PredictYwotNA,RSS=rep(NA,nt),RSSresidY=rep(NA,nt),R2=rep(NA,nt),R2residY=rep(NA,nt),press.ind=NULL,press.tot=NULL,Q2cum=rep(NA, nt),family=family,ttPredictY = NULL,typeVC=typeVC,dataX=dataX,dataY=dataY)
if(NoWeights){res$weights<-rep(1L,res$nr)} else {res$weights<-weights}
res$temppred <- NULL
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
if (scaleY) {res$YChapeau=rep(attr(RepY,"scaled:center"),nrow(ExpliX))
res$residYChapeau=rep(0,nrow(ExpliX))}
else
{res$YChapeau=rep(mean(RepY),nrow(ExpliX))
res$residYChapeau=rep(mean(RepY),nrow(ExpliX))}
}
################################################
################################################
## ##
## Beginning of the loop for the components ##
## ##
################################################
################################################
res$computed_nt <- 0
break_nt <- FALSE
break_nt_sparse <- FALSE
break_nt_sparse1 <- FALSE
break_nt_vc <- FALSE
break_nt_betareg <- FALSE
for (kk in 1:nt) {
XXwotNA <- as.matrix(res$residXX)
XXwotNA[!XXNA] <- 0
YwotNA <- as.matrix(res$residY)
YwotNA[!YNA] <- 0
tempww <- rep(0,res$nc)
temptest <- sqrt(colSums(res$residXX^2, na.rm=TRUE))
if(any(temptest<tol_Xi)) {
break_nt <- TRUE
if (is.null(names(which(temptest<tol_Xi)))) {
cat(paste("Warning : ",paste(names(which(temptest<tol_Xi)),sep="",collapse=" ")," < 10^{-12}\n",sep=""))
} else {
cat(paste("Warning : ",paste((which(temptest<tol_Xi)),sep="",collapse=" ")," < 10^{-12}\n",sep=""))
}
cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))
rm(temptest)
break
}
res$computed_nt <- kk
##############################################
# #
# Weight computation for each model #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
if(NoWeights){
tempww <- t(XXwotNA)%*%YwotNA/(t(XXNA)%*%YwotNA^2)
}
if(!NoWeights){
tempww <- t(XXwotNA*weights)%*%YwotNA/(t(XXNA*weights)%*%YwotNA^2)
}
if (pvals.expli) {
tempvalpvalstep <- 2 * pnorm(-abs(tempww))
temppvalstep <- (tempvalpvalstep < alpha.pvals.expli)
if(sparse&sparseStop){
if(sum(temppvalstep)==0L){
break_nt_sparse <- TRUE}
else
{tempww[!temppvalstep] <- 0}}
res$valpvalstep <- cbind(res$valpvalstep,tempvalpvalstep)
res$pvalstep <- cbind(res$pvalstep,temppvalstep)
}
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
if (!pvals.expli) {
XXwotNA[!XXNA] <- NA
for (jj in 1:(res$nc)) {
tempww[jj] <- coef(glm(YwotNA~cbind(res$tt,XXwotNA[,jj]),family=family))[kk+1]
}
XXwotNA[!XXNA] <- 0
rm(jj)}
else {
XXwotNA[!XXNA] <- NA
tempvalpvalstep <- rep(0,res$nc)
temppvalstep <- rep(0,res$nc)
for (jj in 1:(res$nc)) {
tmww <- summary(glm(YwotNA~cbind(res$tt,XXwotNA[,jj]),family=family))$coefficients[kk+1,]
tempww[jj] <- tmww[1]
tempvalpvalstep[jj] <- tmww[4]
temppvalstep[jj] <- (tmww[4] < alpha.pvals.expli)
}
if(sparse&sparseStop){
if(sum(temppvalstep)==0L){
break_nt_sparse <- TRUE}
else
{tempww[!temppvalstep] <- 0}}
XXwotNA[!XXNA] <- 0
rm(jj)
res$valpvalstep <- cbind(res$valpvalstep,tempvalpvalstep)
res$pvalstep <- cbind(res$pvalstep,temppvalstep)
}
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
YwotNA <- as.factor(YwotNA)
if (!pvals.expli) {
XXwotNA[!XXNA] <- NA
tts <- res$tt
for (jj in 1:(res$nc)) {
tempww[jj] <- -1*MASS::polr(YwotNA~cbind(tts,XXwotNA[,jj]),na.action=na.exclude,method=method)$coef[kk]
}
XXwotNA[!XXNA] <- 0
rm(jj,tts)}
else {
XXwotNA[!XXNA] <- NA
tts <- res$tt
tempvalpvalstep <- rep(0,res$nc)
temppvalstep <- rep(0,res$nc)
for (jj in 1:(res$nc)) {
tmww <- -1*summary(MASS::polr(YwotNA~cbind(tts,XXwotNA[,jj]),na.action=na.exclude,Hess=TRUE,method=method))$coefficients[kk,]
tempww[jj] <- tmww[1]
tempvalpvalstep[jj] <- 2 * pnorm(-abs(tmww[3]))
temppvalstep[jj] <- (tempvalpvalstep[jj] < alpha.pvals.expli)
}
if(sparse&sparseStop){
if(sum(temppvalstep)==0L){
break_nt_sparse <- TRUE}
else
{tempww[!temppvalstep] <- 0}}
XXwotNA[!XXNA] <- 0
rm(jj,tts)
res$valpvalstep <- cbind(res$valpvalstep,tempvalpvalstep)
res$pvalstep <- cbind(res$pvalstep,temppvalstep)
}
}
##############################################
###### PLS-BETA ######
##############################################
if (modele %in% c("pls-beta")) {
if (!pvals.expli) {
XXwotNA[!XXNA] <- NA
tts <- res$tt
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
#assign("tts", tts, envir=parent.frame(n=sys.nframe()))
#assign("XXwotNA", XXwotNA, envir=parent.frame(n=sys.nframe()))
for (jj in 1:(res$nc)) {
#assign("jj", jj, envir=parent.frame(n=sys.nframe()))
temptempww <- try(coef(betareg::betareg(YwotNA~cbind(tts,XXwotNA[,jj]),link=link,link.phi=link.phi,type=type,phi=FALSE))[kk+1],silent=TRUE)
if(is.numeric(temptempww)){tempww[jj] <- temptempww} else {break_nt_betareg <- TRUE; break}
}
if(break_nt_betareg){
res$computed_nt <- kk-1
cat(paste("Error in betareg found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
break}
XXwotNA[!XXNA] <- 0
rm(jj,tts)}
else {
XXwotNA[!XXNA] <- NA
tts <- res$tt
tempvalpvalstep <- rep(0,res$nc)
temppvalstep <- rep(0,res$nc)
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
#assign("tts", tts, envir=parent.frame(n=sys.nframe()))
#assign("XXwotNA", XXwotNA, envir=parent.frame(n=sys.nframe()))
for (jj in 1:(res$nc)) {
#assign("jj", jj, envir=parent.frame(n=sys.nframe()))
temptempww <- try(summary(betareg::betareg(YwotNA~cbind(tts,XXwotNA[,jj]),hessian=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type))$coefficients$mean[kk+1,],silent=TRUE)
if(is.numeric(temptempww)){tmww <- temptempww} else {break_nt_betareg <- TRUE; break}
tempww[jj] <- tmww[1]
tempvalpvalstep[jj] <- tmww[4]
temppvalstep[jj] <- (tmww[4] < alpha.pvals.expli)
}
if(break_nt_betareg){
res$computed_nt <- kk-1
cat(paste("Error in betareg found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
break}
if(sparse&sparseStop){
if(sum(temppvalstep)==0L){
break_nt_sparse <- TRUE}
else
{tempww[!temppvalstep] <- 0}}
XXwotNA[!XXNA] <- 0
rm(jj,tts)
res$valpvalstep <- cbind(res$valpvalstep,tempvalpvalstep)
res$pvalstep <- cbind(res$pvalstep,temppvalstep)
}
}
##############################################
# #
# Computation of the components (model free) #
# #
##############################################
if((break_nt_sparse)&(kk==1L)){
cat(paste("No significant predictors (<",alpha.pvals.expli,") found\n",sep=""))
cat(paste("Warning only one standard component (without sparse option) was thus extracted\n",sep=""))
break_nt_sparse1 <- TRUE
}
if((break_nt_sparse)&!(kk==1L)){
res$computed_nt <- kk-1
if(!(break_nt_sparse1)){
cat(paste("No more significant predictors (<",alpha.pvals.expli,") found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
}
break}
tempwwnorm <- tempww/sqrt(drop(crossprod(tempww)))
temptt <- XXwotNA%*%tempwwnorm/(XXNA%*%(tempwwnorm^2))
temppp <- rep(0,res$nc)
for (jj in 1:(res$nc)) {
temppp[jj] <- crossprod(temptt,XXwotNA[,jj])/drop(crossprod(XXNA[,jj],temptt^2))
}
res$residXX <- XXwotNA-temptt%*%temppp
if (na.miss.X & !na.miss.Y) {
for (ii in 1:res$nr) {
if(rcond(t(cbind(res$pp,temppp)[XXNA[ii,],,drop=FALSE])%*%cbind(res$pp,temppp)[XXNA[ii,],,drop=FALSE])<tol_Xi) {
break_nt <- TRUE; res$computed_nt <- kk-1
cat(paste("Warning : reciprocal condition number of t(cbind(res$pp,temppp)[XXNA[",ii,",],,drop=FALSE])%*%cbind(res$pp,temppp)[XXNA[",ii,",],,drop=FALSE] < 10^{-12}\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))
break
}
}
rm(ii)
if(break_nt==TRUE) {break}
}
if(!PredYisdataX){
if (na.miss.PredictY & !na.miss.Y) {
for (ii in 1:nrow(PredictYwotNA)) {
if(rcond(t(cbind(res$pp,temppp)[PredictYNA[ii,],,drop=FALSE])%*%cbind(res$pp,temppp)[PredictYNA[ii,],,drop=FALSE])<tol_Xi) {
break_nt <- TRUE; res$computed_nt <- kk-1
cat(paste("Warning : reciprocal condition number of t(cbind(res$pp,temppp)[PredictYNA[",ii,",,drop=FALSE],])%*%cbind(res$pp,temppp)[PredictYNA[",ii,",,drop=FALSE],] < 10^{-12}\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))
break
}
}
rm(ii)
if(break_nt==TRUE) {break}
}
}
# Before accepting the new component for the beta-regression model, try
# fitting betareg on the current components; if the fit fails (coef() of
# a try-error is not numeric), stop at kk-1 components instead of
# crashing later.
if (modele %in% c("pls-beta")) {
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
tt<-cbind(res$tt,temptt)
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
# At kk==1 also check that the intercept-only beta regression is
# fittable at all.
if (kk==1) {
coeftempconstbeta <- try(coef(betareg::betareg(YwotNA~1,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)),silent=TRUE)
if(!is.numeric(coeftempconstbeta)){
res$computed_nt <- kk-1
cat(paste("Error in betareg found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
break}
rm(coeftempconstbeta)
}
coeftempregbeta <- try(coef(betareg::betareg(YwotNA~tt,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)),silent=TRUE)
if(!is.numeric(coeftempregbeta)){
res$computed_nt <- kk-1
cat(paste("Error in betareg found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
break}
# Clean-up: also removes tt/YwotNA from a parent frame — presumably left
# over from the commented-out assign() calls above; TODO confirm this is
# still needed.
rm(tt,envir=parent.frame(n=sys.nframe()))
rm(tt)
rm(YwotNA,envir=parent.frame(n=sys.nframe()))
rm(coeftempregbeta)
}
# The kk-th component passed all feasibility checks: append its weights,
# normalized weights, scores and loadings to the result object.
res$ww <- cbind(res$ww,tempww)
res$wwnorm <- cbind(res$wwnorm,tempwwnorm)
res$tt <- cbind(res$tt,temptt)
res$pp <- cbind(res$pp,temppp)
##############################################
#                                            #
#      Computation of the coefficients       #
#      of the model with kk components       #
#                                            #
##############################################
##############################################
######              PLS               ########
##############################################
# Ordinary PLS: regress Y on the components one at a time (only the kk-th
# coefficient is re-estimated; earlier ones are kept). YNA marks the
# observed entries of Y.
if (modele == "pls") {
if (kk==1) {
tempCoeffC <- solve(t(res$tt[YNA])%*%res$tt[YNA])%*%t(res$tt[YNA])%*%YwotNA[YNA]
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- 0
} else {
# NOTE(review): both branches below are currently identical; the split
# on missing data is kept for symmetry with the other model families.
if (!(na.miss.X | na.miss.Y)) {
tempCoeffC <- c(rep(0,kk-1),solve(t(res$tt[YNA,kk])%*%res$tt[YNA,kk])%*%t(res$tt[YNA,kk])%*%YwotNA[YNA])
tempCoeffConstante <- 0
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
}
else
{
tempCoeffC <- c(rep(0,kk-1),solve(t(res$tt[YNA,kk])%*%res$tt[YNA,kk])%*%t(res$tt[YNA,kk])%*%YwotNA[YNA])
tempCoeffConstante <- 0
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
}
}
# Back-transformation weights w* = W (P'W)^{-1}; standardized
# coefficients on the original (scaled) predictors.
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- diag(res$CoeffCFull)
res$CoeffConstante <- tempCoeffConstante
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
######           PLS-GLM              ########
##############################################
# GLM on the PLS components: refit the GLM on all kk components, record
# AIC/BIC, coefficient table, Pearson Chi2 and misclassification count
# (the latter is computed unconditionally; the commented guard suggests
# it is only meaningful for binomial-type families — TODO confirm).
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
if (kk==1) {
# Null (intercept-only) model first: baseline AIC/BIC/Chi2.
tempconstglm <- glm(YwotNA~1,family=family)
res$AIC <- AIC(tempconstglm)
res$BIC <- AIC(tempconstglm, k = log(res$nr))
res$Coeffsmodel_vals <- rbind(summary(tempconstglm)$coefficients,matrix(rep(NA,4*nt),ncol=4))
res$ChisqPearson <- crossprod(residuals.glm(tempconstglm,type="pearson"))
#if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$MissClassed <- sum(unclass(res$RepY)!=ifelse(predict(tempconstglm,type="response") < 0.5, 0,1))
#}
rm(tempconstglm)
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregglm))
res$BIC <- cbind(res$BIC,AIC(tempregglm, k = log(res$nr)))
# Pad the coefficient table with NAs up to nt components so columns of
# Coeffsmodel_vals stay conformable across kk.
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals.glm(tempregglm,type="pearson")))
#if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$MissClassed <- cbind(res$MissClassed,sum(unclass(res$RepY)!=ifelse(predict(tempregglm,type="response") < 0.5, 0,1)))
#}
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- tempCoeffConstante
tempCoeffC <- tempCoeffC[-1]
} else {
# NOTE(review): the two branches below (complete data vs missing data)
# are currently identical.
if (!(na.miss.X | na.miss.Y)) {
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregglm))
res$BIC <- cbind(res$BIC,AIC(tempregglm, k = log(res$nr)))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals.glm(tempregglm,type="pearson")))
#if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$MissClassed <- cbind(res$MissClassed,sum(unclass(res$RepY)!=ifelse(predict(tempregglm,type="response") < 0.5, 0,1)))
#}
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
else
{
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregglm))
res$BIC <- cbind(res$BIC,AIC(tempregglm, k = log(res$nr)))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals.glm(tempregglm,type="pearson")))
#if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$MissClassed <- cbind(res$MissClassed,sum(unclass(res$RepY)!=ifelse(predict(tempregglm,type="response") < 0.5, 0,1)))
#}
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
}
# Map component-space coefficients back to the standardized predictors
# via w* = W (P'W)^{-1}.
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
######           PLS-GLM-POLR         ########
##############################################
# Proportional-odds (ordinal) logistic regression on the PLS components,
# fitted with MASS::polr below.
if (modele %in% c("pls-glm-polr")) {
# Multinomial covariance matrix of the category indicators:
# diag(p) - p %*% t(p), computed on the probabilities of all but the
# last (reference) category. Used to build the Pearson Chi2 statistic.
Varyy <- function(piVaryy) {
probs <- piVaryy[-length(piVaryy)]
diag(probs) - tcrossprod(probs)
}
# Pearson-type quadratic form for one observation: (y - p)' V^- (y - p),
# dropping the last category and using the Moore-Penrose inverse of the
# multinomial covariance Varyy(p), which may be singular.
Chisqcomp <- function(yichisq,pichisq) {
dev <- yichisq[-length(yichisq)] - pichisq[-length(pichisq)]
t(dev) %*% MASS::ginv(Varyy(pichisq)) %*% dev
}
# Sum the per-observation Chi2 contributions over lists of probability
# rows (rowspi) and indicator rows (rowsyi); note the pairing order:
# each yi row is passed first to Chisqcomp, each pi row second.
Chiscompmatrix <- function(rowspi,rowsyi) {
sum(unlist(Map(Chisqcomp, rowsyi, rowspi)))
}
# Fit polr on the components; record AIC/BIC, misclassification count,
# coefficient table and a Pearson Chi2 built from the predicted-class
# indicator matrix.
if (kk==1) {
# Null (intercept-only) ordinal model as baseline.
tempconstpolr <- MASS::polr(YwotNA~1,na.action=na.exclude,Hess=TRUE,method=method)
res$AIC <- AIC(tempconstpolr)
res$BIC <- AIC(tempconstpolr, k = log(res$nr))
res$MissClassed <- sum(!(unclass(predict(tempconstpolr,type="class"))==unclass(res$RepY)))
res$Coeffsmodel_vals <- rbind(summary(tempconstpolr)$coefficients,matrix(rep(NA,3*nt),ncol=3))
# Build a 0/1 indicator matrix of the predicted classes (one column per
# level) to feed the Chi2 computation.
tempmodord <- predict(tempconstpolr,type="class")
tempfff <- ~tempmodord-1
tempm <- model.frame(tempfff, tempmodord)
tempmat <- model.matrix(tempfff, model.frame(tempfff, tempmodord))
res$ChisqPearson <- sum(Chiscompmatrix(as.list(as.data.frame(t(predict(tempconstpolr,type="probs")))),as.list(as.data.frame(t(tempmat)))))
rm(tempconstpolr)
tts<-res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$AIC <- cbind(res$AIC,AIC(tempregpolr))
res$BIC <- cbind(res$BIC,AIC(tempregpolr, k = log(res$nr)))
res$MissClassed <- cbind(res$MissClassed,sum(!(unclass(predict(tempregpolr,type="class"))==unclass(res$RepY))))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempmodord <- predict(tempregpolr,type="class")
tempfff <- ~tempmodord-1
tempm <- model.frame(tempfff, tempmodord)
tempmat <- model.matrix(tempfff, model.frame(tempfff, tempmodord))
res$ChisqPearson <- c(res$ChisqPearson,sum(Chiscompmatrix(as.list(as.data.frame(t(predict(tempregpolr,type="probs")))),as.list(as.data.frame(t(tempmat))))))
# Sign flip: polr parameterizes the linear predictor as zeta - x'beta,
# so negating coef recovers regression-style slopes — TODO confirm.
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- matrix(c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)),ncol=1)
res$CoeffConstante <- tempCoeffConstante
} else {
# NOTE(review): the two branches below (complete vs missing data) are
# currently identical.
if (!(na.miss.X | na.miss.Y)) {
tts <- res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$AIC <- cbind(res$AIC,AIC(tempregpolr))
res$BIC <- cbind(res$BIC,AIC(tempregpolr, k = log(res$nr)))
res$MissClassed <- cbind(res$MissClassed,sum(!(unclass(predict(tempregpolr,type="class"))==unclass(res$RepY))))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempmodord <- predict(tempregpolr,type="class")
tempfff <- ~tempmodord-1
tempm <- model.frame(tempfff, tempmodord)
tempmat <- model.matrix(tempfff, model.frame(tempfff, tempmodord))
res$ChisqPearson <- c(res$ChisqPearson,sum(Chiscompmatrix(as.list(as.data.frame(t(predict(tempregpolr,type="probs")))),as.list(as.data.frame(t(tempmat))))))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)))
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
}
else
{
tts<-res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$AIC <- cbind(res$AIC,AIC(tempregpolr))
res$BIC <- cbind(res$BIC,AIC(tempregpolr, k = log(res$nr)))
res$MissClassed <- cbind(res$MissClassed,sum(!(unclass(predict(tempregpolr,type="class"))==unclass(res$RepY))))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempmodord <- predict(tempregpolr,type="class")
tempfff <- ~tempmodord-1
tempm <- model.frame(tempfff, tempmodord)
tempmat <- model.matrix(tempfff, model.frame(tempfff, tempmodord))
res$ChisqPearson <- c(res$ChisqPearson,sum(Chiscompmatrix(as.list(as.data.frame(t(predict(tempregpolr,type="probs")))),as.list(as.data.frame(t(tempmat))))))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)))
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
}
}
# Standardized coefficients; the intercept rows are the polr cutpoints
# (zeta), hence the matrix binding and zeta-based row names.
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- as.matrix(rbind(as.matrix(tempCoeffConstante),res$wwetoile%*%res$CoeffC))
rownames(res$Std.Coeffs) <- c(names(tempregpolr$zeta),colnames(ExpliX))
}
##############################################
######           PLS-BETA             ########
##############################################
# Beta regression (betareg) on the PLS components: record AIC/BIC,
# pseudo-R2, mean-submodel coefficient table and Pearson Chi2.
if (modele %in% c("pls-beta")) {
if (kk==1) {
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
# Null (intercept-only) beta regression as baseline.
tempconstbeta <- betareg::betareg(YwotNA~1,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)
res$AIC <- AIC(tempconstbeta)
res$BIC <- AIC(tempconstbeta, k = log(res$nr))
res$pseudo.R2 <- NULL
res$Coeffsmodel_vals <- rbind(summary(tempconstbeta)$coefficients$mean,matrix(rep(NA,4*nt),ncol=4))
res$ChisqPearson <- crossprod(residuals(tempconstbeta,type="pearson"))
rm(tempconstbeta)
tt<-res$tt
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
tempregbeta <- betareg::betareg(YwotNA~tt,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregbeta))
res$BIC <- cbind(res$BIC,AIC(tempregbeta, k = log(res$nr)))
res$pseudo.R2 <- cbind(res$pseudo.R2,tempregbeta$pseudo.r.squared)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregbeta)$coefficients$mean,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals(tempregbeta,type="pearson")))
tempCoeffC <- as.vector(tempregbeta$coefficients$mean)
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- tempCoeffConstante
tempCoeffC <- tempCoeffC[-1]
} else {
# NOTE(review): the two branches below (complete vs missing data) are
# currently identical.
if (!(na.miss.X | na.miss.Y)) {
tt<-res$tt
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
tempregbeta <- betareg::betareg(YwotNA~tt,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregbeta))
res$BIC <- cbind(res$BIC,AIC(tempregbeta, k = log(res$nr)))
res$pseudo.R2 <- cbind(res$pseudo.R2,tempregbeta$pseudo.r.squared)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregbeta)$coefficients$mean,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals(tempregbeta,type="pearson")))
tempCoeffC <- as.vector(tempregbeta$coefficients$mean)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
else
{
tt<-res$tt
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
tempregbeta <- betareg::betareg(YwotNA~tt,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregbeta))
res$BIC <- cbind(res$BIC,AIC(tempregbeta, k = log(res$nr)))
res$pseudo.R2 <- cbind(res$pseudo.R2,tempregbeta$pseudo.r.squared)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregbeta)$coefficients$mean,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals(tempregbeta,type="pearson")))
tempCoeffC <- as.vector(tempregbeta$coefficients$mean)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
}
# Standardized coefficients via w* = W (P'W)^{-1}.
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
#                                            #
#       Prediction of the components         #
#     as if missing values (model free)      #
#       For cross-validating the GLM         #
#                                            #
##############################################
if (!(na.miss.X | na.miss.Y)) {
##############################################
#                                            #
#             Cross validation               #
#           without missing value            #
#                                            #
##############################################
##############################################
######              PLS               ########
##############################################
# Complete-data case, ordinary PLS: fitted values on the deflated Y,
# residual sums of squares on the standardized (RSSresidY) and original
# (RSS) scales, optionally weighted.
if (modele == "pls") {
res$residYChapeau <- res$tt%*%tempCoeffC
if (kk==1) {
# Baseline RSS of the intercept-only model.
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
# Unscale the coefficients back to the original units of X and Y.
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
##############################################
######           PLS-GLM              ########
##############################################
# Complete-data case, GLM: use the linear predictors / fitted values of
# the GLM refitted above (tempregglm) to accumulate RSS statistics.
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$residYChapeau <- tempregglm$linear.predictors
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
# Unscale coefficients; the GLM intercept (Std.Coeffs[1]) enters the
# back-transformed constant.
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- tempregglm$fitted.values
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
######           PLS-GLM-POLR         ########
##############################################
# Ordinal model: only unscale the coefficients (cutpoints included via
# tempCoeffConstante); no RSS-type statistics for a categorical response.
if (modele %in% c("pls-glm-polr")) {
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*tempCoeffConstante
res$Coeffs <- rbind(as.matrix(tempConstante),tempCoeffs)
rownames(res$Coeffs) <- rownames(res$Std.Coeffs)
}
##############################################
######           PLS-BETA             ########
##############################################
# Complete-data case, beta regression: same RSS bookkeeping as the GLM
# case, using the betareg fit (tempregbeta) on link and response scales.
if (modele %in% c("pls-beta")) {
res$residYChapeau <- predict(tempregbeta,type="link")
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- predict(tempregbeta,type="response")
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
}
# Missing-data branches: NAs in X only (handled below) or NAs in both X
# and Y (only a message, at the end of this else).
else {
if (na.miss.X & !na.miss.Y) {
##############################################
#                                            #
#             Cross validation               #
#           with missing value(s)            #
#                                            #
##############################################
if (kk==1) {
cat("____There are some NAs in X but not in Y____\n")
}
##############################################
######              PLS               ########
##############################################
# Same RSS accumulation as the complete-data PLS case; residYChapeau is
# reused from the previous iteration rather than recomputed here.
if (modele == "pls") {
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
##############################################
######           PLS-GLM              ########
##############################################
# NAs in X only, GLM case: mirrors the complete-data GLM block above.
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$residYChapeau <- tempregglm$linear.predictors
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- tempregglm$fitted.values
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
######           PLS-GLM-POLR         ########
##############################################
# NAs in X only, ordinal case: coefficient unscaling only.
if (modele %in% c("pls-glm-polr")) {
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")* tempCoeffConstante
res$Coeffs <- rbind(as.matrix(tempConstante),tempCoeffs)
rownames(res$Coeffs) <- rownames(res$Std.Coeffs)
}
##############################################
######           PLS-BETA             ########
##############################################
# NAs in X only, beta regression: mirrors the complete-data beta block.
if (modele %in% c("pls-beta")) {
res$residYChapeau <- predict(tempregbeta,type="link")
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- predict(tempregbeta,type="response")
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
}
# NAs in both X and Y: only a diagnostic message, no CV statistics.
else {
if (kk==1) {
cat("____There are some NAs both in X and Y____\n")
}
}
}
##############################################
#                                            #
#      Update and end of loop cleaning       #
#        (Especially useful for PLS)         #
#                                            #
##############################################
##############################################
######              PLS               ########
##############################################
# Ordinary PLS: deflate Y, keep u-scores and residual history, and track
# AIC (standardized and raw scale) plus, when MClassed is requested,
# misclassification counts and truncated-to-[0,1] probability estimates.
if (modele == "pls") {
res$uscores <- cbind(res$uscores,res$residY/res$CoeffC[kk])
res$residY <- res$residY - res$tt%*%tempCoeffC
res$residusY <- cbind(res$residusY,res$residY)
if (kk==1) {
# Baselines: intercept-only linear models on standardized and raw Y.
res$AIC.std <- AIC(lm(res$RepY~1,weights=res$weights))
res$AIC.std <- cbind(res$AIC.std,AICpls(kk,res$residY,weights=res$weights))
res$AIC <- AIC(lm(dataY~1))
res$AIC <- cbind(res$AIC,AICpls(kk,res$Yresidus,weights=res$weights))
if (MClassed) {
# 0/1 classification of fitted values at the 0.5 threshold; Probs.trc
# clips predictions into [0,1].
res$MissClassed <- sum(unclass(dataY)!=ifelse(predict(lm(dataY~1,weights=res$weights)) < 0.5, 0,1))
res$MissClassed <- cbind(res$MissClassed,sum(unclass(dataY)!=ifelse(res$YChapeau < 0.5, 0,1)))
tempprob <- res$Probs <- predict(lm(dataY~1,weights=res$weights))
tempprob <- ifelse(tempprob<0,0,tempprob)
res$Probs.trc <- ifelse(tempprob>1,1,tempprob)
res$Probs <- cbind(res$Probs,res$YChapeau)
tempprob <- ifelse(res$YChapeau<0,0,res$YChapeau)
tempprob <- ifelse(tempprob>1,1,tempprob)
res$Probs.trc <- cbind(res$Probs.trc,tempprob)
}
} else {
res$AIC.std <- cbind(res$AIC.std,AICpls(kk,res$residY,weights=res$weights))
res$AIC <- cbind(res$AIC,AICpls(kk,res$Yresidus,weights=res$weights))
if (MClassed) {
res$MissClassed <- cbind(res$MissClassed,sum(unclass(dataY)!=ifelse(res$YChapeau < 0.5, 0,1)))
res$Probs <- cbind(res$Probs,res$YChapeau)
tempprob <- ifelse(res$YChapeau<0,0,res$YChapeau)
tempprob <- ifelse(tempprob>1,1,tempprob)
res$Probs.trc <- cbind(res$Probs.trc,tempprob)
}
}
# Remove per-iteration temporaries before the next component.
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
######           PLS-GLM              ########
##############################################
# GLM/polr/beta: Y is not deflated (no actual change — the self
# assignment below is a no-op kept for symmetry with the PLS branch);
# only the residual history is recorded and temporaries removed.
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
######           PLS-GLM-POLR         ########
##############################################
if (modele %in% c("pls-glm-polr")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
######           PLS-BETA             ########
##############################################
if (modele %in% c("pls-beta")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
# Progress message, then close the loop over components (kk).
cat("____Component____",kk,"____\n")
}
##############################################
##############################################
##                                          ##
##    End of the loop on the components     ##
##                                          ##
##############################################
##############################################
# Hard failure if not even one component could be extracted.
if(res$computed_nt==0){
cat("No component could be extracted please check the data for NA only lines or columns\n"); stop()
}
# Drop the NA-padding rows corresponding to components that were never
# computed (nt requested vs computed_nt obtained).
if (pvals.expli&!(modele=="pls")) {
res$Coeffsmodel_vals<-res$Coeffsmodel_vals[1:(dim(res$Coeffsmodel_vals)[1]-(nt-res$computed_nt)),]
}
##############################################
#                                            #
#           Predicting components            #
#                                            #
##############################################
# Component scores for the prediction set. Complete data: a single
# matrix product with w*.
if (!(na.miss.PredictY | na.miss.Y)) {
cat("____Predicting X without NA neither in X nor in Y____\n")
res$ttPredictY <- PredictYwotNA%*%res$wwetoile
colnames(res$ttPredictY) <- paste("tt",1:res$computed_nt,sep="")
}
else {
# NAs in the prediction X: reconstruct each row's scores by least
# squares on that row's observed loadings (feasibility of these solves
# was checked earlier via rcond).
if (na.miss.PredictY & !na.miss.Y) {
cat("____Predicting X with NA in X and not in Y____\n")
for (ii in 1:nrow(PredictYwotNA)) {
res$ttPredictY <- rbind(res$ttPredictY,t(solve(t(res$pp[PredictYNA[ii,],,drop=FALSE])%*%res$pp[PredictYNA[ii,],,drop=FALSE])%*%t(res$pp[PredictYNA[ii,],,drop=FALSE])%*%(PredictYwotNA[ii,])[PredictYNA[ii,]]))
}
colnames(res$ttPredictY) <- paste("tt",1:res$computed_nt,sep="")
}
else {
cat("____There are some NAs both in X and Y____\n")
}
}
##############################################
#                                            #
#          Computing RSS, PRESS,             #
#           Chi2, Q2 and Q2cum               #
#                                            #
##############################################
##############################################
######              PLS               ########
##############################################
# Assemble the per-component information-criteria table (InfCrit): one
# row per number of components (0..computed_nt). R2 columns have NA in
# the 0-component row since R2 is relative to that baseline.
if (modele == "pls") {
res$R2residY <- 1-res$RSSresidY[2:(res$computed_nt+1)]/res$RSSresidY[1]
res$R2 <- 1-res$RSS[2:(res$computed_nt+1)]/res$RSS[1]
if (MClassed==FALSE) {
res$InfCrit <- t(rbind(res$AIC, res$RSS, c(NA,res$R2), c(NA,res$R2residY), res$RSSresidY, res$AIC.std))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "RSS_Y", "R2_Y", "R2_residY", "RSS_residY", "AIC.std"))
# Degrees-of-freedom-based criteria appended as extra columns.
res$ic.dof<-infcrit.dof(res,naive=naive)
res$InfCrit <- cbind(res$InfCrit,res$ic.dof)
} else {
res$InfCrit <- t(rbind(res$AIC, res$RSS, c(NA,res$R2), res$MissClassed, c(NA,res$R2residY), res$RSSresidY, res$AIC.std))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "RSS_Y", "R2_Y", "MissClassed", "R2_residY", "RSS_residY", "AIC.std"))
res$ic.dof<-infcrit.dof(res,naive=naive)
res$InfCrit <- cbind(res$InfCrit,res$ic.dof)
}
}
##############################################
######           PLS-GLM              ########
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$R2residY <- 1-res$RSSresidY[2:(res$computed_nt+1)]/res$RSSresidY[1]
res$R2 <- 1-res$RSS[2:(res$computed_nt+1)]/res$RSS[1]
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
res$InfCrit <- t(rbind(res$AIC, res$BIC, res$ChisqPearson, res$RSS, c(NA,res$R2), c(NA,res$R2residY), res$RSSresidY))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "BIC", "Chi2_Pearson_Y", "RSS_Y", "R2_Y", "R2_residY", "RSS_residY"))
}
# Binomial-type models additionally report the misclassification count.
if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$InfCrit <- t(rbind(res$AIC, res$BIC, res$MissClassed, res$ChisqPearson, res$RSS, c(NA,res$R2), c(NA,res$R2residY), res$RSSresidY))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "BIC", "Missclassed", "Chi2_Pearson_Y", "RSS_Y", "R2_Y", "R2_residY", "RSS_residY"))
}
}
##############################################
######           PLS-GLM-POLR         ########
##############################################
if (modele == "pls-glm-polr") {
res$InfCrit <- t(rbind(res$AIC, res$BIC, res$MissClassed, res$ChisqPearson))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "BIC", "Missclassed", "Chi2_Pearson_Y"))
}
##############################################
######           PLS-BETA             ########
##############################################
if (modele %in% c("pls-beta")) {
res$R2residY <- 1-res$RSSresidY[2:(res$computed_nt+1)]/res$RSSresidY[1]
res$R2 <- 1-res$RSS[2:(res$computed_nt+1)]/res$RSS[1]
res$InfCrit <- t(rbind(res$AIC, res$BIC, res$ChisqPearson, res$RSS, c(NA,res$pseudo.R2), c(NA,res$R2)))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "BIC", "Chi2_Pearson_Y", "RSS_Y", "pseudo_R2_Y", "R2_Y"))
}
##########################################
#                                        #
#          Predicting responses          #
#                                        #
##########################################
##############################################
######              PLS               ########
##############################################
# Final fitted and predicted responses on the original Y scale; for PLS
# optionally also the reconstruction of X (EstimXNA).
if (modele == "pls") {
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
rownames(res$YChapeau) <- rownames(ExpliX)
res$Std.ValsPredictY <- res$ttPredictY%*%res$CoeffC
res$ValsPredictY <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$ttPredictY%*%res$CoeffC
res$Std.XChapeau <- res$tt%*%t(res$pp)
rownames(res$Std.XChapeau) <- rownames(ExpliX)
if (EstimXNA) {
# Unscale the reconstructed X; XChapeauNA keeps only the originally
# missing cells (note: its dimnames are not set — the two assignments
# below re-label XChapeau instead; presumably a copy/paste slip).
res$XChapeau <- sweep(sweep(res$Std.XChapeau,2,attr(res$ExpliX,"scaled:scale"),FUN="*"),2,attr(res$ExpliX,"scaled:center"),FUN="+")
rownames(res$XChapeau) <- rownames(ExpliX)
colnames(res$XChapeau) <- colnames(ExpliX)
res$XChapeauNA <- sweep(sweep(res$Std.XChapeau,2,attr(res$ExpliX,"scaled:scale"),FUN="*"),2,attr(res$ExpliX,"scaled:center"),FUN="+")*!XXNA
rownames(res$XChapeau) <- rownames(ExpliX)
colnames(res$XChapeau) <- colnames(ExpliX)
}
names(res$CoeffC) <- paste("Coeff_Comp_Reg",1:res$computed_nt)
rownames(res$Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
######           PLS-GLM              ########
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$YChapeau <- as.matrix(tempregglm$fitted.values)
rownames(res$YChapeau) <- rownames(ExpliX)
# Predict through the fitted GLM on the new components (link scale and
# response scale); newdata column names must match the 'tt' regressor.
tt <- res$ttPredictY
res$Std.ValsPredictY <- predict(tempregglm,newdata=data.frame(tt))
res$ValsPredictY <- predict(tempregglm,newdata=data.frame(tt),type = "response")
res$Std.XChapeau <- res$tt%*%t(res$pp)
rownames(res$Std.XChapeau) <- rownames(ExpliX)
names(res$CoeffC) <- paste("Coeff_Comp_Reg",1:res$computed_nt)
rownames(res$Coeffs) <- c("Intercept",colnames(ExpliX))
res$FinalModel <- tempregglm
}
##############################################
######           PLS-GLM-POLR         ########
##############################################
if (modele %in% c("pls-glm-polr")) {
res$YChapeau <- tempregpolr$fitted.values
res$YChapeauCat <- predict(tempregpolr,type="class")
rownames(res$YChapeau) <- rownames(ExpliX)
# I() keeps the component matrix as a single 'tts' column in newdata,
# matching the fitted formula YwotNA ~ tts.
res$ValsPredictY <- predict(tempregpolr, data.frame(tts=I(res$ttPredictY)),type="probs")
res$ValsPredictYCat <- predict(tempregpolr, data.frame(tts=I(res$ttPredictY)),type="class")
res$Std.XChapeau <- res$tt%*%t(res$pp)
rownames(res$Std.XChapeau) <- rownames(ExpliX)
names(res$CoeffC) <- paste("Coeff_Comp_Reg",1:res$computed_nt)
res$FinalModel <- tempregpolr
}
##############################################
######           PLS-BETA             ########
##############################################
if (modele %in% c("pls-beta")) {
res$YChapeau <- as.matrix(predict(tempregbeta,type="response"))
rownames(res$YChapeau) <- rownames(ExpliX)
tt <- res$ttPredictY
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
res$Std.ValsPredictY <- predict(tempregbeta,newdata=data.frame(tt))
res$ValsPredictY <- predict(tempregbeta,newdata=data.frame(tt),type = "response")
res$Std.XChapeau <- res$tt%*%t(res$pp)
rownames(res$Std.XChapeau) <- rownames(ExpliX)
names(res$CoeffC) <- paste("Coeff_Comp_Reg",1:res$computed_nt)
rownames(res$Coeffs) <- c("Intercept",colnames(ExpliX))
res$FinalModel <- tempregbeta
}
rownames(res$pp) <- colnames(ExpliX)
colnames(res$pp) <- paste("Comp_",1:res$computed_nt)
rownames(res$ww) <- colnames(ExpliX)
colnames(res$ww) <- paste("Comp_",1:res$computed_nt)
rownames(res$wwnorm) <- colnames(ExpliX)
colnames(res$wwnorm) <- paste("Comp_",1:res$computed_nt)
rownames(res$wwetoile) <- colnames(ExpliX)
colnames(res$wwetoile) <- paste("Coord_Comp_",1:res$computed_nt)
rownames(res$tt) <- rownames(ExpliX)
colnames(res$tt) <- paste("Comp_",1:res$computed_nt)
res$XXwotNA <- XXwotNA
cat("****________________________________________________****\n")
cat("\n")
#if(res$computed_nt>0 & modele=="pls-beta") {rm(jj,tt,tts,XXwotNA,YwotNA,envir=parent.frame(n=sys.nframe()))}
return(res)
}
PLS_beta <- function(dataY,dataX,nt=2,limQ2set=.0975,dataPredictY=dataX,modele="pls",family=NULL,typeVC="none",EstimXNA=FALSE,scaleX=TRUE,scaleY=NULL,pvals.expli=FALSE,alpha.pvals.expli=.05,MClassed=FALSE,tol_Xi=10^(-12),weights,method,sparse=FALSE,sparseStop=TRUE,naive=FALSE,link=NULL,link.phi=NULL,type="ML") {
##################################################
# #
# Initialization and formatting the inputs #
# #
##################################################
cat("____************************************************____\n")
if(any(apply(is.na(dataX),MARGIN=2,"all"))){return(vector("list",0)); cat("One of the columns of dataX is completely filled with missing data\n"); stop()}
if(any(apply(is.na(dataX),MARGIN=1,"all"))){return(vector("list",0)); cat("One of the rows of dataX is completely filled with missing data\n"); stop()}
if(identical(dataPredictY,dataX)){PredYisdataX <- TRUE} else {PredYisdataX <- FALSE}
if(!PredYisdataX){
if(any(apply(is.na(dataPredictY),MARGIN=2,"all"))){return(vector("list",0)); cat("One of the columns of dataPredictY is completely filled with missing data\n"); stop()}
if(any(apply(is.na(dataPredictY),MARGIN=1,"all"))){return(vector("list",0)); cat("One of the rows of dataPredictY is completely filled with missing data\n"); stop()}
}
if(missing(weights)){NoWeights=TRUE} else {if(all(weights==rep(1,length(dataY)))){NoWeights=TRUE} else {NoWeights=FALSE}}
if(missing(method)){method="logistic"}
if(missing(type)){method="ML"}
if(any(is.na(dataX))) {na.miss.X <- TRUE} else na.miss.X <- FALSE
if(any(is.na(dataY))) {na.miss.Y <- TRUE} else na.miss.Y <- FALSE
if(any(is.na(dataPredictY))) {na.miss.PredictY <- TRUE} else {na.miss.PredictY <- FALSE}
if(is.null(modele)){naive=FALSE} else {if(modele=="pls"){naive=FALSE} else {if(!missing(naive)){cat(paste("Only naive DoF can be used with PLS GLM or PLS BETA\n",sep=""))}; naive=TRUE}}
if(na.miss.X|na.miss.Y){naive=TRUE; cat(paste("Only naive DoF can be used with missing data\n",sep="")); if(!NoWeights){cat(paste("Weights cannot be used with missing data\n",sep=""))}}
if(!NoWeights){naive=TRUE; cat(paste("Only naive DoF can be used with weighted PLS\n",sep=""))} else {NoWeights=TRUE}
if(sparse){pvals.expli=TRUE}
if (!is.data.frame(dataX)) {dataX <- data.frame(dataX)}
if (is.null(modele) & !is.null(family)) {modele<-"pls-glm-family"}
if (!(modele %in% c("pls","pls-glm-logistic","pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson","pls-glm-polr","pls-beta"))) {print(modele);stop("'modele' not recognized")}
if (!(modele %in% "pls-glm-family") & !is.null(family)) {stop("Set 'modele=pls-glm-family' to use the family option")}
if (!(modele %in% "pls-beta") & !is.null(link)) {stop("Set 'modele=pls-beta' to use the link option")}
if (modele=="pls") {family<-NULL}
if (modele=="pls-beta") {family<-NULL}
if (modele=="pls-glm-Gamma") {family<-Gamma(link = "inverse")}
if (modele=="pls-glm-gaussian") {family<-gaussian(link = "identity")}
if (modele=="pls-glm-inverse.gaussian") {family<-inverse.gaussian(link = "1/mu^2")}
if (modele=="pls-glm-logistic") {family<-binomial(link = "logit")}
if (modele=="pls-glm-poisson") {family<-poisson(link = "log")}
if (modele=="pls-glm-polr") {family<-NULL}
if (!is.null(family)) {
if (is.character(family)) {family <- get(family, mode = "function", envir = parent.frame(n=sys.nframe()))}
if (is.function(family)) {family <- family()}
if (is.language(family)) {family <- eval(family)}
}
if (is.null(link)){link<-"logit"} else {if(!(link %in% c("logit", "probit", "cloglog", "cauchit", "log", "loglog")) & !is(link,"link-glm")) {link<-"logit"}}
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {print(family)}
if (modele %in% c("pls-glm-polr")) {cat("\nModel:", modele, "\n");cat("Method:", method, "\n\n")}
if (modele=="pls-beta") {cat("\nModel:", modele, "\n\n");cat("Link:", link, "\n\n");cat("Link.phi:", link.phi, "\n\n");cat("Type:", type, "\n\n")}
if (modele=="pls") {cat("\nModel:", modele, "\n\n")}
scaleY <- NULL
if (is.null(scaleY)) {
if (!(modele %in% c("pls"))) {scaleY <- FALSE} else {scaleY <- TRUE}
}
if (scaleY) {if(NoWeights){RepY <- scale(dataY)} else {meanY <- weighted.mean(dataY,weights); stdevY <- sqrt((length(dataY)-1)/length(dataY)*weighted.mean((dataY-meanY)^2,weights)); RepY <- (dataY-meanY)/stdevY; attr(RepY,"scaled:center") <- meanY ; attr(RepY,"scaled:scale") <- stdevY}}
else {
RepY <- dataY
attr(RepY,"scaled:center") <- 0
attr(RepY,"scaled:scale") <- 1
}
if (scaleX) {if(NoWeights){ExpliX <- scale(dataX)} else {meanX <- apply(dataX,2,weighted.mean,weights); stdevX <- sqrt((length(dataY)-1)/length(dataY)*apply((sweep(dataX,2,meanX))^2,2,weighted.mean,weights)); ExpliX <- sweep(sweep(dataX, 2, meanX), 2 ,stdevX, "/"); attr(ExpliX,"scaled:center") <- meanX ; attr(ExpliX,"scaled:scale") <- stdevX}
if(PredYisdataX){PredictY <- ExpliX} else {PredictY <- sweep(sweep(dataPredictY, 2, attr(ExpliX,"scaled:center")), 2 ,attr(ExpliX,"scaled:scale"), "/")}
}
else {
ExpliX <- dataX
attr(ExpliX,"scaled:center") <- rep(0,ncol(dataX))
attr(ExpliX,"scaled:scale") <- rep(1,ncol(dataX))
PredictY <- (dataPredictY)
}
if(is.null(colnames(ExpliX))){colnames(ExpliX)<-paste("X",1:ncol(ExpliX),sep=".")}
if(is.null(rownames(ExpliX))){rownames(ExpliX)<-1:nrow(ExpliX)}
XXNA <- !(is.na(ExpliX))
YNA <- !(is.na(RepY))
if(PredYisdataX){PredictYNA <- XXNA} else {PredictYNA <- !is.na(PredictY)}
ExpliXwotNA <- as.matrix(ExpliX)
ExpliXwotNA[!XXNA] <- 0
XXwotNA <- as.matrix(ExpliX)
XXwotNA[!XXNA] <- 0
dataXwotNA <- as.matrix(dataX)
dataXwotNA[!XXNA] <- 0
YwotNA <- as.matrix(RepY)
YwotNA[!YNA] <- 0
dataYwotNA <- as.matrix(dataY)
dataYwotNA[!YNA] <- 0
if(PredYisdataX){PredictYwotNA <- XXwotNA} else {
PredictYwotNA <- as.matrix(PredictY)
PredictYwotNA [is.na(PredictY)] <- 0
}
if (modele == "pls-glm-polr") {
dataY <- as.factor(dataY)
YwotNA <- as.factor(YwotNA)}
res <- list(nr=nrow(ExpliX),nc=ncol(ExpliX),nt=nt,ww=NULL,wwnorm=NULL,wwetoile=NULL,tt=NULL,pp=NULL,CoeffC=NULL,uscores=NULL,YChapeau=NULL,residYChapeau=NULL,RepY=RepY,na.miss.Y=na.miss.Y,YNA=YNA,residY=RepY,ExpliX=ExpliX,na.miss.X=na.miss.X,XXNA=XXNA,residXX=ExpliX,PredictY=PredictYwotNA,RSS=rep(NA,nt),RSSresidY=rep(NA,nt),R2=rep(NA,nt),R2residY=rep(NA,nt),press.ind=NULL,press.tot=NULL,Q2cum=rep(NA, nt),family=family,ttPredictY = NULL,typeVC=typeVC,dataX=dataX,dataY=dataY)
if(NoWeights){res$weights<-rep(1L,res$nr)} else {res$weights<-weights}
res$temppred <- NULL
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
if (scaleY) {res$YChapeau=rep(attr(RepY,"scaled:center"),nrow(ExpliX))
res$residYChapeau=rep(0,nrow(ExpliX))}
else
{res$YChapeau=rep(mean(RepY),nrow(ExpliX))
res$residYChapeau=rep(mean(RepY),nrow(ExpliX))}
}
################################################
################################################
## ##
## Beginning of the loop for the components ##
## ##
################################################
################################################
res$computed_nt <- 0
break_nt <- FALSE
break_nt_sparse <- FALSE
break_nt_sparse1 <- FALSE
break_nt_vc <- FALSE
break_nt_betareg <- FALSE
for (kk in 1:nt) {
XXwotNA <- as.matrix(res$residXX)
XXwotNA[!XXNA] <- 0
YwotNA <- as.matrix(res$residY)
YwotNA[!YNA] <- 0
tempww <- rep(0,res$nc)
temptest <- sqrt(colSums(res$residXX^2, na.rm=TRUE))
if(any(temptest<tol_Xi)) {
break_nt <- TRUE
if (is.null(names(which(temptest<tol_Xi)))) {
cat(paste("Warning : ",paste(names(which(temptest<tol_Xi)),sep="",collapse=" ")," < 10^{-12}\n",sep=""))
} else {
cat(paste("Warning : ",paste((which(temptest<tol_Xi)),sep="",collapse=" ")," < 10^{-12}\n",sep=""))
}
cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))
rm(temptest)
break
}
res$computed_nt <- kk
##############################################
# #
# Weight computation for each model #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
if(NoWeights){
tempww <- t(XXwotNA)%*%YwotNA/(t(XXNA)%*%YwotNA^2)
}
if(!NoWeights){
tempww <- t(XXwotNA*weights)%*%YwotNA/(t(XXNA*weights)%*%YwotNA^2)
}
if (pvals.expli) {
tempvalpvalstep <- 2 * pnorm(-abs(tempww))
temppvalstep <- (tempvalpvalstep < alpha.pvals.expli)
if(sparse&sparseStop){
if(sum(temppvalstep)==0L){
break_nt_sparse <- TRUE}
else
{tempww[!temppvalstep] <- 0}}
res$valpvalstep <- cbind(res$valpvalstep,tempvalpvalstep)
res$pvalstep <- cbind(res$pvalstep,temppvalstep)
}
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
if (!pvals.expli) {
XXwotNA[!XXNA] <- NA
for (jj in 1:(res$nc)) {
tempww[jj] <- coef(glm(YwotNA~cbind(res$tt,XXwotNA[,jj]),family=family))[kk+1]
}
XXwotNA[!XXNA] <- 0
rm(jj)}
else {
XXwotNA[!XXNA] <- NA
tempvalpvalstep <- rep(0,res$nc)
temppvalstep <- rep(0,res$nc)
for (jj in 1:(res$nc)) {
tmww <- summary(glm(YwotNA~cbind(res$tt,XXwotNA[,jj]),family=family))$coefficients[kk+1,]
tempww[jj] <- tmww[1]
tempvalpvalstep[jj] <- tmww[4]
temppvalstep[jj] <- (tmww[4] < alpha.pvals.expli)
}
if(sparse&sparseStop){
if(sum(temppvalstep)==0L){
break_nt_sparse <- TRUE}
else
{tempww[!temppvalstep] <- 0}}
XXwotNA[!XXNA] <- 0
rm(jj)
res$valpvalstep <- cbind(res$valpvalstep,tempvalpvalstep)
res$pvalstep <- cbind(res$pvalstep,temppvalstep)
}
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
YwotNA <- as.factor(YwotNA)
if (!pvals.expli) {
XXwotNA[!XXNA] <- NA
tts <- res$tt
for (jj in 1:(res$nc)) {
tempww[jj] <- -1*MASS::polr(YwotNA~cbind(tts,XXwotNA[,jj]),na.action=na.exclude,method=method)$coef[kk]
}
XXwotNA[!XXNA] <- 0
rm(jj,tts)}
else {
XXwotNA[!XXNA] <- NA
tts <- res$tt
tempvalpvalstep <- rep(0,res$nc)
temppvalstep <- rep(0,res$nc)
for (jj in 1:(res$nc)) {
tmww <- -1*summary(MASS::polr(YwotNA~cbind(tts,XXwotNA[,jj]),na.action=na.exclude,Hess=TRUE,method=method))$coefficients[kk,]
tempww[jj] <- tmww[1]
tempvalpvalstep[jj] <- 2 * pnorm(-abs(tmww[3]))
temppvalstep[jj] <- (tempvalpvalstep[jj] < alpha.pvals.expli)
}
if(sparse&sparseStop){
if(sum(temppvalstep)==0L){
break_nt_sparse <- TRUE}
else
{tempww[!temppvalstep] <- 0}}
XXwotNA[!XXNA] <- 0
rm(jj,tts)
res$valpvalstep <- cbind(res$valpvalstep,tempvalpvalstep)
res$pvalstep <- cbind(res$pvalstep,temppvalstep)
}
}
##############################################
###### PLS-BETA ######
##############################################
if (modele %in% c("pls-beta")) {
if (!pvals.expli) {
XXwotNA[!XXNA] <- NA
tts <- res$tt
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
#assign("tts", tts, envir=parent.frame(n=sys.nframe()))
#assign("XXwotNA", XXwotNA, envir=parent.frame(n=sys.nframe()))
for (jj in 1:(res$nc)) {
#assign("jj", jj, envir=parent.frame(n=sys.nframe()))
temptempww <- try(coef(betareg::betareg(YwotNA~cbind(tts,XXwotNA[,jj]),link=link,link.phi=link.phi,type=type,phi=FALSE))[kk+1],silent=TRUE)
if(is.numeric(temptempww)){tempww[jj] <- temptempww} else {break_nt_betareg <- TRUE; break}
}
if(break_nt_betareg){
res$computed_nt <- kk-1
cat(paste("Error in betareg found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
break}
XXwotNA[!XXNA] <- 0
rm(jj,tts)}
else {
XXwotNA[!XXNA] <- NA
tts <- res$tt
tempvalpvalstep <- rep(0,res$nc)
temppvalstep <- rep(0,res$nc)
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
#assign("tts", tts, envir=parent.frame(n=sys.nframe()))
#assign("XXwotNA", XXwotNA, envir=parent.frame(n=sys.nframe()))
for (jj in 1:(res$nc)) {
#assign("jj", jj, envir=parent.frame(n=sys.nframe()))
temptempww <- try(summary(betareg::betareg(YwotNA~cbind(tts,XXwotNA[,jj]),hessian=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type))$coefficients$mean[kk+1,],silent=TRUE)
if(is.numeric(temptempww)){tmww <- temptempww} else {break_nt_betareg <- TRUE; break}
tempww[jj] <- tmww[1]
tempvalpvalstep[jj] <- tmww[4]
temppvalstep[jj] <- (tmww[4] < alpha.pvals.expli)
}
if(break_nt_betareg){
res$computed_nt <- kk-1
cat(paste("Error in betareg found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
break}
if(sparse&sparseStop){
if(sum(temppvalstep)==0L){
break_nt_sparse <- TRUE}
else
{tempww[!temppvalstep] <- 0}}
XXwotNA[!XXNA] <- 0
rm(jj,tts)
res$valpvalstep <- cbind(res$valpvalstep,tempvalpvalstep)
res$pvalstep <- cbind(res$pvalstep,temppvalstep)
}
}
##############################################
# #
# Computation of the components (model free) #
# #
##############################################
if((break_nt_sparse)&(kk==1L)){
cat(paste("No significant predictors (<",alpha.pvals.expli,") found\n",sep=""))
cat(paste("Warning only one standard component (without sparse option) was thus extracted\n",sep=""))
break_nt_sparse1 <- TRUE
}
if((break_nt_sparse)&!(kk==1L)){
res$computed_nt <- kk-1
if(!(break_nt_sparse1)){
cat(paste("No more significant predictors (<",alpha.pvals.expli,") found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
}
break}
tempwwnorm <- tempww/sqrt(drop(crossprod(tempww)))
temptt <- XXwotNA%*%tempwwnorm/(XXNA%*%(tempwwnorm^2))
temppp <- rep(0,res$nc)
for (jj in 1:(res$nc)) {
temppp[jj] <- crossprod(temptt,XXwotNA[,jj])/drop(crossprod(XXNA[,jj],temptt^2))
}
res$residXX <- XXwotNA-temptt%*%temppp
if (na.miss.X & !na.miss.Y) {
for (ii in 1:res$nr) {
if(rcond(t(cbind(res$pp,temppp)[XXNA[ii,],,drop=FALSE])%*%cbind(res$pp,temppp)[XXNA[ii,],,drop=FALSE])<tol_Xi) {
break_nt <- TRUE; res$computed_nt <- kk-1
cat(paste("Warning : reciprocal condition number of t(cbind(res$pp,temppp)[XXNA[",ii,",],,drop=FALSE])%*%cbind(res$pp,temppp)[XXNA[",ii,",],,drop=FALSE] < 10^{-12}\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))
break
}
}
rm(ii)
if(break_nt==TRUE) {break}
}
if(!PredYisdataX){
if (na.miss.PredictY & !na.miss.Y) {
for (ii in 1:nrow(PredictYwotNA)) {
if(rcond(t(cbind(res$pp,temppp)[PredictYNA[ii,],,drop=FALSE])%*%cbind(res$pp,temppp)[PredictYNA[ii,],,drop=FALSE])<tol_Xi) {
break_nt <- TRUE; res$computed_nt <- kk-1
cat(paste("Warning : reciprocal condition number of t(cbind(res$pp,temppp)[PredictYNA[",ii,",,drop=FALSE],])%*%cbind(res$pp,temppp)[PredictYNA[",ii,",,drop=FALSE],] < 10^{-12}\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components could thus be extracted\n",sep=""))
break
}
}
rm(ii)
if(break_nt==TRUE) {break}
}
}
if (modele %in% c("pls-beta")) {
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
tt<-cbind(res$tt,temptt)
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
if (kk==1) {
coeftempconstbeta <- try(coef(betareg::betareg(YwotNA~1,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)),silent=TRUE)
if(!is.numeric(coeftempconstbeta)){
res$computed_nt <- kk-1
cat(paste("Error in betareg found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
break}
rm(coeftempconstbeta)
}
coeftempregbeta <- try(coef(betareg::betareg(YwotNA~tt,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)),silent=TRUE)
if(!is.numeric(coeftempregbeta)){
res$computed_nt <- kk-1
cat(paste("Error in betareg found\n",sep=""))
cat(paste("Warning only ",res$computed_nt," components were thus extracted\n",sep=""))
break}
rm(tt,envir=parent.frame(n=sys.nframe()))
rm(tt)
rm(YwotNA,envir=parent.frame(n=sys.nframe()))
rm(coeftempregbeta)
}
res$ww <- cbind(res$ww,tempww)
res$wwnorm <- cbind(res$wwnorm,tempwwnorm)
res$tt <- cbind(res$tt,temptt)
res$pp <- cbind(res$pp,temppp)
##############################################
# #
# Computation of the coefficients #
# of the model with kk components #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
if (kk==1) {
tempCoeffC <- solve(t(res$tt[YNA])%*%res$tt[YNA])%*%t(res$tt[YNA])%*%YwotNA[YNA]
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- 0
} else {
if (!(na.miss.X | na.miss.Y)) {
tempCoeffC <- c(rep(0,kk-1),solve(t(res$tt[YNA,kk])%*%res$tt[YNA,kk])%*%t(res$tt[YNA,kk])%*%YwotNA[YNA])
tempCoeffConstante <- 0
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
}
else
{
tempCoeffC <- c(rep(0,kk-1),solve(t(res$tt[YNA,kk])%*%res$tt[YNA,kk])%*%t(res$tt[YNA,kk])%*%YwotNA[YNA])
tempCoeffConstante <- 0
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- diag(res$CoeffCFull)
res$CoeffConstante <- tempCoeffConstante
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
if (kk==1) {
tempconstglm <- glm(YwotNA~1,family=family)
res$AIC <- AIC(tempconstglm)
res$BIC <- AIC(tempconstglm, k = log(res$nr))
res$Coeffsmodel_vals <- rbind(summary(tempconstglm)$coefficients,matrix(rep(NA,4*nt),ncol=4))
res$ChisqPearson <- crossprod(residuals.glm(tempconstglm,type="pearson"))
#if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$MissClassed <- sum(unclass(res$RepY)!=ifelse(predict(tempconstglm,type="response") < 0.5, 0,1))
#}
rm(tempconstglm)
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregglm))
res$BIC <- cbind(res$BIC,AIC(tempregglm, k = log(res$nr)))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals.glm(tempregglm,type="pearson")))
#if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$MissClassed <- cbind(res$MissClassed,sum(unclass(res$RepY)!=ifelse(predict(tempregglm,type="response") < 0.5, 0,1)))
#}
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- tempCoeffConstante
tempCoeffC <- tempCoeffC[-1]
} else {
if (!(na.miss.X | na.miss.Y)) {
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregglm))
res$BIC <- cbind(res$BIC,AIC(tempregglm, k = log(res$nr)))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals.glm(tempregglm,type="pearson")))
#if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$MissClassed <- cbind(res$MissClassed,sum(unclass(res$RepY)!=ifelse(predict(tempregglm,type="response") < 0.5, 0,1)))
#}
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
else
{
tt<-res$tt
tempregglm <- glm(YwotNA~tt,family=family)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregglm))
res$BIC <- cbind(res$BIC,AIC(tempregglm, k = log(res$nr)))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregglm)$coefficients,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals.glm(tempregglm,type="pearson")))
#if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$MissClassed <- cbind(res$MissClassed,sum(unclass(res$RepY)!=ifelse(predict(tempregglm,type="response") < 0.5, 0,1)))
#}
tempCoeffC <- as.vector(coef(tempregglm))
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
# Multinomial covariance matrix of the indicator vector for the first
# K-1 response categories, given the fitted probability vector `piVaryy`
# (length K, last category dropped as redundant): Var = diag(p) - p p'.
#
# Bug fix: the original used `diag(pm)`, which for a length-1 vector
# (exactly 2 response categories) builds a trunc(pm)-sized identity
# matrix instead of a 1x1 diagonal — `diag(x, nrow = length(x))` is
# safe for any length.
Varyy <- function(piVaryy) {
pm <- piVaryy[-length(piVaryy)]
diag(pm, nrow = length(pm)) - pm %*% t(pm)
}
# Pearson chi-square contribution of a single observation: squared
# Mahalanobis-type distance between the observed category indicators
# `yichisq` and the fitted probabilities `pichisq` (last, redundant
# category dropped from both), weighted by a generalized inverse of
# the multinomial covariance. Returns a 1x1 matrix.
Chisqcomp <- function(yichisq,pichisq) {
obs <- yichisq[-length(yichisq)]
fit <- pichisq[-length(pichisq)]
dev <- obs - fit
t(dev) %*% MASS::ginv(Varyy(pichisq)) %*% dev
}
# Total Pearson chi-square over all observations: pairs each element of
# `rowsyi` (per-observation indicator rows) with the matching element of
# `rowspi` (per-observation fitted-probability rows), evaluates Chisqcomp
# on every pair, and sums the contributions. Note the argument order:
# Chisqcomp expects (yi, pi), so rowsyi is passed first.
Chiscompmatrix <- function(rowspi,rowsyi) {
contribs <- Map(Chisqcomp, rowsyi, rowspi)
sum(unlist(contribs))
}
if (kk==1) {
tempconstpolr <- MASS::polr(YwotNA~1,na.action=na.exclude,Hess=TRUE,method=method)
res$AIC <- AIC(tempconstpolr)
res$BIC <- AIC(tempconstpolr, k = log(res$nr))
res$MissClassed <- sum(!(unclass(predict(tempconstpolr,type="class"))==unclass(res$RepY)))
res$Coeffsmodel_vals <- rbind(summary(tempconstpolr)$coefficients,matrix(rep(NA,3*nt),ncol=3))
tempmodord <- predict(tempconstpolr,type="class")
tempfff <- ~tempmodord-1
tempm <- model.frame(tempfff, tempmodord)
tempmat <- model.matrix(tempfff, model.frame(tempfff, tempmodord))
res$ChisqPearson <- sum(Chiscompmatrix(as.list(as.data.frame(t(predict(tempconstpolr,type="probs")))),as.list(as.data.frame(t(tempmat)))))
rm(tempconstpolr)
tts<-res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$AIC <- cbind(res$AIC,AIC(tempregpolr))
res$BIC <- cbind(res$BIC,AIC(tempregpolr, k = log(res$nr)))
res$MissClassed <- cbind(res$MissClassed,sum(!(unclass(predict(tempregpolr,type="class"))==unclass(res$RepY))))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempmodord <- predict(tempregpolr,type="class")
tempfff <- ~tempmodord-1
tempm <- model.frame(tempfff, tempmodord)
tempmat <- model.matrix(tempfff, model.frame(tempfff, tempmodord))
res$ChisqPearson <- c(res$ChisqPearson,sum(Chiscompmatrix(as.list(as.data.frame(t(predict(tempregpolr,type="probs")))),as.list(as.data.frame(t(tempmat))))))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- matrix(c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)),ncol=1)
res$CoeffConstante <- tempCoeffConstante
} else {
if (!(na.miss.X | na.miss.Y)) {
tts <- res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$AIC <- cbind(res$AIC,AIC(tempregpolr))
res$BIC <- cbind(res$BIC,AIC(tempregpolr, k = log(res$nr)))
res$MissClassed <- cbind(res$MissClassed,sum(!(unclass(predict(tempregpolr,type="class"))==unclass(res$RepY))))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempmodord <- predict(tempregpolr,type="class")
tempfff <- ~tempmodord-1
tempm <- model.frame(tempfff, tempmodord)
tempmat <- model.matrix(tempfff, model.frame(tempfff, tempmodord))
res$ChisqPearson <- c(res$ChisqPearson,sum(Chiscompmatrix(as.list(as.data.frame(t(predict(tempregpolr,type="probs")))),as.list(as.data.frame(t(tempmat))))))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)))
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
}
else
{
tts<-res$tt
tempregpolr <- MASS::polr(YwotNA~tts,na.action=na.exclude,Hess=TRUE,method=method)
rm(tts)
res$AIC <- cbind(res$AIC,AIC(tempregpolr))
res$BIC <- cbind(res$BIC,AIC(tempregpolr, k = log(res$nr)))
res$MissClassed <- cbind(res$MissClassed,sum(!(unclass(predict(tempregpolr,type="class"))==unclass(res$RepY))))
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregpolr)$coefficients,matrix(rep(NA,3*(nt-kk)),ncol=3)))
tempmodord <- predict(tempregpolr,type="class")
tempfff <- ~tempmodord-1
tempm <- model.frame(tempfff, tempmodord)
tempmat <- model.matrix(tempfff, model.frame(tempfff, tempmodord))
res$ChisqPearson <- c(res$ChisqPearson,sum(Chiscompmatrix(as.list(as.data.frame(t(predict(tempregpolr,type="probs")))),as.list(as.data.frame(t(tempmat))))))
tempCoeffC <- -1*as.vector(tempregpolr$coef)
tempCoeffConstante <- as.vector(tempregpolr$zeta)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffConstante,tempCoeffC,rep(NA,nt-kk)))
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- as.matrix(rbind(as.matrix(tempCoeffConstante),res$wwetoile%*%res$CoeffC))
rownames(res$Std.Coeffs) <- c(names(tempregpolr$zeta),colnames(ExpliX))
}
##############################################
###### PLS-BETA ######
##############################################
if (modele %in% c("pls-beta")) {
if (kk==1) {
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
tempconstbeta <- betareg::betareg(YwotNA~1,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)
res$AIC <- AIC(tempconstbeta)
res$BIC <- AIC(tempconstbeta, k = log(res$nr))
res$pseudo.R2 <- NULL
res$Coeffsmodel_vals <- rbind(summary(tempconstbeta)$coefficients$mean,matrix(rep(NA,4*nt),ncol=4))
res$ChisqPearson <- crossprod(residuals(tempconstbeta,type="pearson"))
rm(tempconstbeta)
tt<-res$tt
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
tempregbeta <- betareg::betareg(YwotNA~tt,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregbeta))
res$BIC <- cbind(res$BIC,AIC(tempregbeta, k = log(res$nr)))
res$pseudo.R2 <- cbind(res$pseudo.R2,tempregbeta$pseudo.r.squared)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregbeta)$coefficients$mean,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals(tempregbeta,type="pearson")))
tempCoeffC <- as.vector(tempregbeta$coefficients$mean)
res$CoeffCFull <- matrix(c(tempCoeffC,rep(NA,nt-kk)),ncol=1)
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- tempCoeffConstante
tempCoeffC <- tempCoeffC[-1]
} else {
if (!(na.miss.X | na.miss.Y)) {
tt<-res$tt
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
tempregbeta <- betareg::betareg(YwotNA~tt,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregbeta))
res$BIC <- cbind(res$BIC,AIC(tempregbeta, k = log(res$nr)))
res$pseudo.R2 <- cbind(res$pseudo.R2,tempregbeta$pseudo.r.squared)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregbeta)$coefficients$mean,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals(tempregbeta,type="pearson")))
tempCoeffC <- as.vector(tempregbeta$coefficients$mean)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
else
{
tt<-res$tt
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
#assign("YwotNA", YwotNA, envir=parent.frame(n=sys.nframe()))
tempregbeta <- betareg::betareg(YwotNA~tt,hessian=TRUE,model=TRUE,link=link,phi=FALSE,link.phi=link.phi,type=type)
rm(tt)
res$AIC <- cbind(res$AIC,AIC(tempregbeta))
res$BIC <- cbind(res$BIC,AIC(tempregbeta, k = log(res$nr)))
res$pseudo.R2 <- cbind(res$pseudo.R2,tempregbeta$pseudo.r.squared)
res$Coeffsmodel_vals <- cbind(res$Coeffsmodel_vals,rbind(summary(tempregbeta)$coefficients$mean,matrix(rep(NA,4*(nt-kk)),ncol=4)))
res$ChisqPearson <- c(res$ChisqPearson,crossprod(residuals(tempregbeta,type="pearson")))
tempCoeffC <- as.vector(tempregbeta$coefficients$mean)
res$CoeffCFull <- cbind(res$CoeffCFull,c(tempCoeffC,rep(NA,nt-kk)))
tempCoeffConstante <- tempCoeffC[1]
res$CoeffConstante <- cbind(res$CoeffConstante,tempCoeffConstante)
tempCoeffC <- tempCoeffC[-1]
}
}
res$wwetoile <- (res$wwnorm)%*%solve(t(res$pp)%*%res$wwnorm)
res$CoeffC <- tempCoeffC
res$Std.Coeffs <- rbind(tempCoeffConstante,res$wwetoile%*%res$CoeffC)
rownames(res$Std.Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
# #
# Prediction of the components #
# as if missing values (model free) #
# For cross-validating the GLM #
# #
##############################################
if (!(na.miss.X | na.miss.Y)) {
##############################################
# #
# Cross validation #
# without missing value #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
res$residYChapeau <- res$tt%*%tempCoeffC
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$residYChapeau <- tempregglm$linear.predictors
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- tempregglm$fitted.values
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*tempCoeffConstante
res$Coeffs <- rbind(as.matrix(tempConstante),tempCoeffs)
rownames(res$Coeffs) <- rownames(res$Std.Coeffs)
}
##############################################
###### PLS-BETA ######
##############################################
if (modele %in% c("pls-beta")) {
res$residYChapeau <- predict(tempregbeta,type="link")
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- predict(tempregbeta,type="response")
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
}
else {
if (na.miss.X & !na.miss.Y) {
##############################################
# #
# Cross validation #
# with missing value(s) #
# #
##############################################
if (kk==1) {
cat("____There are some NAs in X but not in Y____\n")
}
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$residYChapeau <- tempregglm$linear.predictors
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- tempregglm$fitted.values
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")* tempCoeffConstante
res$Coeffs <- rbind(as.matrix(tempConstante),tempCoeffs)
rownames(res$Coeffs) <- rownames(res$Std.Coeffs)
}
##############################################
###### PLS-BETA ######
##############################################
if (modele %in% c("pls-beta")) {
res$residYChapeau <- predict(tempregbeta,type="link")
if (kk==1) {
if(NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY))
}
if(!NoWeights){
res$RSSresidY <- crossprod(RepY-mean(RepY),weights*(RepY-mean(RepY)))
}
}
if(NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau))
}
if(!NoWeights){
res$RSSresidY <- cbind(res$RSSresidY,crossprod(res$residY-res$residYChapeau,weights*(res$residY-res$residYChapeau)))
}
tempCoeffs <- res$wwetoile%*%res$CoeffC*attr(res$RepY,"scaled:scale")/attr(res$ExpliX,"scaled:scale")
tempConstante <- attr(res$RepY,"scaled:center")-sum(tempCoeffs*attr(res$ExpliX,"scaled:center"))+attr(res$RepY,"scaled:scale")*res$Std.Coeffs[1]
res$Coeffs <- rbind(tempConstante,tempCoeffs)
res$YChapeau <- predict(tempregbeta,type="response")
res$Yresidus <- dataY-res$YChapeau
if (kk==1) {
if(NoWeights){
res$RSS <- crossprod(dataY-mean(dataY))
}
if(!NoWeights){
res$RSS <- crossprod(dataY-mean(dataY),weights*(dataY-mean(dataY)))
}
}
if(NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus))
}
if(!NoWeights){
res$RSS <- cbind(res$RSS,crossprod(res$Yresidus,weights*res$Yresidus))
}
}
##############################################
}
else {
if (kk==1) {
cat("____There are some NAs both in X and Y____\n")
}
}
}
##############################################
# #
# Update and end of loop cleaning #
# (Especially useful for PLS) #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
res$uscores <- cbind(res$uscores,res$residY/res$CoeffC[kk])
res$residY <- res$residY - res$tt%*%tempCoeffC
res$residusY <- cbind(res$residusY,res$residY)
if (kk==1) {
res$AIC.std <- AIC(lm(res$RepY~1,weights=res$weights))
res$AIC.std <- cbind(res$AIC.std,AICpls(kk,res$residY,weights=res$weights))
res$AIC <- AIC(lm(dataY~1))
res$AIC <- cbind(res$AIC,AICpls(kk,res$Yresidus,weights=res$weights))
if (MClassed) {
res$MissClassed <- sum(unclass(dataY)!=ifelse(predict(lm(dataY~1,weights=res$weights)) < 0.5, 0,1))
res$MissClassed <- cbind(res$MissClassed,sum(unclass(dataY)!=ifelse(res$YChapeau < 0.5, 0,1)))
tempprob <- res$Probs <- predict(lm(dataY~1,weights=res$weights))
tempprob <- ifelse(tempprob<0,0,tempprob)
res$Probs.trc <- ifelse(tempprob>1,1,tempprob)
res$Probs <- cbind(res$Probs,res$YChapeau)
tempprob <- ifelse(res$YChapeau<0,0,res$YChapeau)
tempprob <- ifelse(tempprob>1,1,tempprob)
res$Probs.trc <- cbind(res$Probs.trc,tempprob)
}
} else {
res$AIC.std <- cbind(res$AIC.std,AICpls(kk,res$residY,weights=res$weights))
res$AIC <- cbind(res$AIC,AICpls(kk,res$Yresidus,weights=res$weights))
if (MClassed) {
res$MissClassed <- cbind(res$MissClassed,sum(unclass(dataY)!=ifelse(res$YChapeau < 0.5, 0,1)))
res$Probs <- cbind(res$Probs,res$YChapeau)
tempprob <- ifelse(res$YChapeau<0,0,res$YChapeau)
tempprob <- ifelse(tempprob>1,1,tempprob)
res$Probs.trc <- cbind(res$Probs.trc,tempprob)
}
}
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
##############################################
###### PLS-BETA ######
##############################################
if (modele %in% c("pls-beta")) {
res$residY <- res$residY
res$residusY <- cbind(res$residusY,res$residY)
rm(tempww)
rm(tempwwnorm)
rm(temptt)
rm(temppp)
rm(tempCoeffC)
rm(tempCoeffs)
rm(tempConstante)
}
cat("____Component____",kk,"____\n")
}
##############################################
##############################################
## ##
## End of the loop on the components ##
## ##
##############################################
##############################################
if(res$computed_nt==0){
cat("No component could be extracted please check the data for NA only lines or columns\n"); stop()
}
if (pvals.expli&!(modele=="pls")) {
res$Coeffsmodel_vals<-res$Coeffsmodel_vals[1:(dim(res$Coeffsmodel_vals)[1]-(nt-res$computed_nt)),]
}
##############################################
# #
# Predicting components #
# #
##############################################
if (!(na.miss.PredictY | na.miss.Y)) {
cat("____Predicting X without NA neither in X nor in Y____\n")
res$ttPredictY <- PredictYwotNA%*%res$wwetoile
colnames(res$ttPredictY) <- paste("tt",1:res$computed_nt,sep="")
}
else {
if (na.miss.PredictY & !na.miss.Y) {
cat("____Predicting X with NA in X and not in Y____\n")
for (ii in 1:nrow(PredictYwotNA)) {
res$ttPredictY <- rbind(res$ttPredictY,t(solve(t(res$pp[PredictYNA[ii,],,drop=FALSE])%*%res$pp[PredictYNA[ii,],,drop=FALSE])%*%t(res$pp[PredictYNA[ii,],,drop=FALSE])%*%(PredictYwotNA[ii,])[PredictYNA[ii,]]))
}
colnames(res$ttPredictY) <- paste("tt",1:res$computed_nt,sep="")
}
else {
cat("____There are some NAs both in X and Y____\n")
}
}
##############################################
# #
# Computing RSS, PRESS, #
# Chi2, Q2 and Q2cum #
# #
##############################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
res$R2residY <- 1-res$RSSresidY[2:(res$computed_nt+1)]/res$RSSresidY[1]
res$R2 <- 1-res$RSS[2:(res$computed_nt+1)]/res$RSS[1]
if (MClassed==FALSE) {
res$InfCrit <- t(rbind(res$AIC, res$RSS, c(NA,res$R2), c(NA,res$R2residY), res$RSSresidY, res$AIC.std))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "RSS_Y", "R2_Y", "R2_residY", "RSS_residY", "AIC.std"))
res$ic.dof<-infcrit.dof(res,naive=naive)
res$InfCrit <- cbind(res$InfCrit,res$ic.dof)
} else {
res$InfCrit <- t(rbind(res$AIC, res$RSS, c(NA,res$R2), res$MissClassed, c(NA,res$R2residY), res$RSSresidY, res$AIC.std))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "RSS_Y", "R2_Y", "MissClassed", "R2_residY", "RSS_residY", "AIC.std"))
res$ic.dof<-infcrit.dof(res,naive=naive)
res$InfCrit <- cbind(res$InfCrit,res$ic.dof)
}
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$R2residY <- 1-res$RSSresidY[2:(res$computed_nt+1)]/res$RSSresidY[1]
res$R2 <- 1-res$RSS[2:(res$computed_nt+1)]/res$RSS[1]
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-poisson")) {
res$InfCrit <- t(rbind(res$AIC, res$BIC, res$ChisqPearson, res$RSS, c(NA,res$R2), c(NA,res$R2residY), res$RSSresidY))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "BIC", "Chi2_Pearson_Y", "RSS_Y", "R2_Y", "R2_residY", "RSS_residY"))
}
if ((modele %in% c("pls-glm-logistic"))|(family$family=="binomial")) {
res$InfCrit <- t(rbind(res$AIC, res$BIC, res$MissClassed, res$ChisqPearson, res$RSS, c(NA,res$R2), c(NA,res$R2residY), res$RSSresidY))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "BIC", "Missclassed", "Chi2_Pearson_Y", "RSS_Y", "R2_Y", "R2_residY", "RSS_residY"))
}
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele == "pls-glm-polr") {
res$InfCrit <- t(rbind(res$AIC, res$BIC, res$MissClassed, res$ChisqPearson))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "BIC", "Missclassed", "Chi2_Pearson_Y"))
}
##############################################
###### PLS-BETA ######
##############################################
if (modele %in% c("pls-beta")) {
res$R2residY <- 1-res$RSSresidY[2:(res$computed_nt+1)]/res$RSSresidY[1]
res$R2 <- 1-res$RSS[2:(res$computed_nt+1)]/res$RSS[1]
res$InfCrit <- t(rbind(res$AIC, res$BIC, res$ChisqPearson, res$RSS, c(NA,res$pseudo.R2), c(NA,res$R2)))
dimnames(res$InfCrit) <- list(paste("Nb_Comp_",0:res$computed_nt,sep=""), c("AIC", "BIC", "Chi2_Pearson_Y", "RSS_Y", "pseudo_R2_Y", "R2_Y"))
}
##########################################
# #
# Predicting responses #
# #
##########################################
##############################################
###### PLS ######
##############################################
if (modele == "pls") {
res$YChapeau <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$tt%*%res$CoeffC
rownames(res$YChapeau) <- rownames(ExpliX)
res$Std.ValsPredictY <- res$ttPredictY%*%res$CoeffC
res$ValsPredictY <- attr(res$RepY,"scaled:center")+attr(res$RepY,"scaled:scale")*res$ttPredictY%*%res$CoeffC
res$Std.XChapeau <- res$tt%*%t(res$pp)
rownames(res$Std.XChapeau) <- rownames(ExpliX)
if (EstimXNA) {
res$XChapeau <- sweep(sweep(res$Std.XChapeau,2,attr(res$ExpliX,"scaled:scale"),FUN="*"),2,attr(res$ExpliX,"scaled:center"),FUN="+")
rownames(res$XChapeau) <- rownames(ExpliX)
colnames(res$XChapeau) <- colnames(ExpliX)
res$XChapeauNA <- sweep(sweep(res$Std.XChapeau,2,attr(res$ExpliX,"scaled:scale"),FUN="*"),2,attr(res$ExpliX,"scaled:center"),FUN="+")*!XXNA
rownames(res$XChapeau) <- rownames(ExpliX)
colnames(res$XChapeau) <- colnames(ExpliX)
}
names(res$CoeffC) <- paste("Coeff_Comp_Reg",1:res$computed_nt)
rownames(res$Coeffs) <- c("Intercept",colnames(ExpliX))
}
##############################################
###### PLS-GLM ######
##############################################
if (modele %in% c("pls-glm-family","pls-glm-Gamma","pls-glm-gaussian","pls-glm-inverse.gaussian","pls-glm-logistic","pls-glm-poisson")) {
res$YChapeau <- as.matrix(tempregglm$fitted.values)
rownames(res$YChapeau) <- rownames(ExpliX)
tt <- res$ttPredictY
res$Std.ValsPredictY <- predict(tempregglm,newdata=data.frame(tt))
res$ValsPredictY <- predict(tempregglm,newdata=data.frame(tt),type = "response")
res$Std.XChapeau <- res$tt%*%t(res$pp)
rownames(res$Std.XChapeau) <- rownames(ExpliX)
names(res$CoeffC) <- paste("Coeff_Comp_Reg",1:res$computed_nt)
rownames(res$Coeffs) <- c("Intercept",colnames(ExpliX))
res$FinalModel <- tempregglm
}
##############################################
###### PLS-GLM-POLR ######
##############################################
if (modele %in% c("pls-glm-polr")) {
res$YChapeau <- tempregpolr$fitted.values
res$YChapeauCat <- predict(tempregpolr,type="class")
rownames(res$YChapeau) <- rownames(ExpliX)
res$ValsPredictY <- predict(tempregpolr, data.frame(tts=I(res$ttPredictY)),type="probs")
res$ValsPredictYCat <- predict(tempregpolr, data.frame(tts=I(res$ttPredictY)),type="class")
res$Std.XChapeau <- res$tt%*%t(res$pp)
rownames(res$Std.XChapeau) <- rownames(ExpliX)
names(res$CoeffC) <- paste("Coeff_Comp_Reg",1:res$computed_nt)
res$FinalModel <- tempregpolr
}
##############################################
###### PLS-BETA ######
##############################################
if (modele %in% c("pls-beta")) {
res$YChapeau <- as.matrix(predict(tempregbeta,type="response"))
rownames(res$YChapeau) <- rownames(ExpliX)
tt <- res$ttPredictY
#assign("tt", tt, envir=parent.frame(n=sys.nframe()))
res$Std.ValsPredictY <- predict(tempregbeta,newdata=data.frame(tt))
res$ValsPredictY <- predict(tempregbeta,newdata=data.frame(tt),type = "response")
res$Std.XChapeau <- res$tt%*%t(res$pp)
rownames(res$Std.XChapeau) <- rownames(ExpliX)
names(res$CoeffC) <- paste("Coeff_Comp_Reg",1:res$computed_nt)
rownames(res$Coeffs) <- c("Intercept",colnames(ExpliX))
res$FinalModel <- tempregbeta
}
rownames(res$pp) <- colnames(ExpliX)
colnames(res$pp) <- paste("Comp_",1:res$computed_nt)
rownames(res$ww) <- colnames(ExpliX)
colnames(res$ww) <- paste("Comp_",1:res$computed_nt)
rownames(res$wwnorm) <- colnames(ExpliX)
colnames(res$wwnorm) <- paste("Comp_",1:res$computed_nt)
rownames(res$wwetoile) <- colnames(ExpliX)
colnames(res$wwetoile) <- paste("Coord_Comp_",1:res$computed_nt)
rownames(res$tt) <- rownames(ExpliX)
colnames(res$tt) <- paste("Comp_",1:res$computed_nt)
res$XXwotNA <- XXwotNA
cat("****________________________________________________****\n")
cat("\n")
#if(res$computed_nt>0 & modele=="pls-beta") {rm(jj,tt,tts,XXwotNA,YwotNA,envir=parent.frame(n=sys.nframe()))}
return(res)
}
|
# trans-contractions: rewrite vocabulary names using a contraction table.
#
# Reads a two-column table (word -> trans) from contractions.csv and, for
# each row, replaces any name of `vr` (the vocabulary vector, expected to
# exist in the workspace before this script runs) that exactly matches
# `word` with its transcription `trans`. Both objects are then persisted.
contractions <- read.csv2("contractions.csv", stringsAsFactors = FALSE)
transvr <- names(vr)
# seq_len() is safe when the table has zero rows; the original
# 1:nrow(contractions) would wrongly iterate over c(1, 0) in that case.
for (i in seq_len(nrow(contractions))) {
  # Anchor the pattern so only whole-name matches are rewritten.
  # NOTE(review): assumes `word` contains no regex metacharacters -- confirm
  # against the contents of contractions.csv.
  from <- paste0("^", contractions[i, "word"], "$")
  to <- contractions[i, "trans"]
  transvr <- gsub(from, to, transvr)
}
names(vr) <- transvr
# NOTE(review): save() writes .RData-format files even though the filenames
# end in .rds (the saveRDS()/readRDS() format). Kept as-is to preserve
# behavior; confirm how downstream code loads these files before changing.
save(vr, file = "data/vocabulary-reduced.rds")
save(contractions, file = "data/contractions.rds")
| /trans-contractions.R | no_license | mreeddev/wordprev | R | false | false | 373 | r | contractions <- read.csv2("contractions.csv",stringsAsFactors=FALSE)
transvr <- names(vr)
for (i in 1:nrow(contractions)) {
from <- paste0("^",contractions[i,"word"],"$")
to <- contractions[i,"trans"]
transvr <- gsub(from,to,transvr)
}
names(vr) <- transvr
save(vr,file="data/vocabulary-reduced.rds")
save(contractions,file="data/contractions.rds")
|
#' CYTARPersonas
#'
#' Datasource for the complete CYTAR "personas" dataset: registers the
#' fixed personas.csv download URL together with its readr column
#' specification, and exposes the parsed data through consolidate().
#'
#' @author kenarab
#' @importFrom R6 R6Class
#' @import dplyr
#' @import magrittr
#' @import testthat
#' @export
CYTARPersonas <- R6Class("CYTARPersonas",
  inherit = CYTARDatasource,
  public = list(
    # Register URL, local filename and column types with the parent class.
    initialize = function(){
      personas.col.types <- cols(
        persona_id = col_double(),
        nombre = col_character(),
        apellido = col_character(),
        sexo_id = col_double(),
        edad = col_double(),
        cvar_ultimo_acceso = col_date(format = "")
      )
      super$initialize(
        data.url = "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/8ab77b16-f1a8-4d3f-b664-67becf83a9b9/download/personas.csv",
        data.filename = "personas.csv",
        col.types = personas.col.types
      )
      self
    },
    # Configure the datasource, load the CSV and return the data frame.
    consolidate = function(){
      self$configure()
      self$loadData()
      self$data
    }))
#' CYTARPersonasAnio
#'
#' Datasource for a single yearly CYTAR "personas" snapshot. Downloads the
#' per-year CSV and, on consolidate(), enriches it with the discipline,
#' personnel-type and CONICET-category reference tables, also building a
#' per-category summary of record counts.
#'
#' @author kenarab
#' @importFrom R6 R6Class
#' @import dplyr
#' @import magrittr
#' @import testthat
#' @export
CYTARPersonasAnio <- R6Class("CYTARPersonasAnio",
  inherit = CYTARDatasource,
  public = list(
    # Reference-table datasources joined onto the yearly data; built lazily
    # in configure().
    disciplinas.ref = NA,
    categoria.conicet.ref = NA,
    tipo.personal.ref = NA,
    # Record counts by CONICET category and personnel type; populated by
    # consolidate().
    categorias.summary = NA,
    # NOTE(review): `year` and `disciplinas.ref` are accepted but never used;
    # the signature is kept unchanged for backward compatibility with callers
    # such as CYTARPersonasAnioDownloader$generatePersonasYear().
    initialize = function(year, data.url, disciplinas.ref){
      # The local filename is the last path segment of the download URL.
      url.splitted <- strsplit(data.url, split = "/")[[1]]
      super$initialize(data.url = data.url,
                       data.filename = url.splitted[length(url.splitted)],
                       col.types =
                         cols(
                           .default = col_double(),
                           seniority_level = col_character()
                         )
      )
      self
    },
    # Build and consolidate the three reference tables exactly once.
    configure = function(){
      if (!self$configured){
        self$disciplinas.ref <- CYTARDisciplinasRef$new()
        self$tipo.personal.ref <- CYTARTipoPersonalRef$new()
        self$categoria.conicet.ref <- CYTARCategoriaConicetRef$new()
        self$disciplinas.ref$consolidate()
        self$tipo.personal.ref$consolidate()
        self$categoria.conicet.ref$consolidate()
        self$configured <- TRUE
      }
    },
    # Placeholder: consolidated-field validation is not implemented yet.
    checkConsolidatedFields = function(fields){
    },
    consolidate = function(){
      self$configure()
      self$loadData()
      # Fix: dropped the `self.debug <<- self` global-assignment debug
      # leftover and the no-op expressions present in the original body.
      # Rename the discipline reference columns so the join key becomes
      # "disciplina_experticia_id" and the joined columns cannot clash.
      disciplina.experticia <- self$disciplinas.ref$data
      names(disciplina.experticia) <- gsub("disciplina_", "", names(disciplina.experticia))
      names(disciplina.experticia) <- paste("disciplina_experticia_", names(disciplina.experticia), sep = "")
      self$data %<>% left_join(disciplina.experticia, by = "disciplina_experticia_id")
      self$data %<>% left_join(self$tipo.personal.ref$data, by = "tipo_personal_id")
      self$data %<>% left_join(self$categoria.conicet.ref$data, by = "categoria_conicet_id")
      # Summary: record count per CONICET category and personnel type,
      # largest groups first.
      self$categorias.summary <- self$data %>%
        group_by(categoria_conicet_descripcion, tipo_personal_descripcion) %>%
        summarize(n = n()) %>%
        arrange(-n)
      self$data
    }))
#' CYTARPersonasAnioDownloader
#'
#' Factory for CYTARPersonasAnio datasources: holds the catalogue of
#' per-year personas CSV download URLs and builds a consolidated yearly
#' datasource on demand.
#'
#' @author kenarab
#' @importFrom R6 R6Class
#' @import dplyr
#' @import magrittr
#' @import testthat
#' @export
CYTARPersonasAnioDownloader <- R6Class("CYTARPersonasAnioDownloader",
  public = list(
    # Named list: year (as character) -> dataset download URL.
    personas.year.url = NA,
    initialize = function(){
      self$personas.year.url <- list()
      self
    },
    # Register the known per-year URLs (2011-2018). Idempotent: re-running
    # simply rewrites the same entries.
    configure = function(){
      self$personas.year.url[["2011"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/11dca5bb-9a5f-4da5-b040-28957126be18/download/personas_2011.csv"
      self$personas.year.url[["2012"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/5d49a616-2fc1-4270-8b09-73f1f5cdd335/download/personas_2012.csv"
      self$personas.year.url[["2013"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/0fb38a7e-829b-4128-b318-4affd51c022c/download/personas_2013.csv"
      self$personas.year.url[["2014"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/0c8dfedc-a2b5-4c0a-8e78-eed5fe90025f/download/personas_2014.csv"
      self$personas.year.url[["2015"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/b5c212d2-104f-426c-95d0-25ac5bf819d8/download/personas_2015.csv"
      self$personas.year.url[["2016"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/2fbdbf08-4de0-4a1b-92d5-d16751757ab8/download/personas_2016.csv"
      self$personas.year.url[["2017"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/ff318872-775a-4403-bff5-a1c5cdeb85ea/download/personas_2017.csv"
      self$personas.year.url[["2018"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/7b07fb44-64c3-4902-ab73-f59d4ed8a2f5/download/personas_2018.csv"
    },
    # Build, consolidate and return the datasource for `year`.
    # Raises an error (message in Spanish, as elsewhere in the package)
    # when no URL is registered for that year.
    generatePersonasYear = function(year){
      # Robustness fix: configure lazily when the catalogue is still empty,
      # so calling this method before configure() no longer misreports every
      # year as unavailable.
      if (length(self$personas.year.url) == 0){
        self$configure()
      }
      ret <- NULL
      year <- as.character(year)
      if (!year %in% names(self$personas.year.url)){
        stop(paste("Año", year, "sin información disponible sobre personas CYTAR"))
      }
      else{
        ret <- CYTARPersonasAnio$new(year, self$personas.year.url[[year]])
        ret$consolidate()
      }
      ret
    }
  ))
| /R/cytar-personas.R | no_license | rOpenStats/CYTAR | R | false | false | 5,559 | r |
#' CYTARPersonas
#'
#' Datasource wrapper around the full CYTAR "personas" dataset hosted on
#' datos.mincyt.gob.ar. It wires the download URL, the local filename and
#' the readr column types into the parent CYTARDatasource.
#'
#' @author kenarab
#' @importFrom R6 R6Class
#' @import dplyr
#' @import magrittr
#' @import testthat
#' @export
CYTARPersonas <- R6Class("CYTARPersonas",
  inherit = CYTARDatasource,
  public = list(
    # Hand the fixed URL, filename and column specification to the parent.
    initialize = function(){
      col.spec <- cols(
        persona_id = col_double(),
        nombre = col_character(),
        apellido = col_character(),
        sexo_id = col_double(),
        edad = col_double(),
        cvar_ultimo_acceso = col_date(format = "")
      )
      super$initialize(
        data.url = "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/8ab77b16-f1a8-4d3f-b664-67becf83a9b9/download/personas.csv",
        data.filename = "personas.csv",
        col.types = col.spec
      )
      self
    },
    # Configure, load the CSV from its source and return the parsed data.
    consolidate = function(){
      self$configure()
      self$loadData()
      self$data
    }))
#' CYTARPersonasAnio
#'
#' Datasource for one yearly CYTAR "personas" snapshot. Downloads the
#' per-year CSV and, on consolidate(), joins in the discipline,
#' personnel-type and CONICET-category reference tables and builds a
#' per-category summary of record counts.
#'
#' @author kenarab
#' @importFrom R6 R6Class
#' @import dplyr
#' @import magrittr
#' @import testthat
#' @export
CYTARPersonasAnio <- R6Class("CYTARPersonasAnio",
  inherit = CYTARDatasource,
  public = list(
    # Reference-table datasources joined onto the yearly data; built lazily
    # in configure().
    disciplinas.ref = NA,
    categoria.conicet.ref = NA,
    tipo.personal.ref = NA,
    # Record counts by CONICET category and personnel type; populated by
    # consolidate().
    categorias.summary = NA,
    # NOTE(review): `year` and `disciplinas.ref` are accepted but never used;
    # the signature is kept unchanged for backward compatibility with callers
    # such as CYTARPersonasAnioDownloader$generatePersonasYear().
    initialize = function(year, data.url, disciplinas.ref){
      # The local filename is the last path segment of the download URL.
      url.splitted <- strsplit(data.url, split = "/")[[1]]
      super$initialize(data.url = data.url,
                       data.filename = url.splitted[length(url.splitted)],
                       col.types =
                         cols(
                           .default = col_double(),
                           seniority_level = col_character()
                         )
      )
      self
    },
    # Build and consolidate the three reference tables exactly once.
    configure = function(){
      if (!self$configured){
        self$disciplinas.ref <- CYTARDisciplinasRef$new()
        self$tipo.personal.ref <- CYTARTipoPersonalRef$new()
        self$categoria.conicet.ref <- CYTARCategoriaConicetRef$new()
        self$disciplinas.ref$consolidate()
        self$tipo.personal.ref$consolidate()
        self$categoria.conicet.ref$consolidate()
        self$configured <- TRUE
      }
    },
    # Placeholder: consolidated-field validation is not implemented yet.
    checkConsolidatedFields = function(fields){
    },
    consolidate = function(){
      self$configure()
      self$loadData()
      # Fix: dropped the `self.debug <<- self` global-assignment debug
      # leftover and the no-op expressions present in the original body.
      # Rename the discipline reference columns so the join key becomes
      # "disciplina_experticia_id" and the joined columns cannot clash.
      disciplina.experticia <- self$disciplinas.ref$data
      names(disciplina.experticia) <- gsub("disciplina_", "", names(disciplina.experticia))
      names(disciplina.experticia) <- paste("disciplina_experticia_", names(disciplina.experticia), sep = "")
      self$data %<>% left_join(disciplina.experticia, by = "disciplina_experticia_id")
      self$data %<>% left_join(self$tipo.personal.ref$data, by = "tipo_personal_id")
      self$data %<>% left_join(self$categoria.conicet.ref$data, by = "categoria_conicet_id")
      # Summary: record count per CONICET category and personnel type,
      # largest groups first.
      self$categorias.summary <- self$data %>%
        group_by(categoria_conicet_descripcion, tipo_personal_descripcion) %>%
        summarize(n = n()) %>%
        arrange(-n)
      self$data
    }))
#' CYTARPersonasAnioDownloader
#'
#' Factory for CYTARPersonasAnio datasources: keeps the catalogue of
#' per-year personas CSV download URLs and instantiates a consolidated
#' yearly datasource on demand.
#'
#' @author kenarab
#' @importFrom R6 R6Class
#' @import dplyr
#' @import magrittr
#' @import testthat
#' @export
CYTARPersonasAnioDownloader <- R6Class("CYTARPersonasAnioDownloader",
  public = list(
    # Named list: year (as character) -> dataset download URL.
    personas.year.url = NA,
    initialize = function(){
      self$personas.year.url <- list()
      self
    },
    # Register the known per-year URLs (2011-2018). Idempotent: re-running
    # simply rewrites the same entries.
    configure = function(){
      self$personas.year.url[["2011"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/11dca5bb-9a5f-4da5-b040-28957126be18/download/personas_2011.csv"
      self$personas.year.url[["2012"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/5d49a616-2fc1-4270-8b09-73f1f5cdd335/download/personas_2012.csv"
      self$personas.year.url[["2013"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/0fb38a7e-829b-4128-b318-4affd51c022c/download/personas_2013.csv"
      self$personas.year.url[["2014"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/0c8dfedc-a2b5-4c0a-8e78-eed5fe90025f/download/personas_2014.csv"
      self$personas.year.url[["2015"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/b5c212d2-104f-426c-95d0-25ac5bf819d8/download/personas_2015.csv"
      self$personas.year.url[["2016"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/2fbdbf08-4de0-4a1b-92d5-d16751757ab8/download/personas_2016.csv"
      self$personas.year.url[["2017"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/ff318872-775a-4403-bff5-a1c5cdeb85ea/download/personas_2017.csv"
      self$personas.year.url[["2018"]] <- "https://datasets.datos.mincyt.gob.ar/dataset/06ae9728-c376-47bd-9c41-fbdca68707c6/resource/7b07fb44-64c3-4902-ab73-f59d4ed8a2f5/download/personas_2018.csv"
    },
    # Build, consolidate and return the datasource for `year`.
    # Raises an error (message in Spanish, as elsewhere in the package)
    # when no URL is registered for that year.
    generatePersonasYear = function(year){
      # Robustness fix: configure lazily when the catalogue is still empty,
      # so calling this method before configure() no longer misreports every
      # year as unavailable.
      if (length(self$personas.year.url) == 0){
        self$configure()
      }
      ret <- NULL
      year <- as.character(year)
      if (!year %in% names(self$personas.year.url)){
        stop(paste("Año", year, "sin información disponible sobre personas CYTAR"))
      }
      else{
        ret <- CYTARPersonasAnio$new(year, self$personas.year.url[[year]])
        ret$consolidate()
      }
      ret
    }
  ))
|
\name{determinant.lrpd}
\alias{determinant.lrpd}
\title{
Determinant
}
\description{
This function efficiently computes the determinant of an lrpd matrix.
}
\usage{
\method{determinant}{lrpd}(object, logarithm = TRUE, ...)
}
\arguments{
\item{object}{
an object of class "lrpd".
}
\item{logarithm}{
logical; if \code{TRUE} (the default), return the logarithm of the determinant.
}
\item{\dots}{
not used.
}
}
\value{
\item{scalar}{(logarithm of) the determinant.}
}
\author{
Ye Wang (Eric)
Maintainer: \email{ericwang921198@gmail.com}
}
\seealso{
\code{\link{solve.lrpd}} for inverse.
}
\examples{
library(lrpd)
set.seed(2)
K <- 1000
L <- matrix(rnorm(K*floor(K/10)),K,floor(K/10))
Sl <- matrix(rnorm(floor(K/10)*floor(K/10)),floor(K/10),floor(K/10))
S <- Sl\%*\%t(Sl)+diag(rnorm(floor(K/10))^2)
N <- rnorm(K)^2
R <- L\%*\%S\%*\%t(L) + diag(N)
mat <- lrpd(N,L,S)
system.time(RI1 <- as.numeric(determinant(R)$modulus))
system.time(RI2 <- determinant(mat))
all.equal(RI1,RI2)
}
\keyword{algebra}
| /lrpd/man/determinant.lrpd.Rd | no_license | ericyewang/R-Package-lrpd | R | false | false | 993 | rd | \name{determinant.lrpd}
\alias{determinant.lrpd}
\title{
Determinant
}
\description{
This function efficiently computes the determinant of an lrpd matrix.
}
\usage{
\method{determinant}{lrpd}(object, logarithm = TRUE, ...)
}
\arguments{
\item{object}{
an object of class "lrpd".
}
\item{logarithm}{
logical; if \code{TRUE} (the default), return the logarithm of the determinant.
}
\item{\dots}{
not used.
}
}
\value{
\item{scalar}{(logarithm of) the determinant.}
}
\author{
Ye Wang (Eric)
Maintainer: \email{ericwang921198@gmail.com}
}
\seealso{
\code{\link{solve.lrpd}} for inverse.
}
\examples{
library(lrpd)
set.seed(2)
K <- 1000
L <- matrix(rnorm(K*floor(K/10)),K,floor(K/10))
Sl <- matrix(rnorm(floor(K/10)*floor(K/10)),floor(K/10),floor(K/10))
S <- Sl\%*\%t(Sl)+diag(rnorm(floor(K/10))^2)
N <- rnorm(K)^2
R <- L\%*\%S\%*\%t(L) + diag(N)
mat <- lrpd(N,L,S)
system.time(RI1 <- as.numeric(determinant(R)$modulus))
system.time(RI2 <- determinant(mat))
all.equal(RI1,RI2)
}
\keyword{algebra}
|
# Exploratory-analysis scaffold for Citi Bike trip data.
# NOTE(review): several steps below are unfinished placeholders; see inline notes.
library(tidyverse)
library(plotly)
# read in data
df <- readr::read_csv("./data/citibike-tripdata.csv")
# create a summary table
# NOTE(review): no summary table is actually built here, yet `summary` is
# referenced in scale_x_discrete() below, so rendering the bar plot will fail.
# Presumably a per-station ride count (grouped by start_station_name) was
# intended — TODO confirm and add it. The name `summary` also shadows
# base::summary().
# create a bar plot
# NOTE(review): ggplot() is given no data or aes() mapping, so this is an
# empty scaffold; geom_bar(stat="identity") needs x/y aesthetics to draw bars.
ggplot() +
  geom_bar(stat="identity", show.legend=F) +
  ggtitle("Total Rides by Station") +
  theme(plot.title = element_text(hjust = 0.5)) +
  # axis labeller replaces spaces with newlines so long station names wrap
  scale_x_discrete(labels=function(x){gsub(" ", "\n", summary$start_station_name)})
# create a scatter plot the ggplot way
# NOTE(review): placeholder — no data/aes supplied, draws nothing.
ggplot() + geom_point()
# create scatter plot with plotly
# NOTE(review): placeholder — plot_ly() called with no data.
plot_ly()
| /02_project/eda.R | no_license | awaagner/shiny_workshop | R | false | false | 497 | r | library(tidyverse)
library(plotly)
# read in data
df <- readr::read_csv("./data/citibike-tripdata.csv")
# create a summary table
# create a bar plot
ggplot() +
geom_bar(stat="identity", show.legend=F) +
ggtitle("Total Rides by Station") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_discrete(labels=function(x){gsub(" ", "\n", summary$start_station_name)})
# create a scatter plot the ggplot way
ggplot() + geom_point()
# create scatter plot with plotly
plot_ly()
|
#!/usr/bin/env Rscript
# Update the local Git repository and list the available SciServer version tags.

cat("\n---1) Updating local Git repository...\n\n")
system("git tag -d $(git tag)")  # delete all local tags so removed remote tags don't linger
system("git fetch --all")        # fetch all remotes into the local repo, including tags
system("git checkout master")

cat("\n--2) Listing available SciServer version tags:\n\n")
tags <- system('git tag --list "*sciserver*"', intern = TRUE)
if (length(tags) == 0){
  cat("No SciServer Tags available.\n\n")
} else {
  # Print the captured tag list instead of spawning a second git subprocess.
  writeLines(tags)
}
cat("\n*** Refer to http://www.sciserver.org/support/updates for particular release tag details.\n\n") | /ShowSciServerTags.R | permissive | jonashaase/SciScript-R | R | false | false | 573 | r | #!/usr/bin/env Rscript
cat("\n---1) Updating local Git repository...\n\n")
system("git tag -d $(git tag)") #deletes local tags
system("git fetch --all") #fetches all remotes into local repo, including tags.
system("git checkout master")
cat("\n--2) Listing available SciServer version tags:\n\n")
tags = system('git tag --list "*sciserver*"', intern=TRUE)
if(length(tags)==0){
cat("No SciServer Tags available.\n\n")
}else{
system('git tag --list "*sciserver*"')
}
cat("\n*** Refer to http://www.sciserver.org/support/updates for particular release tag details.\n\n") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BASiCS_RegressionDE.R
\name{BASiCS_RegressionDE}
\alias{BASiCS_RegressionDE}
\title{Detection of genes with changes in expression using linear regression}
\usage{
BASiCS_RegressionDE(
Chains,
ModelMatrix,
EFDR_M = 0.05,
EFDR_D = 0.05,
EFDR_R = 0.05,
EpsilonM = log2(1.5),
EpsilonD = log2(1.5),
EpsilonR = log2(1.5)/log2(exp(1)),
ProbThresholdM = 2/3,
ProbThresholdD = 2/3,
ProbThresholdR = 2/3,
OrderVariable = "GeneIndex",
GenesSelect = NULL,
Classes = as.list(colnames(ModelMatrix)),
Parameters = c("mu", "delta", "epsilon"),
...
)
}
\arguments{
\item{Chains}{named list of objects of class \code{\linkS4class{BASiCS_Chain}}.
No offset correction is applied here; please run \code{BASiCS_CorrectOffset}
beforehand.}
\item{EFDR_M}{Target for expected false discovery rate related to
the comparison of means. If \code{EFDR_M = NULL}, EFDR calibration is not
performed and the posterior probability threshold is set equal to
\code{ProbThresholdM}. Default \code{EFDR_M = 0.05}.}
\item{EFDR_D}{Target for expected false discovery rate related to
the comparison of dispersions. If \code{EFDR_D = NULL}, EFDR calibration is
not performed and the posterior probability threshold is set equal to
\code{ProbThresholdD}.Default \code{EFDR_D = 0.05}.}
\item{EFDR_R}{Target for expected false discovery rate related to
the comparison of residual over-dispersions. If \code{EFDR_R = NULL}, EFDR
calibration is not performed and the posterior probability threshold is set
equal to \code{ProbThresholdR}.Default \code{EFDR_D = 0.05}.}
\item{EpsilonM}{Minimum fold change tolerance threshold for detecting
changes in overall expression (must be a positive real number).
Default value: \code{EpsilonM = log2(1.5)} (i.e. 50\% increase).}
\item{EpsilonD}{Minimum fold change tolerance threshold for detecting
changes in biological over-dispersion (must be a positive real number).
Default value: \code{EpsilonM = log2(1.5)} (i.e. 50\% increase).}
\item{EpsilonR}{Minimum distance threshold for detecting
changes in residual over-dispersion (must be a positive real number).
Default value: \code{EpsilonR= log2(1.5)/log2(exp(1))} (i.e. 50\% increase).}
\item{ProbThresholdM}{Optional parameter. Probability threshold for detecting
changes in overall expression (must be a positive value, between 0 and 1).
If \code{EFDR_M = NULL}, the posterior probability threshold for the
differential mean expression test will be set to \code{ProbThresholdM}. If
a value for \code{EFDR_M} is provided, the posterior probability threshold
is chosen to achieve an EFDR equal to \code{EFDR_M} and \code{ProbThresholdM}
defines a minimum probability threshold for this calibration (this avoids low
values of \code{ProbThresholdM} to be chosen by the EFDR calibration.
Default value \code{ProbThresholdM = 2/3}, i.e. the probability of observing
a log2-FC above \code{EpsilonM} must be at least twice the probability of
observing the complementary event (log2-FC below \code{EpsilonM}).}
\item{ProbThresholdD}{Optional parameter. Probability threshold for detecting
changes in cell-to-cell biological over-dispersion (must be a positive value,
between 0 and 1). Same usage as \code{ProbThresholdM}, depending on the value
provided for \code{EFDR_D}. Default value \code{ProbThresholdD = 2/3}.}
\item{ProbThresholdR}{Optional parameter. Probability threshold for detecting
changes in residual over-dispersion (must be a positive value, between 0 and
1). Same usage as \code{ProbThresholdM}, depending on the value provided for
\code{EFDR_R}. Default value \code{ProbThresholdR = 2/3}.}
\item{OrderVariable}{Ordering variable for output.
Possible values: \code{'GeneIndex'} (default), \code{'GeneName'} and
\code{'Mu'} (mean expression).}
\item{GenesSelect}{Optional argument to provide a user-defined list
of genes to be considered for the comparison.
Default: \code{GenesSelect = NULL}. When used, this argument must be a vector
of \code{TRUE} (include gene) / \code{FALSE} (exclude gene) indicator,
with the same length as the number of intrinsic genes and following the same
order as how genes are displayed in the table of counts.
This argument is necessary in order to have a meaningful EFDR calibration
when the user decides to exclude some genes from the comparison.}
\item{Parameters}{specifies which parameters should be tested.}
\item{...}{Optional parameters.}
\item{Design}{an object of class \code{data.frame} specifying the design of the study.
At least one column must be named \code{Chain} and correspond to \code{names(Chains)}.}
\item{Formula}{an object of class \code{formula} specifying a description of the model to be fitted.}
\item{MultiClass}{is a boolean specifying if all coefficients should be tested}
}
\value{
\code{BASiCS_RegressionDE} returns a list similar to BASiCS_TestDE.
}
\description{
Function to assess changes in expression between two or more groups
of cells (mean and over-dispersion).
}
\seealso{
\link[BASiCS]{BASiCS_TestDE}
}
| /man/BASiCS_RegressionDE.Rd | no_license | nstroustrup/HelpingHand | R | false | true | 5,057 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BASiCS_RegressionDE.R
\name{BASiCS_RegressionDE}
\alias{BASiCS_RegressionDE}
\title{Detection of genes with changes in expression using linear regression}
\usage{
BASiCS_RegressionDE(
Chains,
ModelMatrix,
EFDR_M = 0.05,
EFDR_D = 0.05,
EFDR_R = 0.05,
EpsilonM = log2(1.5),
EpsilonD = log2(1.5),
EpsilonR = log2(1.5)/log2(exp(1)),
ProbThresholdM = 2/3,
ProbThresholdD = 2/3,
ProbThresholdR = 2/3,
OrderVariable = "GeneIndex",
GenesSelect = NULL,
Classes = as.list(colnames(ModelMatrix)),
Parameters = c("mu", "delta", "epsilon"),
...
)
}
\arguments{
\item{Chains}{named list of object of class \code{\linkS4class{BASiCS_Chain}}.
No offset is implemented here. Please use \code{BASiCS_CorrectOffset} before.
.}
\item{EFDR_M}{Target for expected false discovery rate related to
the comparison of means. If \code{EFDR_M = NULL}, EFDR calibration is not
performed and the posterior probability threshold is set equal to
\code{ProbThresholdM}. Default \code{EFDR_M = 0.05}.}
\item{EFDR_D}{Target for expected false discovery rate related to
the comparison of dispersions. If \code{EFDR_D = NULL}, EFDR calibration is
not performed and the posterior probability threshold is set equal to
\code{ProbThresholdD}.Default \code{EFDR_D = 0.05}.}
\item{EFDR_R}{Target for expected false discovery rate related to
the comparison of residual over-dispersions. If \code{EFDR_R = NULL}, EFDR
calibration is not performed and the posterior probability threshold is set
equal to \code{ProbThresholdR}.Default \code{EFDR_D = 0.05}.}
\item{EpsilonM}{Minimum fold change tolerance threshold for detecting
changes in overall expression (must be a positive real number).
Default value: \code{EpsilonM = log2(1.5)} (i.e. 50\% increase).}
\item{EpsilonD}{Minimum fold change tolerance threshold for detecting
changes in biological over-dispersion (must be a positive real number).
Default value: \code{EpsilonM = log2(1.5)} (i.e. 50\% increase).}
\item{EpsilonR}{Minimum distance threshold for detecting
changes in residual over-dispersion (must be a positive real number).
Default value: \code{EpsilonR= log2(1.5)/log2(exp(1))} (i.e. 50\% increase).}
\item{ProbThresholdM}{Optional parameter. Probability threshold for detecting
changes in overall expression (must be a positive value, between 0 and 1).
If \code{EFDR_M = NULL}, the posterior probability threshold for the
differential mean expression test will be set to \code{ProbThresholdM}. If
a value for \code{EFDR_M} is provided, the posterior probability threshold
is chosen to achieve an EFDR equal to \code{EFDR_M} and \code{ProbThresholdM}
defines a minimum probability threshold for this calibration (this avoids low
values of \code{ProbThresholdM} to be chosen by the EFDR calibration.
Default value \code{ProbThresholdM = 2/3}, i.e. the probability of observing
a log2-FC above \code{EpsilonM} must be at least twice the probability of
observing the complementary event (log2-FC below \code{EpsilonM}).}
\item{ProbThresholdD}{Optional parameter. Probability threshold for detecting
changes in cell-to-cell biological over-dispersion (must be a positive value,
between 0 and 1). Same usage as \code{ProbThresholdM}, depending on the value
provided for \code{EFDR_D}. Default value \code{ProbThresholdD = 2/3}.}
\item{ProbThresholdR}{Optional parameter. Probability threshold for detecting
changes in residual over-dispersion (must be a positive value, between 0 and
1). Same usage as \code{ProbThresholdM}, depending on the value provided for
\code{EFDR_R}. Default value \code{ProbThresholdR = 2/3}.}
\item{OrderVariable}{Ordering variable for output.
Possible values: \code{'GeneIndex'} (default), \code{'GeneName'} and
\code{'Mu'} (mean expression).}
\item{GenesSelect}{Optional argument to provide a user-defined list
of genes to be considered for the comparison.
Default: \code{GenesSelect = NULL}. When used, this argument must be a vector
of \code{TRUE} (include gene) / \code{FALSE} (exclude gene) indicator,
with the same length as the number of intrinsic genes and following the same
order as how genes are displayed in the table of counts.
This argument is necessary in order to have a meaningful EFDR calibration
when the user decides to exclude some genes from the comparison.}
\item{Parameters}{specifies which parameters should be tested.}
\item{...}{Optional parameters.}
\item{Design}{an object of class \code{data.frame} specifying the design of the study.
At least one column must be named \code{Chain} and correspond to \code{names(Chains)}.}
\item{Formula}{an object of class \code{formula} specifying a description of the model to be fitted.}
\item{MultiClass}{is a boolean specifying if all coefficients should be tested}
}
\value{
\code{BASiCS_RegressionDE} returns a list similar to BASiCS_TestDE.
}
\description{
Function to assess changes in expression between two or more groups
of cells (mean and over-dispersion).
}
\seealso{
\link[BASiCS]{BASiCS_TestDE}
}
|
#!/usr/bin/env Rscript
# mcmc-odesim.R
# authors: Ephraim Hanks, Nathan Wikle, Emily Strong
# last edited: 20 Nov 2020
#
# This file defines the mcmc.odesim function, which generates
# samples from the posterior of all model parameters using
# MCMC. A large number of function arguments are available
# (see function definition); output is returned as a large list.
mcmc.odesim <- function(
n.mcmc, # number of MCMC iterations to run
df, # state-level covid data (must be a data frame)
odepath, # path to "odesim"
odesim.ver = "v5", # version of odesim (defaults to "v5")
lik.tot = TRUE, # evaluate likelihood for total new cases ONLY (ie, set F and use lik.age for age-strat. data; default = TRUE)
lik.age = FALSE, # evaluate likelihood for age-struc. new cases and hosps AND total new cases and hosps (default = FALSE)
lik.hosp.new = TRUE, # evaluate likelihood for new hosp. cases (default = FALSE)
lik.hosp.curr = FALSE, # evaluate likelihood for current hosp. (default = FALSE)
lik.icu.curr = FALSE, # evaluate likelihood for current icu admits (default = FALSE)
lik.vent.curr = FALSE, # evaluate likelihood for current vent admits (default = FALSE)
lik.tot.deaths = FALSE, # evaluate likelihood for tot. deaths ONLY (ie, set F and use lik.age.deaths for age-strat. data; default = FALSE)
lik.age.deaths = FALSE, # evaluate likelihood for age-struc. new deaths and total new deaths (default = FALSE)
lik.home.deaths = FALSE, # evaluate likelihood for new home deaths (default = FALSE)
lik.hosp.deaths = FALSE, # evaluate likelihood for new hosp. deaths (default = FALSE)
lik.hosp.discharges = FALSE, # evaluate likelihood for hospital discharges (default = FALSE)
case.constraint = FALSE, # constrain fit to cumulative cases to be within 10% of data (default = FALSE)
active.surv = FALSE, # include active surveillance data (default = FALSE)
p.asympt = 0.4, # proportion of asymptomatic individuals (default = 0.4; CAUTION: DO NOT CHANGE UNLESS ODESIM REFLECTS A DIFFERENT VALUE!)
beta.start, # starting values for contact rate spline loadings
spline.beta, # fda "spline" object spanning the time window
report.rate.params.start, # rate at which infecteds report (vector or scalar)
spline.rr, # spline object (iSpline, bSpline, or constant vec) spanning time window
ode.params.start = NULL, # odesim param starting values (named vector matching odesim CLOs)
const.params = NULL, # odesim CLOs, to be kept constant
ode.params.prior.min = -Inf, # vector of lower bounds for odesim params (uniform priors)
ode.params.prior.max = Inf, # vector of upper bounds for odesim params (uniform priors)
non.odesim.params = NULL, # non-odesim param starting values (eg, hosp.report.rate)
lik.params.start = NULL, # starting values for dispersion parameters in NB likelihood
fixed.nb.disp = FALSE, # Boolean indicating if NB dispersion params should be fixed (default = FALSE)
start.day = 61, # start day of odesim (default = 61, DON'T CHANGE)
end.day, # end day of odesim
introday = NULL, # day of first infected
loc = "RI", # US state used for analysis (one of "RI" (default), "MA", or "PA")
s2.hosp.start = .01, # initial value for current hosp variance hyperparam
s2.icu.start = .01, # initial value for current icu variance hyperparam
s2.vent.start = .01, # initial value for current vent variance hyperparam
s2.beta.start = .01, # initial value for beta prior (multivar. normal) marg. variance hyperparam
s2.rr.start = .01, # initial value for rr prior (multivar. normal) marg. variance hyperparam
adapt.iter = 100, # adaptive tuning update interval (log-adaptive tuning on MH based on Shaby and Wells, 2011)
indep.mh = FALSE, # if TRUE, propose beta separate from other params
t.adapt.start = 0, # number of times adaptive tuning var. has been updated (default = 0)
prop.type = "tnorm", # MH proposal type (default = "tnorm")
adapt.type = "ShabyWells", # adaptive tuning type (default = "ShabyWells")
c0 = 1, # Shaby Wells adaptive tuning constant c0
c1 = 0.8, # Shaby Wells adaptive tuning constant c1
var.tune = NULL, # list of tuning param variances, order = (beta, ode.params, rr, s2, lik)
Sigma.tune = NULL, # list of tuning param covars, order = (beta, ode.params, rr, s2, lik)
p.vecs, # weekly vector of delay probabilities (should be c(1, rep(0,6)) unless good reason otherwise)
thin = 1, # rate to thin saved posterior samples (default = 1)
plot.save = TRUE, # plot trace plots while mcmc is running (default = TRUE)
plot.rate = 10, # refresh rate on trace plots (default = 10)
plot.name = "traceplots.pdf", # name of trace plots (default = "traceplots.pdf")
print.iter = FALSE, # print iteration number everytime sample is saved (default = FALSE)
sf.choice = FALSE, # estimate proportion of symptomatics from 7 possible age-strat. combinations (default = FALSE)
) {
########################
### 1. Preliminaries ###
########################
# adaptive structure
t.adapt <- t.adapt.start
# process data frame for likelihood evaluation
data.processed <- data.process(df, loc = loc)
# add data.processed results to global environment
list2env(data.processed, globalenv())
# useful constants
n.days <- length(days)
num.beta <- length(beta.start)
num.rr <- length(report.rate.params.start)
num.ode.params <- length(ode.params.start)
num.lik.params <- length(lik.params.start)
### structures to save parameter samples:
beta.save <- matrix(NA_real_, nrow = n.mcmc/thin, ncol = num.beta)
rr.params.save <- matrix(NA_real_, nrow = n.mcmc/thin, ncol = num.rr)
ode.params.save <- matrix(NA_real_, nrow = n.mcmc/thin, ncol = num.ode.params)
ode.params.names <- names(ode.params.start)
colnames(ode.params.save) <- ode.params.names
if (num.lik.params > 0){
lik.params.save <- matrix(NA_real_, nrow = n.mcmc/thin, ncol = num.lik.params)
} else {
lik.params.save <- NULL
}
s2.beta.save <- rep(NA_real_, n.mcmc/thin)
s2.rr.save <- rep(NA_real_, n.mcmc/thin)
s2.params.save <- matrix(NA_real_, n.mcmc/thin, 3)
loglik.save <- rep(NA_real_, n.mcmc/thin)
### initialize parameters:
# betas
beta <- beta.start # vector of beta parameters
Z <- eval.basis(spline.beta, start.day:end.day) # beta spline basis functions
beta.daily <- Z %*% beta
# reporting rate
if (is.matrix(spline.rr)){
Z.rr <- spline.rr
} else {
Z.rr <- eval.basis(spline.rr, start.day:end.day)
}
rr.params <- report.rate.params.start
rr.daily <- Z.rr %*% rr.params
# hyperparameters
s2.hosp <- s2.hosp.start
s2.icu <- s2.icu.start
s2.vent <- s2.vent.start
s2.params <- c(s2.hosp, s2.icu, s2.vent)
s2.beta <- s2.beta.start # marginal variance of beta prior
s2.rr <- s2.rr.start
ode.params <- ode.params.start
lik.params <- lik.params.start
lik.params.star <- lik.params ## needs to be NULL if lik.params = NULL so that loglik.odesim gets right values
# extra parameters (like hospitalization reporting rate )
extra.params <- NULL
extra.const.params <- NULL
extra.params.fitted.idx <- integer()
extra.params.const.idx <- integer()
if(length(non.odesim.params)>0){
for(k in 1:length(non.odesim.params)){
extra.params.fitted.idx <- c(extra.params.fitted.idx,
which(names(ode.params) == non.odesim.params[k]))
}
if (length(extra.params.fitted.idx) > 0){
extra.params <- ode.params[extra.params.fitted.idx]
}
for(k in 1:length(non.odesim.params)){
extra.params.const.idx <- c(extra.params.const.idx,
which(names(const.params) == non.odesim.params[k]))
}
if (length(extra.params.const.idx) > 0){
extra.const.params <- const.params[extra.params.const.idx]
}
}
### parameter for symptomatic fractions
if (sf.choice){
# if true, create structure for sampling symp-frac settings (6 options)
# list of model possibilities
symp.vals <- c("", "-symp-frac-davies", "-symp-frac-equal 0.3",
"-symp-frac-equal 0.4", "-symp-frac-equal 0.5",
"-symp-frac-equal 0.6", "-symp-frac-equal 0.7")
# number of symp-frac options
n.sf <- length(symp.vals)
# initialize to random starting symp-frac option
K <- sample.int(n.sf, size = 1)
# structure to store models
K.vals <- rep(NA_integer_, n.mcmc/thin)
symp.cur <- symp.vals[K]
} else {
symp.cur <- NULL
K.vals <- NULL
}
### create a matrix P which can be used to calculate Poisson rates after delay
### note: this is no longer used for any state
P <- matrix(0, nrow = end.day + max(sapply(p.vecs, length)), ncol = end.day+1)
colnames(P) <- paste("Day", 1:(end.day+1), sep = " ")
for(j in 1:ncol(P)){
if (j < 74){
## period 1: beginning - March 13
P[j:(j + length(p.vecs[[1]]) - 1), j] <- p.vecs[[1]]
} else if ((j >= 74) & (j < 84)){
## period 2: March 14 - March 23
P[j:(j + length(p.vecs[[2]]) - 1), j] <- p.vecs[[2]]
} else if ((j >= 84) & (j < 88)){
## period 3: March 24 - March 27
P[j:(j + length(p.vecs[[3]]) - 1), j] <- p.vecs[[3]]
} else {
## period 4: March 28 - present
P[j:(j + length(p.vecs[[4]]) - 1), j] <- p.vecs[[4]]
}
}
#################
### 2. Priors ###
#################
### beta prior: random walk
### (penalized regression spline w/ 1st-order diffs)
D <- diff(diag(num.beta), differences = 1)
S <- crossprod(D)
### beta ~ N(0, s2.beta * S^-1)
beta.prior.loglik <- function(beta, s2.beta, precision = S){
if(min(beta) < 0){
ll <- -Inf
} else {
ll <- -1 / 2 / s2.beta * (t(beta) %*% (precision %*% beta))
}
return(ll)
}
### rr prior: random walk
### (same prior for report.rate params as for beta...)
D.rr <- diff(diag(num.rr), differences = 1)
S.rr <- crossprod(D.rr)
### rr ~ N(0, s2.rr * S.rr^-1)
rr.prior.loglik <- function(rr, s2.rr, precision = S.rr){
if(min(rr) < 0 | max(rr) > 1){
ll <- -Inf
} else {
ll <- -1 / 2 / s2.rr * (t(rr) %*% (precision %*% rr))
}
return(ll)
}
### dispersion parameter: exponential prior
### disp ~ Exp(lambda = 100)
lik.params.prior.loglik <- function(lik.params,
lambda = rep(100, length(lik.params))){
if(length(lik.params) < 1){
ll <- 0
} else {
# exponential prior
ll <- sum(dexp(lik.params, rate = lambda, log = TRUE))
# improper uniform prior
# ll=0
# if(min(lik.params)<.1){
# ll=-Inf
# }
}
return(ll)
}
### s2.beta prior: inverse gamma
### s2.beta ~ IG(s = shape, r = rate)
# initialize: s = 1, r = 1
s <- 1; r <- 1
# Uniform priors for odesim parameters
ode.params.prior.loglik <- function(ode.params, ode.params.min, ode.params.max){
ll <- 0
if(sum(c(ode.params < ode.params.min , ode.params > ode.params.max)) > 0){
ll <- -Inf
}
return(ll)
}
### Uniform priors for s2 parameters
s2.params.prior.loglik <- function(s2.params){
ll <- 0
if(min(s2.params) < 0){
ll <- -Inf
}
return(ll)
}
################################
### 3. Likelihood evaluation ###
################################
### simulate trajectory using current beta values:
traj <- traj.from.params(beta = beta.daily,
params = ode.params,
tf = end.day,
introday = introday,
const.params = const.params,
non.odesim.params = non.odesim.params,
odepath = odepath,
loc = loc,
symp = symp.cur)
### evaluate logliklihood under initial conditions
llvals <- loglik.odesim(traj = traj,
df = df,
dp = data.processed,
odesim.ver = odesim.ver,
P = P,
loc = loc,
report.rate = rr.daily,
nb.disp.params = lik.params,
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
s2.hosp = s2.hosp,
s2.icu = s2.icu,
s2.vent = s2.vent,
extra.params = extra.params, ### if cumul. hosp. reporting rate is fitted
extra.const.params = extra.const.params) ### if cumul. hosp. reporting rate is constant
ll.current <- llvals$ll
ll.new <- llvals$ll.new
ll.hosp.new <- llvals$ll.new
###################################
### 4. Adaptive proposal set-up ###
###################################
accept <- rep(0, length(var.tune))
accept.tot <- accept
never.adapt <- TRUE
##########################
### 5. MCMC iterations ###
##########################
for(iter in 1:n.mcmc){
# print-out iteration number every 100 iterations
if (iter %% 100 == 0 & print.iter){
cat(iter, " ")
}
#########################################################################
### Propose beta
#########################################################################
### setting up error catching - reject beta if odesim fails
beta.good <- 0
while(beta.good < 1){
# if(prop.type=="norm"){
# beta.star <- t(rmvnorm(1, c(beta), var.tune[1] * Sigma.tune[[1]]))
# }
# if(prop.type=="tnorm"){
# beta.star <- rtnorm(length(beta),beta,sqrt(var.tune[1]*diag(Sigma.tune[[1]])),0,Inf)
# }
# propose beta.star
beta.star <- exp(rnorm(length(beta), log(beta),
sqrt(var.tune[1] * diag(Sigma.tune[[1]]))))
beta.daily.star <- Z %*% beta.star
### trajectory given beta.star
traj.star <- try(traj.from.params(beta = beta.daily.star,
params = ode.params,
tf = end.day,
introday = introday,
const.params = const.params,
non.odesim.params = non.odesim.params,
odepath = odepath,
loc = loc,
symp = symp.cur), silent = TRUE)
# check if error was thrown. if not, leave while loop
if(class(traj.star) != "try-error"){
beta.good <- 1
}
}
### evaluate loglikelihood for beta.star
llvals.star <- loglik.odesim(traj = traj.star,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject beta.star
mh1 <- ll.star + beta.prior.loglik(beta.star, s2.beta) + sum(log(beta.star))
mh2 <- ll.current + beta.prior.loglik(beta, s2.beta) + sum(log(beta))
# if(prop.type=="tnorm"){
# mh1=mh1+sum(dtnorm(beta,beta.star,sqrt(var.tune[1]*diag(Sigma.tune[[1]])),0,Inf,log=TRUE))
# mh2=mh2+sum(dtnorm(beta.star,beta,sqrt(var.tune[1]*diag(Sigma.tune[[1]])),0,Inf,log=TRUE))
# }
if(is.na(mh1)){
mh1 <- -Inf
}
####if Unif(0,1) < mh1/mh2, accept new beta and disp parameters
if (exp(mh1 - mh2) > runif(1)){
### accept beta.star
beta <- beta.star
beta.daily <- beta.daily.star
traj <- traj.star
llvals=llvals.star
ll.current <- ll.star
## if(lik.age){
## sympt.new.imputed=sympt.new.star
## }
accept[1] <- accept[1] + 1
}
#########################################################################
### Propose ode.params
#########################################################################
### setting up error catching - reject ode.params if odesim fails
beta.good <- 0
while(beta.good < 1){
if(prop.type == "norm"){
ode.params.star <- t(rmvnorm(1, ode.params, var.tune[2] * Sigma.tune[[2]]))
}
if(prop.type == "tnorm"){
ode.params.star <- (rtnorm(length(ode.params), ode.params,
sqrt(var.tune[2] * diag(Sigma.tune[[2]])), ode.params.prior.min, ode.params.prior.max))
}
names(ode.params.star) <- ode.params.names
extra.params.star <- ode.params.star[extra.params.fitted.idx]
### trajectory given ode.params.star
traj.star <- try(traj.from.params(beta = beta.daily,
params = ode.params.star,
tf = end.day,
introday = introday,
const.params = const.params,
non.odesim.params = non.odesim.params,
odepath = odepath,
loc = loc,
symp = symp.cur), silent = TRUE)
if(class(traj.star) != "try-error" & min(ode.params.star > 0)){
beta.good <- 1
}
}
### evalulate loglikelihood for ode.params.star
llvals.star <- loglik.odesim(traj = traj.star,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params.star,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject ode.params.star
mh1 <- ll.star + ode.params.prior.loglik(ode.params.star,
ode.params.prior.min,
ode.params.prior.max)
mh2 <- ll.current + ode.params.prior.loglik(ode.params,
ode.params.prior.min,
ode.params.prior.max)
if(prop.type == "tnorm"){
mh1 <- mh1 + sum(dtnorm(ode.params,
ode.params.star,
sqrt(var.tune[2] * diag(Sigma.tune[[2]])),
ode.params.prior.min,
ode.params.prior.max,
log = TRUE))
mh2 <- mh2 + sum(dtnorm(ode.params.star,
ode.params,
sqrt(var.tune[2] * diag(Sigma.tune[[2]])),
ode.params.prior.min,
ode.params.prior.max,
log = TRUE))
}
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < exp(mh1 - mh2), accept new odesim params
if (exp(mh1 - mh2) > runif(1)){
ode.params <- ode.params.star
extra.params <- extra.params.star
traj <- traj.star
llvals <- llvals.star
ll.current <- ll.star
# if(lik.age){
# sympt.new.imputed=sympt.new.star
# }
accept[2] <- accept[2] + 1
}
#########################################################################
### Propose symp-frac option (if sf.choice == TRUE)
#########################################################################
if (sf.choice){
# propose model from uniform prior
K.star <- sample(n.sf, size = 1)
symp.star <- symp.vals[K.star]
# calculate traj and likelihoods for each possible model:
traj.vals <- list()
good.k <- TRUE
## trajectory given beta.star
traj.star <- try(traj.from.params(beta = beta.daily,
params = ode.params,
tf = end.day,
introday = introday,
const.params = const.params,
non.odesim.params = non.odesim.params,
odepath = odepath,
loc = loc,
symp = symp.star),
silent = TRUE)
if(class(traj.star) == "try-error"){
good.k <- FALSE
}
if (good.k){
llvals.star <- loglik.odesim(traj = traj.star,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject new symp frac option
mh1 <- ll.star # uniform prior and proposal...
mh2 <- ll.current # uniform prior and proposal...
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < mh1/mh2, accept symp.frac.star
if (exp(mh1 - mh2) > runif(1)){
K <- K.star
symp.cur <- symp.star
llvals <- llvals.star
ll.current <- ll.star
traj <- traj.star
}
}
}
#########################################################################
### Propose rr.params
#########################################################################
# sample new rr params
rr.params.star <- rtnorm(length(rr.params), rr.params,
sqrt(diag(var.tune[3] * Sigma.tune[[3]])), 0, 1)
rr.daily.star <- Z.rr %*% rr.params.star
if (max(rr.daily.star, na.rm = T) < 1){
# make sure rr < 1
### evaluate loglikelihood with rr.params.star
llvals.star <- loglik.odesim(traj = traj,
dp = data.processed,
report.rate = rr.daily.star,
nb.disp.params = lik.params,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject rr.params.star
mh1 <- ll.star + rr.prior.loglik(rr.params.star,
s2.rr,
precision = S.rr)
mh2 <- ll.current + rr.prior.loglik(rr.params,
s2.rr,
precision = S.rr)
if(prop.type == "tnorm"){
mh1 <- mh1 + sum(dtnorm(rr.params,
rr.params.star,
sqrt(var.tune[3] * diag(Sigma.tune[[3]])),
0, 1, log = TRUE))
mh2 <- mh2 + sum(dtnorm(rr.params.star,
rr.params,
sqrt(var.tune[3] * diag(Sigma.tune[[3]])),
0, 1, log = TRUE))
}
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < mh1/mh2, accept rr.params
if (exp(mh1 - mh2) > runif(1)){
rr.params <- rr.params.star
rr.daily <- rr.daily.star
llvals <- llvals.star
ll.current <- ll.star
# if(lik.age){
# sympt.new.imputed=sympt.new.star
# }
accept[3] <- accept[3] + 1
}
}
#########################################################################
### Propose s2.params
#########################################################################
if(!lik.hosp.curr & !lik.vent.curr & !lik.icu.curr){
# no need to propose new s2 parameters
} else {
# s2.params.star <- t(rtnorm(length(s2.params), s2.params, sqrt(diag(var.tune[4] * Sigma.tune[[4]])),0,Inf))
s2.params.star <- exp(t(rnorm(length(s2.params),
log(s2.params),
sqrt(diag(var.tune[4] * Sigma.tune[[4]])))))
### evaluate loglikelihood for s2.params.star
llvals.star <- loglik.odesim(traj = traj,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params,
s2.hosp = s2.params.star[1],
s2.icu = s2.params.star[2],
s2.vent = s2.params.star[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject s2.params.star
# determine which s2 values to include or not
s2.true <- rep(FALSE, 3)
mh1 <- 0; mh2 <- 0
if(length(llvals.star$ll.hosp) > 0){
mh1 <- mh1 + llvals.star$ll.hosp
mh2 <- mh2 + llvals$ll.hosp
s2.true[1] <- TRUE
}
if(length(llvals.star$ll.icu) > 0){
mh1 <- mh1 + llvals.star$ll.icu
mh2 <- mh2 + llvals$ll.icu
s2.true[2] <- TRUE
}
if(length(llvals.star$ll.vent) > 0){
mh1 <- mh1 + llvals.star$ll.vent
mh2 <- mh2 + llvals$ll.vent
s2.true[3] <- TRUE
}
mh1 <- mh1 + s2.params.prior.loglik(s2.params.star[s2.true])
mh2 <- mh2 + s2.params.prior.loglik(s2.params[s2.true])
if(prop.type == "tnorm"){
mh1 <- mh1 + sum(log(s2.params.star[s2.true]))
#+sum(dtnorm(s2.params,s2.params.star,sqrt(var.tune[4]*diag(Sigma.tune[[4]])),0,Inf,log=TRUE))
mh2 = mh2 + sum(log(s2.params[s2.true]))
#+sum(dtnorm(s2.params.star,s2.params,sqrt(var.tune[4]*diag(Sigma.tune[[4]])),0,Inf,log=TRUE))
}
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < exp(mh1 - mh2), accept new s2.params
if (exp(mh1 - mh2) > runif(1)){
s2.params[s2.true] <- s2.params.star[s2.true]
llvals <- llvals.star
ll.current <- ll.star
# if(lik.age){
# sympt.new.imputed=sympt.new.star
# }
accept[4] <- accept[4] + 1
}
}
#########################################################################
### Propose lik.params (NB dispersion params)
#########################################################################
if(!fixed.nb.disp){
lik.params.star <- exp(t(rnorm(length(lik.params),
log(lik.params),
sqrt(diag(var.tune[5] * Sigma.tune[[5]])))))
### evaluate loglikelihood with lik.params.star
llvals.star <- loglik.odesim(traj = traj,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params.star,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params=extra.const.params)
ll.star <- llvals.star$ll
## accept/reject lik.params.star
mh1 <- ll.star +
lik.params.prior.loglik(lik.params.star) +
sum(log(lik.params.star))
mh2 <- ll.current +
lik.params.prior.loglik(lik.params) +
sum(log(lik.params))
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < mh1/mh2, accept new lik.params
if (exp(mh1 - mh2) > runif(1)){
lik.params <- lik.params.star
llvals <- llvals.star
ll.current <- ll.star
# if(lik.age){
# sympt.new.imputed=sympt.new.star
# }
accept[5] <- accept[5] + 1
}
}
#########################################################################
### Propose new s2.beta and s2.rr parameters
#########################################################################
### conjugate updates from inverse gamma marg. post.
s2.beta <- 1/rgamma(1, shape = s + num.beta / 2,
rate = r + .5 * (t(beta) %*% (S %*% beta)))
s2.rr <- 1/rgamma(1, shape = s + num.rr / 2,
rate = r + .5 * (t(rr.params) %*% (S.rr %*% rr.params)))
#########################################################################
### Save parameters
#########################################################################
if (iter %% thin == 0){
beta.save[iter/thin,] <- beta
ode.params.save[iter/thin,] <- ode.params
rr.params.save[iter/thin,] <- rr.params
s2.params.save[iter/thin,] <- s2.params
lik.params.save[iter/thin,] <- lik.params
s2.beta.save[iter/thin] <- s2.beta
s2.rr.save[iter/thin] <- s2.rr
loglik.save[iter/thin] <- ll.current
# traj.save[iter/thin,,]=as.matrix(traj)
if (sf.choice){
K.vals[iter/thin] <- K
}
}
#########################################################################
### Update adaptive tuning variances
#########################################################################
if (iter %% adapt.iter == 0){
never.adapt <- FALSE
### Using Shaby and Wells adaptive scheme for RWM...
if(adapt.type == "ShabyWells"){
# default constants
# c0 <- 1; c1 <- 0.8;
r.opt <- 0.234
r.hat <- accept / adapt.iter
t.adapt <- iter/adapt.iter + t.adapt.start
gamma.1 <- 1 / (t.adapt)^c1
gamma.2 <- c0 * gamma.1
var.tune <- exp(log(var.tune) + gamma.2 * (r.hat - r.opt))
accept.tot <- accept.tot + accept
accept <- rep(0, length(accept))
cat("\n","current accept rate =", r.hat)
cat("\n","new proposal var =", var.tune, "\n")
}
}
#########################################################################
### plotting output to assess convergence
#########################################################################
if(iter %% plot.rate == 0 & plot.save == TRUE){
pdf(plot.name, width = 10, height = 7)
matplot(beta.save[1:iter/thin,],
type = "l",
main = "beta")
matplot(ode.params.save[1:iter/thin,],
type = "l",
main = "ode.params")
matplot(rr.params.save[1:iter/thin,],
type = "l",
main = "report.rate")
matplot(s2.params.save[1:iter/thin,],
type = "l",
main = "s2.params")
matplot(log(lik.params.save[1:iter/thin,]),
type = "l",
main = "log(NB dispersion params)")
dp <- data.process(df, loc = loc)
tp <- traj.process(traj, loc = loc, odesim.version = odesim.ver)
plots.odesim(dp, tp, rr.daily[-length(rr.daily)])
dev.off()
}
} ## end MCMC
if(print.iter){
cat("\n") # new line
}
#########################################
### 6. Adaptive structures for output ###
#########################################
accept.rate.final <- accept / n.mcmc
Sigma.hat <- list(cov(beta.save),
cov(ode.params.save),
cov(rr.params.save),
cov(s2.params.save),
cov(lik.params.save))
############################
### 7. Save MCMC results ###
############################
### output MCMC samples in a list
list(beta = beta.save,
rr.params = rr.params.save,
ode.params = ode.params.save,
lik.params = lik.params.save,
s2.beta = s2.beta.save,
s2.rr = s2.rr.save,
s2.params = s2.params.save,
traj.save = traj,
sf.choice = sf.choice,
sf.vals = K.vals,
spline.beta = spline.beta,
spline.rr = spline.rr,
df = df,
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
odesim.ver = odesim.ver,
odepath = odepath,
introday = introday,
Sigma.hat = Sigma.hat,
Sigma.tune = Sigma.tune,
var.tune = var.tune,
loglik.final.iter = ll.current,
loglik.vals.final.iter = llvals,
loglik = loglik.save,
accept.rate = accept.rate.final,
t.adapt.end = t.adapt,
ode.params.prior.min = ode.params.prior.min,
ode.params.prior.max = ode.params.prior.max,
thin = thin,
loc = loc,
adapt.type = adapt.type,
const.params = const.params,
non.odesim.params = non.odesim.params,
P=P,
extra.params=extra.params,
extra.const.params=extra.const.params,
active.surv=active.surv,
today = Sys.Date() )
}
| /Nov2020/inference/code/mcmc-odesim.R | no_license | bonilab/covid19-reopening-RI-MA-PA | R | false | false | 42,685 | r | #!/usr/bin/env Rscript
# mcmc-odesim.R
# authors: Ephraim Hanks, Nathan Wikle, Emily Strong
# last edited: 20 Nov 2020
#
# This file defines the mcmc.odesim function, which generates
# samples from the posterior of all model parameters using
# MCMC. A large number of function arguments are available
# (see function definition); output is returned as a large list.
mcmc.odesim <- function(
n.mcmc, # number of MCMC iterations to run
df, # state-level covid data (must be a data frame)
odepath, # path to "odesim"
odesim.ver = "v5", # version of odesim (defaults to "v5")
lik.tot = TRUE, # evaluate likelihood for total new cases ONLY (ie, set F and use lik.age for age-strat. data; default = TRUE)
lik.age = FALSE, # evaluate likelihood for age-struc. new cases and hosps AND total new cases and hosps (default = FALSE)
lik.hosp.new = TRUE, # evaluate likelihood for new hosp. cases (default = FALSE)
lik.hosp.curr = FALSE, # evaluate likelihood for current hosp. (default = FALSE)
lik.icu.curr = FALSE, # evaluate likelihood for current icu admits (default = FALSE)
lik.vent.curr = FALSE, # evaluate likelihood for current vent admits (default = FALSE)
lik.tot.deaths = FALSE, # evaluate likelihood for tot. deaths ONLY (ie, set F and use lik.age.deaths for age-strat. data; default = FALSE)
lik.age.deaths = FALSE, # evaluate likelihood for age-struc. new deaths and total new deaths (default = FALSE)
lik.home.deaths = FALSE, # evaluate likelihood for new home deaths (default = FALSE)
lik.hosp.deaths = FALSE, # evaluate likelihood for new hosp. deaths (default = FALSE)
lik.hosp.discharges = FALSE, # evaluate likelihood for hospital discharges (default = FALSE)
case.constraint = FALSE, # constrain fit to cumulative cases to be within 10% of data (default = FALSE)
active.surv = FALSE, # include active surveillance data (default = FALSE)
p.asympt = 0.4, # proportion of asymptomatic individuals (default = 0.4; CAUTION: DO NOT CHANGE UNLESS ODESIM REFLECTS A DIFFERENT VALUE!)
beta.start, # starting values for contact rate spline loadings
spline.beta, # fda "spline" object spanning the time window
report.rate.params.start, # rate at which infecteds report (vector or scalar)
spline.rr, # spline object (iSpline, bSpline, or constant vec) spanning time window
ode.params.start = NULL, # odesim param starting values (named vector matching odesim CLOs)
const.params = NULL, # odesim CLOs, to be kept constant
ode.params.prior.min = -Inf, # vector of lower bounds for odesim params (uniform priors)
ode.params.prior.max = Inf, # vector of upper bounds for odesim params (uniform priors)
non.odesim.params = NULL, # non-odesim param starting values (eg, hosp.report.rate)
lik.params.start = NULL, # starting values for dispersion parameters in NB likelihood
fixed.nb.disp = FALSE, # Boolean indicating if NB dispersion params should be fixed (default = FALSE)
start.day = 61, # start day of odesim (default = 61, DON'T CHANGE)
end.day, # end day of odesim
introday = NULL, # day of first infected
loc = "RI", # US state used for analysis (one of "RI" (default), "MA", or "PA")
s2.hosp.start = .01, # initial value for current hosp variance hyperparam
s2.icu.start = .01, # initial value for current icu variance hyperparam
s2.vent.start = .01, # initial value for current vent variance hyperparam
s2.beta.start = .01, # initial value for beta prior (multivar. normal) marg. variance hyperparam
s2.rr.start = .01, # initial value for rr prior (multivar. normal) marg. variance hyperparam
adapt.iter = 100, # adaptive tuning update interval (log-adaptive tuning on MH based on Shaby and Wells, 2011)
indep.mh = FALSE, # if TRUE, propose beta separate from other params
t.adapt.start = 0, # number of times adaptive tuning var. has been updated (default = 0)
prop.type = "tnorm", # MH proposal type (default = "tnorm")
adapt.type = "ShabyWells", # adaptive tuning type (default = "ShabyWells")
c0 = 1, # Shaby Wells adaptive tuning constant c0
c1 = 0.8, # Shaby Wells adaptive tuning constant c1
var.tune = NULL, # list of tuning param variances, order = (beta, ode.params, rr, s2, lik)
Sigma.tune = NULL, # list of tuning param covars, order = (beta, ode.params, rr, s2, lik)
p.vecs, # weekly vector of delay probabilities (should be c(1, rep(0,6)) unless good reason otherwise)
thin = 1, # rate to thin saved posterior samples (default = 1)
plot.save = TRUE, # plot trace plots while mcmc is running (default = TRUE)
plot.rate = 10, # refresh rate on trace plots (default = 10)
plot.name = "traceplots.pdf", # name of trace plots (default = "traceplots.pdf")
print.iter = FALSE, # print iteration number everytime sample is saved (default = FALSE)
sf.choice = FALSE, # estimate proportion of symptomatics from 7 possible age-strat. combinations (default = FALSE)
) {
########################
### 1. Preliminaries ###
########################
# adaptive structure
t.adapt <- t.adapt.start
# process data frame for likelihood evaluation
data.processed <- data.process(df, loc = loc)
# add data.processed results to global environment
list2env(data.processed, globalenv())
# useful constants
n.days <- length(days)
num.beta <- length(beta.start)
num.rr <- length(report.rate.params.start)
num.ode.params <- length(ode.params.start)
num.lik.params <- length(lik.params.start)
### structures to save parameter samples:
beta.save <- matrix(NA_real_, nrow = n.mcmc/thin, ncol = num.beta)
rr.params.save <- matrix(NA_real_, nrow = n.mcmc/thin, ncol = num.rr)
ode.params.save <- matrix(NA_real_, nrow = n.mcmc/thin, ncol = num.ode.params)
ode.params.names <- names(ode.params.start)
colnames(ode.params.save) <- ode.params.names
if (num.lik.params > 0){
lik.params.save <- matrix(NA_real_, nrow = n.mcmc/thin, ncol = num.lik.params)
} else {
lik.params.save <- NULL
}
s2.beta.save <- rep(NA_real_, n.mcmc/thin)
s2.rr.save <- rep(NA_real_, n.mcmc/thin)
s2.params.save <- matrix(NA_real_, n.mcmc/thin, 3)
loglik.save <- rep(NA_real_, n.mcmc/thin)
### initialize parameters:
# betas
beta <- beta.start # vector of beta parameters
Z <- eval.basis(spline.beta, start.day:end.day) # beta spline basis functions
beta.daily <- Z %*% beta
# reporting rate
if (is.matrix(spline.rr)){
Z.rr <- spline.rr
} else {
Z.rr <- eval.basis(spline.rr, start.day:end.day)
}
rr.params <- report.rate.params.start
rr.daily <- Z.rr %*% rr.params
# hyperparameters
s2.hosp <- s2.hosp.start
s2.icu <- s2.icu.start
s2.vent <- s2.vent.start
s2.params <- c(s2.hosp, s2.icu, s2.vent)
s2.beta <- s2.beta.start # marginal variance of beta prior
s2.rr <- s2.rr.start
ode.params <- ode.params.start
lik.params <- lik.params.start
lik.params.star <- lik.params ## needs to be NULL if lik.params = NULL so that loglik.odesim gets right values
# extra parameters (like hospitalization reporting rate )
extra.params <- NULL
extra.const.params <- NULL
extra.params.fitted.idx <- integer()
extra.params.const.idx <- integer()
if(length(non.odesim.params)>0){
for(k in 1:length(non.odesim.params)){
extra.params.fitted.idx <- c(extra.params.fitted.idx,
which(names(ode.params) == non.odesim.params[k]))
}
if (length(extra.params.fitted.idx) > 0){
extra.params <- ode.params[extra.params.fitted.idx]
}
for(k in 1:length(non.odesim.params)){
extra.params.const.idx <- c(extra.params.const.idx,
which(names(const.params) == non.odesim.params[k]))
}
if (length(extra.params.const.idx) > 0){
extra.const.params <- const.params[extra.params.const.idx]
}
}
### parameter for symptomatic fractions
if (sf.choice){
# if true, create structure for sampling symp-frac settings (6 options)
# list of model possibilities
symp.vals <- c("", "-symp-frac-davies", "-symp-frac-equal 0.3",
"-symp-frac-equal 0.4", "-symp-frac-equal 0.5",
"-symp-frac-equal 0.6", "-symp-frac-equal 0.7")
# number of symp-frac options
n.sf <- length(symp.vals)
# initialize to random starting symp-frac option
K <- sample.int(n.sf, size = 1)
# structure to store models
K.vals <- rep(NA_integer_, n.mcmc/thin)
symp.cur <- symp.vals[K]
} else {
symp.cur <- NULL
K.vals <- NULL
}
### create a matrix P which can be used to calculate Poisson rates after delay
### note: this is no longer used for any state
P <- matrix(0, nrow = end.day + max(sapply(p.vecs, length)), ncol = end.day+1)
colnames(P) <- paste("Day", 1:(end.day+1), sep = " ")
for(j in 1:ncol(P)){
if (j < 74){
## period 1: beginning - March 13
P[j:(j + length(p.vecs[[1]]) - 1), j] <- p.vecs[[1]]
} else if ((j >= 74) & (j < 84)){
## period 2: March 14 - March 23
P[j:(j + length(p.vecs[[2]]) - 1), j] <- p.vecs[[2]]
} else if ((j >= 84) & (j < 88)){
## period 3: March 24 - March 27
P[j:(j + length(p.vecs[[3]]) - 1), j] <- p.vecs[[3]]
} else {
## period 4: March 28 - present
P[j:(j + length(p.vecs[[4]]) - 1), j] <- p.vecs[[4]]
}
}
#################
### 2. Priors ###
#################
### beta prior: random walk
### (penalized regression spline w/ 1st-order diffs)
D <- diff(diag(num.beta), differences = 1)
S <- crossprod(D)
### beta ~ N(0, s2.beta * S^-1)
### Log-density (up to an additive constant) of the Gaussian random-walk
### prior beta ~ N(0, s2.beta * precision^-1), restricted to beta >= 0.
###
### @param beta      vector of spline loadings for the contact rate
### @param s2.beta   marginal variance hyperparameter of the prior
### @param precision prior precision matrix (1st-order difference penalty S)
### @return log prior density (-Inf when any loading is negative)
beta.prior.loglik <- function(beta, s2.beta, precision = S){
  # Support constraint: every spline loading must be non-negative.
  if (any(beta < 0)) {
    return(-Inf)
  }
  # Quadratic form -beta' P beta / (2 * s2.beta); a 1x1 matrix, which is
  # fine downstream because it is only ever added to scalar log-likelihoods.
  quad <- t(beta) %*% (precision %*% beta)
  -quad / (2 * s2.beta)
}
### rr prior: random walk
### (same prior for report.rate params as for beta...)
D.rr <- diff(diag(num.rr), differences = 1)
S.rr <- crossprod(D.rr)
### rr ~ N(0, s2.rr * S.rr^-1)
### Log-density (up to an additive constant) of the Gaussian random-walk
### prior on the reporting-rate spline loadings, restricted to [0, 1].
###
### @param rr        vector of reporting-rate spline loadings
### @param s2.rr     marginal variance hyperparameter of the prior
### @param precision prior precision matrix (1st-order difference penalty S.rr)
### @return log prior density (-Inf when any loading falls outside [0, 1])
rr.prior.loglik <- function(rr, s2.rr, precision = S.rr){
  # Support constraint: each loading must lie in the unit interval.
  out.of.bounds <- (min(rr) < 0) | (max(rr) > 1)
  if (out.of.bounds) {
    return(-Inf)
  }
  # Quadratic form -rr' P rr / (2 * s2.rr); 1x1 matrix, used in scalar sums.
  quad <- t(rr) %*% (precision %*% rr)
  -quad / (2 * s2.rr)
}
### dispersion parameter: exponential prior
### disp ~ Exp(lambda = 100)
### Exponential prior log-density for the negative-binomial dispersion
### parameters. Returns 0 (flat contribution) when there are no
### dispersion parameters to evaluate.
###
### @param lik.params vector of NB dispersion parameters (possibly empty/NULL)
### @param lambda     vector of Exp rate hyperparameters, one per parameter
### @return summed log prior density across all dispersion parameters
lik.params.prior.loglik <- function(lik.params,
                                    lambda = rep(100, length(lik.params))){
  # No dispersion parameters (e.g. lik.params = NULL): flat contribution.
  if (length(lik.params) == 0) {
    return(0)
  }
  # Independent Exp(lambda) priors on each dispersion parameter.
  sum(dexp(lik.params, rate = lambda, log = TRUE))
}
### s2.beta prior: inverse gamma
### s2.beta ~ IG(s = shape, r = rate)
# initialize: s = 1, r = 1
s <- 1; r <- 1
# Uniform priors for odesim parameters
### Uniform (box) prior for the odesim parameters: log-density 0 inside
### [ode.params.min, ode.params.max] (elementwise), -Inf outside.
###
### @param ode.params     vector of odesim parameter values
### @param ode.params.min vector (or scalar) of lower prior bounds
### @param ode.params.max vector (or scalar) of upper prior bounds
### @return 0 when all parameters are in bounds, -Inf otherwise
ode.params.prior.loglik <- function(ode.params, ode.params.min, ode.params.max){
  below <- ode.params < ode.params.min
  above <- ode.params > ode.params.max
  if (any(below) || any(above)) {
    return(-Inf)
  }
  0
}
### Uniform priors for s2 parameters
### Uniform (improper) prior for the s2 variance hyperparameters:
### log-density 0 on [0, Inf)^k, -Inf if any component is negative.
###
### Fix: the original test `min(s2.params) < 0` throws a warning and
### evaluates min() to Inf for zero-length input, which occurs when this
### is called on s2.params.star[s2.true] with no active s2 components
### (no hosp/icu/vent likelihood terms). any() returns FALSE on empty
### input, so the empty case cleanly yields 0 with no warning; behavior
### on non-empty input is unchanged.
###
### @param s2.params vector of variance hyperparameters (possibly empty)
### @return 0 when all components are non-negative, -Inf otherwise
s2.params.prior.loglik <- function(s2.params){
  if (any(s2.params < 0)) {
    return(-Inf)
  }
  0
}
################################
### 3. Likelihood evaluation ###
################################
### simulate trajectory using current beta values:
traj <- traj.from.params(beta = beta.daily,
params = ode.params,
tf = end.day,
introday = introday,
const.params = const.params,
non.odesim.params = non.odesim.params,
odepath = odepath,
loc = loc,
symp = symp.cur)
### evaluate logliklihood under initial conditions
llvals <- loglik.odesim(traj = traj,
df = df,
dp = data.processed,
odesim.ver = odesim.ver,
P = P,
loc = loc,
report.rate = rr.daily,
nb.disp.params = lik.params,
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
s2.hosp = s2.hosp,
s2.icu = s2.icu,
s2.vent = s2.vent,
extra.params = extra.params, ### if cumul. hosp. reporting rate is fitted
extra.const.params = extra.const.params) ### if cumul. hosp. reporting rate is constant
ll.current <- llvals$ll
ll.new <- llvals$ll.new
ll.hosp.new <- llvals$ll.new
###################################
### 4. Adaptive proposal set-up ###
###################################
accept <- rep(0, length(var.tune))
accept.tot <- accept
never.adapt <- TRUE
##########################
### 5. MCMC iterations ###
##########################
for(iter in 1:n.mcmc){
# print-out iteration number every 100 iterations
if (iter %% 100 == 0 & print.iter){
cat(iter, " ")
}
#########################################################################
### Propose beta
#########################################################################
### setting up error catching - reject beta if odesim fails
beta.good <- 0
while(beta.good < 1){
# if(prop.type=="norm"){
# beta.star <- t(rmvnorm(1, c(beta), var.tune[1] * Sigma.tune[[1]]))
# }
# if(prop.type=="tnorm"){
# beta.star <- rtnorm(length(beta),beta,sqrt(var.tune[1]*diag(Sigma.tune[[1]])),0,Inf)
# }
# propose beta.star
beta.star <- exp(rnorm(length(beta), log(beta),
sqrt(var.tune[1] * diag(Sigma.tune[[1]]))))
beta.daily.star <- Z %*% beta.star
### trajectory given beta.star
traj.star <- try(traj.from.params(beta = beta.daily.star,
params = ode.params,
tf = end.day,
introday = introday,
const.params = const.params,
non.odesim.params = non.odesim.params,
odepath = odepath,
loc = loc,
symp = symp.cur), silent = TRUE)
# check if error was thrown. if not, leave while loop
if(class(traj.star) != "try-error"){
beta.good <- 1
}
}
### evaluate loglikelihood for beta.star
llvals.star <- loglik.odesim(traj = traj.star,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject beta.star
mh1 <- ll.star + beta.prior.loglik(beta.star, s2.beta) + sum(log(beta.star))
mh2 <- ll.current + beta.prior.loglik(beta, s2.beta) + sum(log(beta))
# if(prop.type=="tnorm"){
# mh1=mh1+sum(dtnorm(beta,beta.star,sqrt(var.tune[1]*diag(Sigma.tune[[1]])),0,Inf,log=TRUE))
# mh2=mh2+sum(dtnorm(beta.star,beta,sqrt(var.tune[1]*diag(Sigma.tune[[1]])),0,Inf,log=TRUE))
# }
if(is.na(mh1)){
mh1 <- -Inf
}
####if Unif(0,1) < mh1/mh2, accept new beta and disp parameters
if (exp(mh1 - mh2) > runif(1)){
### accept beta.star
beta <- beta.star
beta.daily <- beta.daily.star
traj <- traj.star
llvals=llvals.star
ll.current <- ll.star
## if(lik.age){
## sympt.new.imputed=sympt.new.star
## }
accept[1] <- accept[1] + 1
}
#########################################################################
### Propose ode.params
#########################################################################
### setting up error catching - reject ode.params if odesim fails
beta.good <- 0
while(beta.good < 1){
if(prop.type == "norm"){
ode.params.star <- t(rmvnorm(1, ode.params, var.tune[2] * Sigma.tune[[2]]))
}
if(prop.type == "tnorm"){
ode.params.star <- (rtnorm(length(ode.params), ode.params,
sqrt(var.tune[2] * diag(Sigma.tune[[2]])), ode.params.prior.min, ode.params.prior.max))
}
names(ode.params.star) <- ode.params.names
extra.params.star <- ode.params.star[extra.params.fitted.idx]
### trajectory given ode.params.star
traj.star <- try(traj.from.params(beta = beta.daily,
params = ode.params.star,
tf = end.day,
introday = introday,
const.params = const.params,
non.odesim.params = non.odesim.params,
odepath = odepath,
loc = loc,
symp = symp.cur), silent = TRUE)
if(class(traj.star) != "try-error" & min(ode.params.star > 0)){
beta.good <- 1
}
}
### evaluate loglikelihood for ode.params.star
llvals.star <- loglik.odesim(traj = traj.star,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params.star,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject ode.params.star
mh1 <- ll.star + ode.params.prior.loglik(ode.params.star,
ode.params.prior.min,
ode.params.prior.max)
mh2 <- ll.current + ode.params.prior.loglik(ode.params,
ode.params.prior.min,
ode.params.prior.max)
if(prop.type == "tnorm"){
mh1 <- mh1 + sum(dtnorm(ode.params,
ode.params.star,
sqrt(var.tune[2] * diag(Sigma.tune[[2]])),
ode.params.prior.min,
ode.params.prior.max,
log = TRUE))
mh2 <- mh2 + sum(dtnorm(ode.params.star,
ode.params,
sqrt(var.tune[2] * diag(Sigma.tune[[2]])),
ode.params.prior.min,
ode.params.prior.max,
log = TRUE))
}
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < mh1/mh2, accept new odesim params
if (exp(mh1 - mh2) > runif(1)){
ode.params <- ode.params.star
extra.params <- extra.params.star
traj <- traj.star
llvals <- llvals.star
ll.current <- ll.star
# if(lik.age){
# sympt.new.imputed=sympt.new.star
# }
accept[2] <- accept[2] + 1
}
#########################################################################
### Propose symp-frac option (if sf.choice == TRUE)
#########################################################################
if (sf.choice){
# propose model from uniform prior
K.star <- sample(n.sf, size = 1)
symp.star <- symp.vals[K.star]
# calculate traj and likelihoods for each possible model:
traj.vals <- list()
good.k <- TRUE
## trajectory given beta.star
traj.star <- try(traj.from.params(beta = beta.daily,
params = ode.params,
tf = end.day,
introday = introday,
const.params = const.params,
non.odesim.params = non.odesim.params,
odepath = odepath,
loc = loc,
symp = symp.star),
silent = TRUE)
if(class(traj.star) == "try-error"){
good.k <- FALSE
}
if (good.k){
llvals.star <- loglik.odesim(traj = traj.star,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject new symp frac option
mh1 <- ll.star # uniform prior and proposal...
mh2 <- ll.current # uniform prior and proposal...
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < mh1/mh2, accept symp.frac.star
if (exp(mh1 - mh2) > runif(1)){
K <- K.star
symp.cur <- symp.star
llvals <- llvals.star
ll.current <- ll.star
traj <- traj.star
}
}
}
#########################################################################
### Propose rr.params
#########################################################################
# sample new rr params
rr.params.star <- rtnorm(length(rr.params), rr.params,
sqrt(diag(var.tune[3] * Sigma.tune[[3]])), 0, 1)
rr.daily.star <- Z.rr %*% rr.params.star
if (max(rr.daily.star, na.rm = T) < 1){
# make sure rr < 1
### evaluate log-likelihood with rr.params.star
llvals.star <- loglik.odesim(traj = traj,
dp = data.processed,
report.rate = rr.daily.star,
nb.disp.params = lik.params,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject rr.params.star
mh1 <- ll.star + rr.prior.loglik(rr.params.star,
s2.rr,
precision = S.rr)
mh2 <- ll.current + rr.prior.loglik(rr.params,
s2.rr,
precision = S.rr)
if(prop.type == "tnorm"){
mh1 <- mh1 + sum(dtnorm(rr.params,
rr.params.star,
sqrt(var.tune[3] * diag(Sigma.tune[[3]])),
0, 1, log = TRUE))
mh2 <- mh2 + sum(dtnorm(rr.params.star,
rr.params,
sqrt(var.tune[3] * diag(Sigma.tune[[3]])),
0, 1, log = TRUE))
}
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < mh1/mh2, accept rr.params
if (exp(mh1 - mh2) > runif(1)){
rr.params <- rr.params.star
rr.daily <- rr.daily.star
llvals <- llvals.star
ll.current <- ll.star
# if(lik.age){
# sympt.new.imputed=sympt.new.star
# }
accept[3] <- accept[3] + 1
}
}
#########################################################################
### Propose s2.params
#########################################################################
if(!lik.hosp.curr & !lik.vent.curr & !lik.icu.curr){
# no need to propose new s2 parameters
} else {
# s2.params.star <- t(rtnorm(length(s2.params), s2.params, sqrt(diag(var.tune[4] * Sigma.tune[[4]])),0,Inf))
s2.params.star <- exp(t(rnorm(length(s2.params),
log(s2.params),
sqrt(diag(var.tune[4] * Sigma.tune[[4]])))))
### evaluate log-likelihood for s2.params.star
llvals.star <- loglik.odesim(traj = traj,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params,
s2.hosp = s2.params.star[1],
s2.icu = s2.params.star[2],
s2.vent = s2.params.star[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params = extra.const.params)
ll.star <- llvals.star$ll
### accept/reject s2.params.star
# determine which s2 values to include or not
s2.true <- rep(FALSE, 3)
mh1 <- 0; mh2 <- 0
if(length(llvals.star$ll.hosp) > 0){
mh1 <- mh1 + llvals.star$ll.hosp
mh2 <- mh2 + llvals$ll.hosp
s2.true[1] <- TRUE
}
if(length(llvals.star$ll.icu) > 0){
mh1 <- mh1 + llvals.star$ll.icu
mh2 <- mh2 + llvals$ll.icu
s2.true[2] <- TRUE
}
if(length(llvals.star$ll.vent) > 0){
mh1 <- mh1 + llvals.star$ll.vent
mh2 <- mh2 + llvals$ll.vent
s2.true[3] <- TRUE
}
mh1 <- mh1 + s2.params.prior.loglik(s2.params.star[s2.true])
mh2 <- mh2 + s2.params.prior.loglik(s2.params[s2.true])
if(prop.type == "tnorm"){
mh1 <- mh1 + sum(log(s2.params.star[s2.true]))
#+sum(dtnorm(s2.params,s2.params.star,sqrt(var.tune[4]*diag(Sigma.tune[[4]])),0,Inf,log=TRUE))
mh2 = mh2 + sum(log(s2.params[s2.true]))
#+sum(dtnorm(s2.params.star,s2.params,sqrt(var.tune[4]*diag(Sigma.tune[[4]])),0,Inf,log=TRUE))
}
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < mh1/mh2, accept new s2.params
if (exp(mh1 - mh2) > runif(1)){
s2.params[s2.true] <- s2.params.star[s2.true]
llvals <- llvals.star
ll.current <- ll.star
# if(lik.age){
# sympt.new.imputed=sympt.new.star
# }
accept[4] <- accept[4] + 1
}
}
#########################################################################
### Propose lik.params (NB dispersion params)
#########################################################################
if(!fixed.nb.disp){
lik.params.star <- exp(t(rnorm(length(lik.params),
log(lik.params),
sqrt(diag(var.tune[5] * Sigma.tune[[5]])))))
### evaluate log-likelihood with lik.params.star
llvals.star <- loglik.odesim(traj = traj,
dp = data.processed,
report.rate = rr.daily,
nb.disp.params = lik.params.star,
s2.hosp = s2.params[1],
s2.icu = s2.params[2],
s2.vent = s2.params[3],
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
active.surv = active.surv,
p.asympt = p.asympt,
case.constraint = case.constraint,
df = df,
odesim.ver = odesim.ver,
P = P,
loc = loc,
extra.params = extra.params,
extra.const.params=extra.const.params)
ll.star <- llvals.star$ll
## accept/reject lik.params.star
mh1 <- ll.star +
lik.params.prior.loglik(lik.params.star) +
sum(log(lik.params.star))
mh2 <- ll.current +
lik.params.prior.loglik(lik.params) +
sum(log(lik.params))
if(is.na(mh1)){
mh1 <- -Inf
}
### if Unif(0,1) < mh1/mh2, accept new lik.params
if (exp(mh1 - mh2) > runif(1)){
lik.params <- lik.params.star
llvals <- llvals.star
ll.current <- ll.star
# if(lik.age){
# sympt.new.imputed=sympt.new.star
# }
accept[5] <- accept[5] + 1
}
}
#########################################################################
### Propose new s2.beta and s2.rr parameters
#########################################################################
### conjugate updates from inverse gamma marg. post.
s2.beta <- 1/rgamma(1, shape = s + num.beta / 2,
rate = r + .5 * (t(beta) %*% (S %*% beta)))
s2.rr <- 1/rgamma(1, shape = s + num.rr / 2,
rate = r + .5 * (t(rr.params) %*% (S.rr %*% rr.params)))
#########################################################################
### Save parameters
#########################################################################
if (iter %% thin == 0){
beta.save[iter/thin,] <- beta
ode.params.save[iter/thin,] <- ode.params
rr.params.save[iter/thin,] <- rr.params
s2.params.save[iter/thin,] <- s2.params
lik.params.save[iter/thin,] <- lik.params
s2.beta.save[iter/thin] <- s2.beta
s2.rr.save[iter/thin] <- s2.rr
loglik.save[iter/thin] <- ll.current
# traj.save[iter/thin,,]=as.matrix(traj)
if (sf.choice){
K.vals[iter/thin] <- K
}
}
#########################################################################
### Update adaptive tuning variances
#########################################################################
if (iter %% adapt.iter == 0){
never.adapt <- FALSE
### Using Shaby and Wells adaptive scheme for RWM...
if(adapt.type == "ShabyWells"){
# default constants
# c0 <- 1; c1 <- 0.8;
r.opt <- 0.234
r.hat <- accept / adapt.iter
t.adapt <- iter/adapt.iter + t.adapt.start
gamma.1 <- 1 / (t.adapt)^c1
gamma.2 <- c0 * gamma.1
var.tune <- exp(log(var.tune) + gamma.2 * (r.hat - r.opt))
accept.tot <- accept.tot + accept
accept <- rep(0, length(accept))
cat("\n","current accept rate =", r.hat)
cat("\n","new proposal var =", var.tune, "\n")
}
}
#########################################################################
### plotting output to assess convergence
#########################################################################
if(iter %% plot.rate == 0 & plot.save == TRUE){
pdf(plot.name, width = 10, height = 7)
matplot(beta.save[1:iter/thin,],
type = "l",
main = "beta")
matplot(ode.params.save[1:iter/thin,],
type = "l",
main = "ode.params")
matplot(rr.params.save[1:iter/thin,],
type = "l",
main = "report.rate")
matplot(s2.params.save[1:iter/thin,],
type = "l",
main = "s2.params")
matplot(log(lik.params.save[1:iter/thin,]),
type = "l",
main = "log(NB dispersion params)")
dp <- data.process(df, loc = loc)
tp <- traj.process(traj, loc = loc, odesim.version = odesim.ver)
plots.odesim(dp, tp, rr.daily[-length(rr.daily)])
dev.off()
}
} ## end MCMC
if(print.iter){
cat("\n") # new line
}
#########################################
### 6. Adaptive structures for output ###
#########################################
accept.rate.final <- accept / n.mcmc
Sigma.hat <- list(cov(beta.save),
cov(ode.params.save),
cov(rr.params.save),
cov(s2.params.save),
cov(lik.params.save))
############################
### 7. Save MCMC results ###
############################
### output MCMC samples in a list
list(beta = beta.save,
rr.params = rr.params.save,
ode.params = ode.params.save,
lik.params = lik.params.save,
s2.beta = s2.beta.save,
s2.rr = s2.rr.save,
s2.params = s2.params.save,
traj.save = traj,
sf.choice = sf.choice,
sf.vals = K.vals,
spline.beta = spline.beta,
spline.rr = spline.rr,
df = df,
lik.tot = lik.tot,
lik.age = lik.age,
lik.hosp.new = lik.hosp.new,
lik.hosp.curr = lik.hosp.curr,
lik.icu.curr = lik.icu.curr,
lik.vent.curr = lik.vent.curr,
lik.tot.deaths = lik.tot.deaths,
lik.home.deaths = lik.home.deaths,
lik.hosp.deaths = lik.hosp.deaths,
lik.age.deaths = lik.age.deaths,
lik.hosp.discharges = lik.hosp.discharges,
odesim.ver = odesim.ver,
odepath = odepath,
introday = introday,
Sigma.hat = Sigma.hat,
Sigma.tune = Sigma.tune,
var.tune = var.tune,
loglik.final.iter = ll.current,
loglik.vals.final.iter = llvals,
loglik = loglik.save,
accept.rate = accept.rate.final,
t.adapt.end = t.adapt,
ode.params.prior.min = ode.params.prior.min,
ode.params.prior.max = ode.params.prior.max,
thin = thin,
loc = loc,
adapt.type = adapt.type,
const.params = const.params,
non.odesim.params = non.odesim.params,
P=P,
extra.params=extra.params,
extra.const.params=extra.const.params,
active.surv=active.surv,
today = Sys.Date() )
}
|
library(mytestpkg)
context("test if my first package works")
test_that("my function works", {
expect_equal(followers(5), c(5, 6, 7))
})
test_that("my second function works", {
expect_equal(half(10), 5)
})
| /tests/testthat/tests.R | no_license | joanaseg/mytestpkg | R | false | false | 213 | r | library(mytestpkg)
context("test if my first package works")
test_that("my function works", {
expect_equal(followers(5), c(5, 6, 7))
})
test_that("my second function works", {
expect_equal(half(10), 5)
})
|
#' Estimate the Survival Curve
#'
#' Return the predicted survival probability at each unique time point
#' @param PosteriorDraws Matrix of random draws from the posterior distribution
#' of the survival curve.
#' @export
SurvEst = function(PosteriorDraws) {
return(apply(PosteriorDraws, 2, mean))
} | /R/SurvEst.R | no_license | nillen0/SurvBART | R | false | false | 303 | r | #' Estimate the Survival Curve
#'
#' Return the predicted survival probability at each unique time point
#' @param PosteriorDraws Matrix of random draws from the posterior distribution
#' of the survival curve.
#' @export
SurvEst = function(PosteriorDraws) {
return(apply(PosteriorDraws, 2, mean))
} |
#' @title Applying 3 adequacy tests to the Random walk model
#'
#' @description Investigating if the Random walk model is an adequate statistical description of an evolutionary
#' time series by applying the following tests (1) autocorrelation (2) runs test, and (3) constant variation.
#'
#' @param y a paleoTS object
#'
#' @param nrep number of iterations in the parametric bootstrap (number of simulated time series); default is 1000.
#'
#' @param conf confidence level for judging whether a model is an adequate statistical description of the data.
#' Number must be between 0 and 1. A higher number means less strict judgment of whether a model is adequate; default
#' is 0.95. Tests are two-tailed, which means a model is judged adequate if the observed test statistic is within the 2.5
#' percent of the extreme values of the calculated test statistics on the simulated data given the default confidence
#' value of 0.95.
#'
#' @param plot logical; if TRUE, the value of the test statistic calculated based on the observed fossil
#' time series is plotted on the distribution of test statistics calculated on the simulated time series;
#' default is TRUE.
#'
#' @param vstep the variance of the step distribution. This parameter is automatically estimated from the data, if not set
#' by the user (usually not recommended).
#'
#' @details A wrapper function for investigating adequacy of the Random walk model
#' applying all three tests at the same time.
#'
#'
#' @return First part of the output summarizes the number of iterations in the parametric bootstrap and the
#' confidence level for judging whether a model is an adequate statistical description of the data. The last
#' part of the output is a data frame with the adequacy tests as columns and the following rows:
#'
#' @return
#' \item{estimate}{The calculated test statistic on the observed data.}
#' \item{min.sim}{The smallest test statistic calculated on the simulated data.}
#' \item{max.sim}{The largest test statistic calculated on the simulated data.}
#' \item{p-value}{Not a real p-value, but is calculated as the fraction of simulated test statistics
#' that is larger (or smaller) than the calculated test statistic on the observed data divided by 0.5.
#' A value of 1 means 50 percent of the test statistics on the simulated data are larger and smaller
#' than the calculated statistic on the observed data. A value of 0.10 means 90 percent of the test
#' statistics on the simulated data are larger or smaller than the test statistic on the observed time
#' series.}
#' \item{result}{Whether the model PASSED or FAILED the adequacy test. The outcome depends on the
#' confidence level.}
#'
#'@author Kjetil L. Voje
#'
#'@references Voje, K.L. 2018. Assessing adequacy of models of phyletic evolution in the fossil record. \emph{Methods in Ecology and Evolution}. (in press).
#'@references Voje, K.L., Starrfelt, J., and Liow, L.H. 2018. Model adequacy and microevolutionary explanations for stasis in the fossil record. \emph{The American Naturalist}. 191:509-523.
#'
#'@seealso \code{\link{fit3adequacy.trend}}, \code{\link{fit4adequacy.stasis}}
#' @export
#'@examples
#'## generate a paleoTS objects by simulating random walk
#'x <- sim.GRW(ns=40, ms=0, vs=0.1)
#'
#'## Investigate if the random walk model is an adequate description of the data
#'fit3adequacy.RW(x)
#'
fit3adequacy.RW<-function(y, nrep=1000, conf=0.95, plot=TRUE, vstep=NULL){
x<-y$mm
v<-y$vv
n<-y$nn
tt<-y$tt
if (is.null(vstep)) vstep<-opt.joint.URW(y)$parameters[2]
lower<-(1-conf)/2
upper<-(1+conf)/2
# Compute the test statistics for the observed time series
obs.auto.corr<-auto.corr(x, model="RW")
obs.runs.test<-runs.test(x, model="RW")
obs.slope.test<-slope.test(x, tt, model="RW")
#Run parametric bootstrap
out.auto<-auto.corr.test.RW(y, nrep, conf, plot=FALSE, save.replicates = TRUE, vstep)
out.runs<-runs.test.RW(y,nrep, conf, plot=FALSE, save.replicates = TRUE, vstep)
out.slope<-slope.test.RW(y,nrep, conf, plot=FALSE, save.replicates = TRUE, vstep)
#Preparing the output
output<-c(as.vector(matrix(unlist(out.auto[[3]]),ncol=5,byrow=FALSE)),
as.vector(matrix(unlist(out.runs[[3]]),ncol=5,byrow=FALSE)),
as.vector(matrix(unlist(out.slope[[3]]),ncol=5,byrow=FALSE)))
output<-as.data.frame(cbind(c(output[c(1,6,11)]), c(output[c(2,7,12)]),
c(output[c(3,8,13)]), c(output[c(4,9,14)]),
c(output[c(5,10,15)])), ncol=5)
rownames(output)<-c("auto.corr", "runs.test", "slope.test")
colnames(output)<-c("estimate", "min.sim" ,"max.sim","p-value", "result")
if (plot==TRUE) {
par(mfrow=c(1,3))
model.names<-c("auto.corr", "runs.test", "slope.test")
plotting.distributions(out.auto$replicates,obs.auto.corr, model.names[1], xlab="Simulated data", main="Autocorrelation");
plotting.distributions(out.runs$replicates,obs.runs.test, model.names[2], xlab="Simulated data", main="Runs");
plotting.distributions(out.slope$replicates,obs.slope.test, model.names[3], xlab="Simulated data", main="Fixed variance");
}
summary.out<-as.data.frame(c(nrep, conf))
rownames(summary.out)<-c("replications", "confidence level")
colnames(summary.out)<-("Value")
out<- list("info" = summary.out, "summary" = output)
return(out)
}
| /R/fit3adequacy.RW.R | no_license | klvoje/adePEM | R | false | false | 5,351 | r | #' @title Applying 3 adequacy tests to the Random walk model
#'
#' @description Investigating if the Random walk model is an adequate statistical description of an evolutionary
#' time series by applying the following tests (1) autocorrelation (2) runs test, and (3) constant variation.
#'
#' @param y a paleoTS object
#'
#' @param nrep number of iterations in the parametric bootstrap (number of simulated time series); default is 1000.
#'
#' @param conf confidence level for judging whether a model is an adequate statistical description of the data.
#' Number must be between 0 and 1. A higher number means less strict judgment of whether a model is adequate; default
#' is 0.95. Tests are two-tailed, which means a model is judged adequate if the observed test statistic is within the 2.5
#' percent of the extreme values of the calculated test statistics on the simulated data given the default confidence
#' value of 0.95.
#'
#' @param plot logical; if TRUE, the value of the test statistic calculated based on the observed fossil
#' time series is plotted on the distribution of test statistics calculated on the simulated time series;
#' default is TRUE.
#'
#' @param vstep the variance of the step distribution. This parameter is automatically estimated from the data, if not set
#' by the user (usually not recommended).
#'
#' @details A wrapper function for investigating adequacy of the Random walk model
#' applying all three tests at the same time.
#'
#'
#' @return First part of the output summarizes the number of iterations in the parametric bootstrap and the
#' confidence level for judging whether a model is an adequate statistical description of the data. The last
#' part of the output is a data frame with the adequacy tests as columns and the following rows:
#'
#' @return
#' \item{estimate}{The calculated test statistic on the observed data.}
#' \item{min.sim}{The smallest test statistic calculated on the simulated data.}
#' \item{max.sim}{The largest test statistic calculated on the simulated data.}
#' \item{p-value}{Not a real p-value, but is calculated as the fraction of simulated test statistics
#' that is larger (or smaller) than the calculated test statistic on the observed data divided by 0.5.
#' A value of 1 means 50 percent of the test statistics on the simulated data are larger and smaller
#' than the calculated statistic on the observed data. A value of 0.10 means 90 percent of the test
#' statistics on the simulated data are larger or smaller than the test statistic on the observed time
#' series.}
#' \item{result}{Whether the model PASSED or FAILED the adequacy test. The outcome depends on the
#' confidence level.}
#'
#'@author Kjetil L. Voje
#'
#'@references Voje, K.L. 2018. Assessing adequacy of models of phyletic evolution in the fossil record. \emph{Methods in Ecology and Evoluton}. (in press).
#'@references Voje, K.L., Starrfelt, J., and Liow, L.H. 2018. Model adequacy and microevolutionary explanations for stasis in the fossil record. \emph{The American Naturalist}. 191:509-523.
#'
#'@seealso \code{\link{fit3adequacy.trend}}, \code{\link{fit4adequacy.stasis}}
#' @export
#'@examples
#'## generate a paleoTS objects by simulating random walk
#'x <- sim.GRW(ns=40, ms=0, vs=0.1)
#'
#'## Investigate if the random walk model is an adequate description of the data
#'fit3adequacy.RW(x)
#'
fit3adequacy.RW<-function(y, nrep=1000, conf=0.95, plot=TRUE, vstep=NULL){
x<-y$mm
v<-y$vv
n<-y$nn
tt<-y$tt
if (is.null(vstep)) vstep<-opt.joint.URW(y)$parameters[2]
lower<-(1-conf)/2
upper<-(1+conf)/2
# Compute the test statistics for the observed time series
obs.auto.corr<-auto.corr(x, model="RW")
obs.runs.test<-runs.test(x, model="RW")
obs.slope.test<-slope.test(x, tt, model="RW")
#Run parametric bootstrap
out.auto<-auto.corr.test.RW(y, nrep, conf, plot=FALSE, save.replicates = TRUE, vstep)
out.runs<-runs.test.RW(y,nrep, conf, plot=FALSE, save.replicates = TRUE, vstep)
out.slope<-slope.test.RW(y,nrep, conf, plot=FALSE, save.replicates = TRUE, vstep)
#Preparing the output
output<-c(as.vector(matrix(unlist(out.auto[[3]]),ncol=5,byrow=FALSE)),
as.vector(matrix(unlist(out.runs[[3]]),ncol=5,byrow=FALSE)),
as.vector(matrix(unlist(out.slope[[3]]),ncol=5,byrow=FALSE)))
output<-as.data.frame(cbind(c(output[c(1,6,11)]), c(output[c(2,7,12)]),
c(output[c(3,8,13)]), c(output[c(4,9,14)]),
c(output[c(5,10,15)])), ncol=5)
rownames(output)<-c("auto.corr", "runs.test", "slope.test")
colnames(output)<-c("estimate", "min.sim" ,"max.sim","p-value", "result")
if (plot==TRUE) {
par(mfrow=c(1,3))
model.names<-c("auto.corr", "runs.test", "slope.test")
plotting.distributions(out.auto$replicates,obs.auto.corr, model.names[1], xlab="Simulated data", main="Autocorrelation");
plotting.distributions(out.runs$replicates,obs.runs.test, model.names[2], xlab="Simulated data", main="Runs");
plotting.distributions(out.slope$replicates,obs.slope.test, model.names[3], xlab="Simulated data", main="Fixed variance");
}
summary.out<-as.data.frame(c(nrep, conf))
rownames(summary.out)<-c("replications", "confidence level")
colnames(summary.out)<-("Value")
out<- list("info" = summary.out, "summary" = output)
return(out)
}
|
# create a gergm from a formula object (getgergm)
Create_GERGM_Object_From_Formula <- function(object,
theta.coef,
possible_structural_terms,
possible_covariate_terms,
possible_network_terms,
raw_network,
together = 1,
transform.data = NULL,
lambda.coef = NULL,
transformation_type,
is_correlation_network = FALSE,
is_directed = TRUE,
beta_correlation_model = FALSE
){
res1 <- Parse_Formula_Object(object,
possible_structural_terms,
possible_covariate_terms,
possible_network_terms,
raw_network = raw_network,
theta = theta.coef,
terms_to_parse = "structural")
thetas <- res1$thetas
network <- res1$net
alphas <- res1$alphas
statistics <- res1$statistics
thresholds <- res1$thresholds
# for now we are not going to allow any covariates
if (is_correlation_network) {
# if (!is.null(lambda.coef)) {
# stop("Covariate effects are currently not supported for correlation networks. Please respecify without covariates.")
# }
} else if (beta_correlation_model) {
# cat("Using Beta model for correlation network data...\n")
# # if we are using the beta correlation model
# diag(network) <- 1
# bounded.network <- correlations.to.partials(network)
} else if (!is.null(lambda.coef)) {
cat("Covariates Provided...\n")
# create the network based on the transform family
# if there are no lambda.coefficients, we assume there is no transformation
# if there is a transformation specified, transform the observed network
if (transformation_type == "logcauchy" | transformation_type == "lognormal") {
if (min(network) <= 0) {
stop(paste("You have selected either a log-Cauchy or log-normal transformation but you have provided a network with values that are less than or equal to zero. Please ensure that the minimum value of the network you provide is greater than zero, or select a cauchy or normal transformation. The minimum value of the network provided is:",min(network)))
}
network <- log(network)
}
beta <- lambda.coef[1:(length(lambda.coef) - 1)]
sig <- 0.01 + exp(lambda.coef[length(lambda.coef)])
BZ <- 0
if (is.na(dim(transform.data)[3])) {
BZ = BZ + beta * transform.data
}
if (!is.na(dim(transform.data)[3])) {
for (j in 1:(dim(transform.data)[3])) {
BZ <- BZ + beta[j] * transform.data[, , j]
}
}
if (transformation_type == "logcauchy" | transformation_type == "cauchy") {
bounded.network <- pst(network, BZ, sig, 1)
}
if (transformation_type == "lognormal" | transformation_type == "gaussian") {
bounded.network <- pst(network, BZ, sig, Inf)
}
} # end of lambda conditional
if (is.null(lambda.coef)) {
bounded.network <- network
lambda.coef <- as.data.frame(0)
}
if (!is.null(lambda.coef)) {
lambda.coef <- as.data.frame(rbind(lambda.coef,NA))
rownames(lambda.coef) <- c("est", "se")
}
# if we are providing a correlation network, transform it
if (is_correlation_network) {
diag(network) <- 1
print(round(network,2))
bounded.network <- transform.correlations(network)
}
thetas <- t(as.matrix(thetas))
thetas <- rbind(thetas, NA)
colnames(thetas) <- possible_structural_terms
rownames(thetas) <- c("est", "se")
thetas <- as.data.frame(thetas)
object <- Create_GERGM_Object(network = network,
bounded.network = bounded.network,
formula = object,
thetas = thetas,
lambda = lambda.coef,
alpha = alphas,
together = together,
possible.stats = possible_structural_terms,
thresholds = thresholds)
object@stats_to_use <- statistics
return(object)
}
| /R/Create_GERGM_Object_From_Formula.R | no_license | fototo/GERGM | R | false | false | 4,620 | r | # create a gergm from a formula object (getgergm)
Create_GERGM_Object_From_Formula <- function(object,
theta.coef,
possible_structural_terms,
possible_covariate_terms,
possible_network_terms,
raw_network,
together = 1,
transform.data = NULL,
lambda.coef = NULL,
transformation_type,
is_correlation_network = FALSE,
is_directed = TRUE,
beta_correlation_model = FALSE
){
res1 <- Parse_Formula_Object(object,
possible_structural_terms,
possible_covariate_terms,
possible_network_terms,
raw_network = raw_network,
theta = theta.coef,
terms_to_parse = "structural")
thetas <- res1$thetas
network <- res1$net
alphas <- res1$alphas
statistics <- res1$statistics
thresholds <- res1$thresholds
# for now we are not going to allow any covariates
if (is_correlation_network) {
# if (!is.null(lambda.coef)) {
# stop("Covariate effects are currently not supported for correlation networks. Please respecify without covariates.")
# }
} else if (beta_correlation_model) {
# cat("Using Beta model for correlation network data...\n")
# # if we are using the beta correlation model
# diag(network) <- 1
# bounded.network <- correlations.to.partials(network)
} else if (!is.null(lambda.coef)) {
cat("Covariates Provided...\n")
# create the network based on the transform family
# if there are no lambda.coefficients, we assume there is no transformation
# if there is a transformation specified, transform the observed network
if (transformation_type == "logcauchy" | transformation_type == "lognormal") {
if (min(network) <= 0) {
stop(paste("You have selected either a log-Cauchy or log-normal transformation but you have provided a network with values that are less than or equal to zero. Please ensure that the minimum value of the network you provide is greater than zero, or select a cauchy or normal transformation. The minimum value of the network provided is:",min(network)))
}
network <- log(network)
}
beta <- lambda.coef[1:(length(lambda.coef) - 1)]
sig <- 0.01 + exp(lambda.coef[length(lambda.coef)])
BZ <- 0
if (is.na(dim(transform.data)[3])) {
BZ = BZ + beta * transform.data
}
if (!is.na(dim(transform.data)[3])) {
for (j in 1:(dim(transform.data)[3])) {
BZ <- BZ + beta[j] * transform.data[, , j]
}
}
if (transformation_type == "logcauchy" | transformation_type == "cauchy") {
bounded.network <- pst(network, BZ, sig, 1)
}
if (transformation_type == "lognormal" | transformation_type == "gaussian") {
bounded.network <- pst(network, BZ, sig, Inf)
}
} # end of lambda conditional
if (is.null(lambda.coef)) {
bounded.network <- network
lambda.coef <- as.data.frame(0)
}
if (!is.null(lambda.coef)) {
lambda.coef <- as.data.frame(rbind(lambda.coef,NA))
rownames(lambda.coef) <- c("est", "se")
}
# if we are providing a correlation network, transform it
if (is_correlation_network) {
diag(network) <- 1
print(round(network,2))
bounded.network <- transform.correlations(network)
}
thetas <- t(as.matrix(thetas))
thetas <- rbind(thetas, NA)
colnames(thetas) <- possible_structural_terms
rownames(thetas) <- c("est", "se")
thetas <- as.data.frame(thetas)
object <- Create_GERGM_Object(network = network,
bounded.network = bounded.network,
formula = object,
thetas = thetas,
lambda = lambda.coef,
alpha = alphas,
together = together,
possible.stats = possible_structural_terms,
thresholds = thresholds)
object@stats_to_use <- statistics
return(object)
}
|
library('tidyverse')
library('gganimate')
deaths <- read_csv('deaths.csv') # http://fridaythe13th.wikia.com/wiki/List_of_deaths_in_the_Friday_the_13th_films
deaths.totals <- deaths %>% filter(`On-Screen Death?` == 'Yes') %>% select(Film, Name) %>%
group_by(Film) %>%
mutate(n = row_number(), total.deaths = max(n)) %>%
select(Film, total.deaths) %>% ungroup() %>%
distinct() %>%
mutate(cumulative.deaths = cumsum(total.deaths)) %>% ungroup()
deaths.totals$year <- str_extract_all(deaths.totals$Film, "\\([^()]+\\)")
deaths.totals$year <- as.numeric(substring(deaths.totals$year, 2, nchar(deaths.totals$year)-1))
ggplot(deaths.totals, aes(x = year, y = cumulative.deaths)) +
geom_step(color = 'darkred', size = 1) +
scale_x_continuous(breaks = c(1980, 1981, 1982, 1984, 1985, 1988, 1989, 1993, 2009)) +
scale_y_continuous(limits = c(0, 140), breaks = seq(0, 140, 20)) +
labs(title = "Tracking the body count of the Friday the 13th films", subtitle = "cumulative on-screen deaths, from Friday the 13th (1980) to Friday the 13th (2009)", y = "", x = "",
caption = "Source: fandom.wikia.com\nNote: excludes Jason X (2001) and Freddy vs. Jason (2003)") +
theme(plot.title = element_text(size = 16, face = "bold"),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
plot.subtitle = element_text(size = 12),
axis.text = element_text(size = 10),
plot.caption = element_text(hjust = -.01, color = 'grey30', size = 9))
ggsave('plot.png', width = 12, height = 6)
| /code.R | no_license | underthecurve/friday-the-13 | R | false | false | 1,598 | r | library('tidyverse')
library('gganimate')   # NOTE(review): loaded but not used in this script

## Per-character death list scraped from the Friday the 13th wiki:
## http://fridaythe13th.wikia.com/wiki/List_of_deaths_in_the_Friday_the_13th_films
deaths <- read_csv('deaths.csv')

## Collapse to one row per film: count on-screen deaths per film, then
## accumulate the running body count across the series.
deaths.totals <- deaths %>%
  filter(`On-Screen Death?` == 'Yes') %>%
  select(Film, Name) %>%
  group_by(Film) %>%
  mutate(n = row_number(), total.deaths = max(n)) %>%
  select(Film, total.deaths) %>%
  ungroup() %>%
  distinct() %>%
  mutate(cumulative.deaths = cumsum(total.deaths))

## Release year is the four-digit number in parentheses in the film title,
## e.g. "Friday the 13th (1980)".  This replaces the original
## str_extract_all()/substring() pair: str_extract_all() returns a list that
## only worked through implicit as.character() coercion and produced garbage
## (NA after as.numeric) for any title with more than one "(...)" group.
deaths.totals$year <- as.numeric(str_extract(deaths.totals$Film, "(?<=\\()\\d{4}(?=\\))"))

## Step chart of the cumulative on-screen body count, one x break per film.
ggplot(deaths.totals, aes(x = year, y = cumulative.deaths)) +
  geom_step(color = 'darkred', size = 1) +
  scale_x_continuous(breaks = c(1980, 1981, 1982, 1984, 1985, 1988, 1989, 1993, 2009)) +
  scale_y_continuous(limits = c(0, 140), breaks = seq(0, 140, 20)) +
  labs(title = "Tracking the body count of the Friday the 13th films", subtitle = "cumulative on-screen deaths, from Friday the 13th (1980) to Friday the 13th (2009)", y = "", x = "",
       caption = "Source: fandom.wikia.com\nNote: excludes Jason X (2001) and Freddy vs. Jason (2003)") +
  theme(plot.title = element_text(size = 16, face = "bold"),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        panel.background = element_blank(),
        plot.subtitle = element_text(size = 12),
        axis.text = element_text(size = 10),
        plot.caption = element_text(hjust = -.01, color = 'grey30', size = 9))

## ggsave() with no plot argument saves the most recently displayed plot.
ggsave('plot.png', width = 12, height = 6)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfbd_teams.R
\name{cfbd_teams}
\alias{cfbd_teams}
\title{\strong{CFBD Teams Endpoint Overview}}
\description{
\describe{
\item{\code{cfbd_team_info()}:}{ Team Info Lookup.}
\item{\code{cfbd_team_roster()}:}{ Get a team's full roster by year.}
\item{\code{cfbd_team_talent()}:}{ Get composite team talent rankings for all teams in a given year.}
\item{\code{cfbd_team_matchup_records()}:}{ Get matchup history records between two teams.}
\item{\code{cfbd_team_matchup()}:}{ Get matchup history between two teams.}
}
\subsection{\strong{Team info lookup}}{
Lists all teams in conference or all D-I teams if conference is left NULL
Currently, support is only provided for D-I\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_info(conference = "SEC")
cfbd_team_info(conference = "Ind")
cfbd_team_info(year = 2019)
}\if{html}{\out{</div>}}
}
\subsection{\strong{Get team rosters}}{
\subsection{\strong{It is now possible to access yearly rosters}}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_roster(year = 2020)
}\if{html}{\out{</div>}}
}
\subsection{Get a team's full roster by year. If team is not selected, the API returns rosters for every team from the selected year.}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_roster(year = 2013, team = "Florida State")
}\if{html}{\out{</div>}}
}
\subsection{Get composite team talent rankings}{
Extracts team talent composite for all teams in a given year as sourced from 247 rankings\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_talent()
cfbd_team_talent(year = 2018)
}\if{html}{\out{</div>}}
}
\subsection{\strong{Get matchup history between two teams.}}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_matchup("Texas A&M", "TCU")
cfbd_team_matchup("Texas A&M", "TCU", min_year = 1975)
cfbd_team_matchup("Florida State", "Florida", min_year = 1975)
}\if{html}{\out{</div>}}
}
\subsection{\strong{Get matchup history records between two teams.}}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_matchup_records("Texas", "Oklahoma")
cfbd_team_matchup_records("Texas A&M", "TCU", min_year = 1975)
}\if{html}{\out{</div>}}
}
}
}
| /man/cfbd_teams.Rd | permissive | Engy-22/cfbfastR | R | false | true | 2,274 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfbd_teams.R
\name{cfbd_teams}
\alias{cfbd_teams}
\title{\strong{CFBD Teams Endpoint Overview}}
\description{
\describe{
\item{\code{cfbd_team_info()}:}{ Team Info Lookup.}
\item{\code{cfbd_team_roster()}:}{ Get a team's full roster by year.}
\item{\code{cfbd_team_talent()}:}{ Get composite team talent rankings for all teams in a given year.}
\item{\code{cfbd_team_matchup_records()}:}{ Get matchup history records between two teams.}
\item{\code{cfbd_team_matchup()}:}{ Get matchup history between two teams.}
}
\subsection{\strong{Team info lookup}}{
Lists all teams in conference or all D-I teams if conference is left NULL
Currently, support is only provided for D-I\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_info(conference = "SEC")
cfbd_team_info(conference = "Ind")
cfbd_team_info(year = 2019)
}\if{html}{\out{</div>}}
}
\subsection{\strong{Get team rosters}}{
\subsection{\strong{It is now possible to access yearly rosters}}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_roster(year = 2020)
}\if{html}{\out{</div>}}
}
\subsection{Get a team's full roster by year. If team is not selected, the API returns rosters for every team from the selected year.}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_roster(year = 2013, team = "Florida State")
}\if{html}{\out{</div>}}
}
\subsection{Get composite team talent rankings}{
Extracts team talent composite for all teams in a given year as sourced from 247 rankings\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_talent()
cfbd_team_talent(year = 2018)
}\if{html}{\out{</div>}}
}
\subsection{\strong{Get matchup history between two teams.}}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_matchup("Texas A&M", "TCU")
cfbd_team_matchup("Texas A&M", "TCU", min_year = 1975)
cfbd_team_matchup("Florida State", "Florida", min_year = 1975)
}\if{html}{\out{</div>}}
}
\subsection{\strong{Get matchup history records between two teams.}}{\if{html}{\out{<div class="sourceCode r">}}\preformatted{cfbd_team_matchup_records("Texas", "Oklahoma")
cfbd_team_matchup_records("Texas A&M", "TCU", min_year = 1975)
}\if{html}{\out{</div>}}
}
}
}
|
## Function creates a special "matrix" object that can cache its inverse.
## Build a cache-aware wrapper around a matrix.  The returned value is a
## list of four accessor closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- record a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # stored matrix changed, so the cache is stale
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve retrieves the
## inverse from the cache.
## Compute the inverse of the special "matrix" created by makeCacheMatrix.
## On a cache hit the stored inverse is returned (with a message); on a
## miss the inverse is computed with solve(), stored via setinverse(), and
## returned.  Extra arguments in ... are passed through to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
## Cache hit: reuse the previously computed inverse.
if(!is.null(m)) {
message("getting cached data")
return(m)
}
## Cache miss: compute the inverse, store it in the cache, and return it.
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
} | /cachematrix.R | no_license | katkadz/Coursera_Functions | R | false | false | 1,011 | r | ## Function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four closures (set/get/setinverse/getinverse) that all
## share the environment holding the matrix 'x' and the cached inverse 'm'.
makeCacheMatrix <- function(x = matrix()) {
## Cached inverse; NULL until setinverse() is called.
m <- NULL
## Replace the stored matrix and invalidate any cached inverse.
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve retrieves the
## inverse from the cache.
## Compute the inverse of the special "matrix" produced by makeCacheMatrix.
## A previously cached inverse is reused (announced with a message) instead
## of being recomputed; otherwise the inverse is calculated with solve(),
## cached, and returned.  Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
#!/usr/bin/env Rscript

## Plot min/max satisfaction ratios over time for three DDoS-mitigation
## scenarios (token-bucket fairness, satisfaction-based Interest acceptance,
## satisfaction-based pushback) on the 7018.r0 topology.  Reads per-scenario
## result files from results/<folder>/process/, caches the combined data
## frame, and writes a PDF under graphs/pdfs/<folder>/.

#args = commandArgs(TRUE)
#if (length(args) == 0) {
#  cat ("ERROR: Scenario parameters should be specified\n")
#  q(status = 1)
#}

## Scenario parameters (hard-coded; the commandArgs() handling above is
## commented out).
prefixes <- "fairness,satisfaction-accept,satisfaction-pushback"
topo     <- "7018.r0"
evils    <- "140"
runs     <- "1,2,3,4,5,6,7,8,9,10"   # NOTE(review): defined but never used below
folder   <- "attackISP"
good     <- "0"
producer <- "gw"

suppressPackageStartupMessages (library(ggplot2))
suppressPackageStartupMessages (library(reshape2))
suppressPackageStartupMessages (library(doBy))
suppressPackageStartupMessages (library(plyr))
suppressPackageStartupMessages (library(scales))

source ("graph-style.R")   # provides theme_custom()

## Combined-data cache: load it when present, otherwise assemble it from the
## per-scenario / per-attacker-count result files and save it for next time.
name <- paste (sep="-", prefixes, "topo", topo, "evil", evils, "producer", producer)
filename <- paste(sep="", "results/", folder, "/process/", name, "-all-data.dat")
if (file_test("-f", filename)) {
  cat ("Loading data from", filename, "\n")
  load (filename)
} else {
  data.all <- data.frame ()
  for (evil in strsplit(evils, ",")[[1]]) {
    for (prefix in strsplit(prefixes, ",")[[1]]) {
      name <- paste (sep="-", prefix, "topo", topo, "evil", evil, "good", good, "producer", producer)
      filename <- paste(sep="", "results/", folder, "/process/", name, ".txt")
      cat ("Reading from", filename, "\n")
      load (filename)                      # defines `data`
      data.all <- rbind (data.all, data)
    }
  }
  name <- paste (sep="-", prefixes, "topo", topo, "evil", evils, "producer", producer)
  filename <- paste(sep="", "results/", folder, "/process/", name, "-all-data.dat")
  cat ("Saving data to", filename, "\n")
  save (data.all, file=filename)
}

data.all$Evil <- factor(data.all$Evil)

name2 <- paste (sep="-", topo, "good", good, "producer", producer)

## Order the scenarios and give them human-readable labels.
data.all$Scenario <- ordered (data.all$Scenario,
                              c("fairness", "satisfaction-accept", "satisfaction-pushback"))
levels(data.all$Scenario) <- sub("^satisfaction-pushback$", "Satisfaction-based pushback", levels(data.all$Scenario))
levels(data.all$Scenario) <- sub("^satisfaction-accept$", "Satisfaction-based Interest acceptance", levels(data.all$Scenario))
levels(data.all$Scenario) <- sub("^fairness$", "Token bucket with per interface fairness", levels(data.all$Scenario))

## Fixed: terminate the progress message with a newline.
cat (sep="", "Writing to ", paste(sep="", "graphs/pdfs/", folder, "/", name2, ".pdf"), "\n")
pdf (paste(sep="", "graphs/pdfs/", folder, "/", name2, ".pdf"), width=5, height=4)

minTime <- 300      # seconds of simulation before the attack starts
attackTime <- 300   # attack duration, in seconds

## Keep a window from 100 s before the attack to 100 s after it ends.
gdata <- subset(data.all, minTime-100 <= Time & Time < minTime+attackTime+100)

g <- ggplot (gdata) +
  ## NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use `fun`); kept
  ## as-is for compatibility with the ggplot2 version this script targets.
  stat_summary(aes(x=Time-minTime, y=Ratio, color=Scenario), geom="line", fun.y=mean, size=0.4) +
  theme_custom () +
  xlab ("Waktu sejak Serangan dilancarkan (detik)") +
  ylab ("Min/Max Satisfaction Ratios") +
  scale_colour_brewer(palette="Set1") +
  scale_fill_brewer(palette="PuOr") +
  scale_y_continuous (limits=c(0,1), breaks=seq(0,1,0.1), labels=percent_format ()) +
  scale_x_continuous (limits=c(-100,500), breaks=seq(-100,500,50)) +
  ## facet_wrap (~ Scenario, nrow=5, ncol=1) + # this is what broke the plot apart
  geom_vline(xintercept = attackTime) +
  theme (legend.key.size = unit(0.8, "lines"),
         legend.position="bottom", #c(1.0, 0.0),
         legend.justification=c(1,0),
         legend.background = element_rect (fill="white", colour="black", size=0.1))
print (g)
x <- dev.off ()   # assigned to suppress dev.off()'s printed return value
| /satisfaction-min-max-vs-time.R | no_license | iyonr/ndn-graph | R | false | false | 3,288 | r | #!/usr/bin/env Rscript
## Plot min/max satisfaction ratios over time for three DDoS-mitigation
## scenarios on the 7018.r0 topology.  Reads per-scenario result files from
## results/<folder>/process/, caches the combined data frame, and writes a
## PDF under graphs/pdfs/<folder>/.
#args = commandArgs(TRUE)
#if (length(args) == 0) {
# cat ("ERROR: Scenario parameters should be specified\n")
# q(status = 1)
#}
## Scenario parameters (hard-coded; commandArgs() handling is commented out).
prefixes = "fairness,satisfaction-accept,satisfaction-pushback"
topo = "7018.r0"
evils = "140"
runs = "1,2,3,4,5,6,7,8,9,10"
folder = "attackISP"
good = "0"
producer = "gw"
suppressPackageStartupMessages (library(ggplot2))
suppressPackageStartupMessages (library(reshape2))
suppressPackageStartupMessages (library(doBy))
suppressPackageStartupMessages (library(plyr))
suppressPackageStartupMessages (library(scales))
## graph-style.R provides theme_custom() used below.
source ("graph-style.R")
## Combined-data cache: load it when present, otherwise assemble it from the
## per-scenario / per-attacker-count result files and save it for next time.
name = paste (sep="-", prefixes, "topo", topo, "evil", evils, "producer", producer)
filename = paste(sep="", "results/",folder,"/process/", name, "-all-data.dat")
if (file_test("-f", filename)) {
cat ("Loading data from", filename, "\n")
load (filename)
} else {
data.all = data.frame ()
for (evil in strsplit(evils,",")[[1]]) {
for (prefix in strsplit(prefixes,",")[[1]]) {
name = paste (sep="-", prefix, "topo", topo, "evil", evil, "good", good, "producer", producer)
filename = paste(sep="", "results/",folder,"/process/", name, ".txt")
cat ("Reading from", filename, "\n")
## load() defines `data`, appended to the running data frame.
load (filename)
data.all <- rbind (data.all, data)
}
}
name = paste (sep="-", prefixes, "topo", topo, "evil", evils, "producer", producer)
filename = paste(sep="", "results/",folder,"/process/", name, "-all-data.dat")
cat ("Saving data to", filename, "\n")
save (data.all, file=filename)
}
data.all$Evil = factor(data.all$Evil)
name2 = paste (sep="-", topo, "good", good, "producer", producer)
## Order the scenarios and give them human-readable labels.
data.all$Scenario = ordered (data.all$Scenario,
c("fairness", "satisfaction-accept", "satisfaction-pushback"))
levels(data.all$Scenario) <- sub("^satisfaction-pushback$", "Satisfaction-based pushback", levels(data.all$Scenario))
levels(data.all$Scenario) <- sub("^satisfaction-accept$", "Satisfaction-based Interest acceptance", levels(data.all$Scenario))
levels(data.all$Scenario) <- sub("^fairness$", "Token bucket with per interface fairness", levels(data.all$Scenario))
cat (sep="", "Writing to ", paste(sep="","graphs/pdfs/", folder, "/",name2,".pdf"))
pdf (paste(sep="","graphs/pdfs/", folder, "/",name2,".pdf"), width=5, height=4)
## minTime: seconds of simulation before the attack; attackTime: duration.
minTime = 300
attackTime = 300
## Keep a window from 100 s before the attack to 100 s after it ends.
gdata = subset(data.all, minTime-100 <= Time & Time < minTime+attackTime+100)
g <- ggplot (gdata) +
## NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use `fun`).
stat_summary(aes(x=Time-minTime, y=Ratio, color=Scenario), geom="line", fun.y=mean, size=0.4) +
theme_custom () +
xlab ("Waktu sejak Serangan dilancarkan (detik)") +
ylab ("Min/Max Satisfaction Ratios") +
scale_colour_brewer(palette="Set1") +
scale_fill_brewer(palette="PuOr") +
scale_y_continuous (limits=c(0,1), breaks=seq(0,1,0.1), labels=percent_format ()) +
scale_x_continuous (limits=c(-100,500), breaks=seq(-100,500,50)) +
## facet_wrap (~ Scenario, nrow=5, ncol=1) + # this is what broke the plot apart
geom_vline(xintercept = attackTime) +
theme (legend.key.size = unit(0.8, "lines"),
legend.position="bottom", #c(1.0, 0.0),
legend.justification=c(1,0),
legend.background = element_rect (fill="white", colour="black", size=0.1))
print (g)
x = dev.off ()
|
\name{periodify} %DontDeclareMethods
\alias{periodify}
\alias{periodify.owin}
\alias{periodify.ppp}
\alias{periodify.psp}
\title{
Make Periodic Copies of a Spatial Pattern
}
\description{
Given a spatial pattern (point pattern, line segment pattern,
window, etc) make shifted copies of the pattern
and optionally combine them to make a periodic pattern.
}
\usage{
periodify(X, ...)
\method{periodify}{ppp}(X, nx = 1, ny = 1, ...,
combine=TRUE, warn=TRUE, check=TRUE,
ix=(-nx):nx, iy=(-ny):ny,
ixy=expand.grid(ix=ix,iy=iy))
\method{periodify}{psp}(X, nx = 1, ny = 1, ...,
combine=TRUE, warn=TRUE, check=TRUE,
ix=(-nx):nx, iy=(-ny):ny,
ixy=expand.grid(ix=ix,iy=iy))
\method{periodify}{owin}(X, nx = 1, ny = 1, ...,
combine=TRUE, warn=TRUE,
ix=(-nx):nx, iy=(-ny):ny,
ixy=expand.grid(ix=ix,iy=iy))
}
\arguments{
\item{X}{
An object representing a spatial pattern
(point pattern, line segment pattern or window).
}
\item{nx,ny}{
Integers.
Numbers of additional copies of \code{X} in each direction.
The result will be a grid of \code{2 * nx + 1} by \code{2 * ny + 1}
copies of the original object.
(Overruled by \code{ix, iy, ixy}).
}
\item{\dots}{
Ignored.
}
\item{combine}{
Logical flag determining whether the copies should be superimposed
to make an object like \code{X} (if \code{combine=TRUE}) or
simply returned as a list of objects (\code{combine=FALSE}).
}
\item{warn}{
Logical flag determining whether to issue warnings.
}
\item{check}{
Logical flag determining whether to check the validity of the
combined pattern.
}
\item{ix, iy}{
Integer vectors determining the grid positions of the copies
of \code{X}. (Overruled by \code{ixy}).
}
\item{ixy}{
Matrix or data frame with two columns, giving the
grid positions of the copies of \code{X}.
}
}
\details{
Given a spatial pattern (point pattern, line segment pattern, etc)
this function makes a number of shifted copies of the pattern
and optionally combines them. The function \code{periodify} is
generic, with methods for various kinds of spatial objects.
The default is to make a 3 by 3 array of copies of \code{X} and
combine them into a single pattern of the same kind as \code{X}.
This can be used (for example) to compute toroidal or periodic
edge corrections for various operations on \code{X}.
If the arguments \code{nx}, \code{ny} are given
and other arguments are missing,
the original object will be copied \code{nx} times to the right
and \code{nx} times to the left, then \code{ny} times upward and
\code{ny} times downward, making \code{(2 * nx + 1) * (2 * ny + 1)}
copies altogether, arranged in a grid, centred on the original object.
If the arguments \code{ix}, \code{iy} or \code{ixy} are specified,
then these determine the grid positions of the copies of \code{X}
that will be made. For example \code{(ix,iy) = (1, 2)} means a
copy of \code{X} shifted by the vector \code{(ix * w, iy * h)} where
\code{w,h} are the width and height of the bounding rectangle of \code{X}.
If \code{combine=TRUE} (the default) the copies of \code{X} are
superimposed to create an object of the same kind as \code{X}.
If \code{combine=FALSE} the copies of \code{X} are returned as a list.
}
\value{
If \code{combine=TRUE}, an object of the same class as \code{X}.
If \code{combine=FALSE}, a list of objects of the same class as \code{X}.
}
\seealso{
\code{\link{shift}}
}
\examples{
data(cells)
plot(periodify(cells))
a <- lapply(periodify(cells$window, combine=FALSE),
plot, add=TRUE,lty=2)
}
\author{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}
and Rolf Turner \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{manip}
| /man/periodify.Rd | no_license | jdtuck/spatstat | R | false | false | 3,896 | rd | \name{periodify} %DontDeclareMethods
\alias{periodify}
\alias{periodify.owin}
\alias{periodify.ppp}
\alias{periodify.psp}
\title{
Make Periodic Copies of a Spatial Pattern
}
\description{
Given a spatial pattern (point pattern, line segment pattern,
window, etc) make shifted copies of the pattern
and optionally combine them to make a periodic pattern.
}
\usage{
periodify(X, ...)
\method{periodify}{ppp}(X, nx = 1, ny = 1, ...,
combine=TRUE, warn=TRUE, check=TRUE,
ix=(-nx):nx, iy=(-ny):ny,
ixy=expand.grid(ix=ix,iy=iy))
\method{periodify}{psp}(X, nx = 1, ny = 1, ...,
combine=TRUE, warn=TRUE, check=TRUE,
ix=(-nx):nx, iy=(-ny):ny,
ixy=expand.grid(ix=ix,iy=iy))
\method{periodify}{owin}(X, nx = 1, ny = 1, ...,
combine=TRUE, warn=TRUE,
ix=(-nx):nx, iy=(-ny):ny,
ixy=expand.grid(ix=ix,iy=iy))
}
\arguments{
\item{X}{
An object representing a spatial pattern
(point pattern, line segment pattern or window).
}
\item{nx,ny}{
Integers.
Numbers of additional copies of \code{X} in each direction.
The result will be a grid of \code{2 * nx + 1} by \code{2 * ny + 1}
copies of the original object.
(Overruled by \code{ix, iy, ixy}).
}
\item{\dots}{
Ignored.
}
\item{combine}{
Logical flag determining whether the copies should be superimposed
to make an object like \code{X} (if \code{combine=TRUE}) or
simply returned as a list of objects (\code{combine=FALSE}).
}
\item{warn}{
Logical flag determining whether to issue warnings.
}
\item{check}{
Logical flag determining whether to check the validity of the
combined pattern.
}
\item{ix, iy}{
Integer vectors determining the grid positions of the copies
of \code{X}. (Overruled by \code{ixy}).
}
\item{ixy}{
Matrix or data frame with two columns, giving the
grid positions of the copies of \code{X}.
}
}
\details{
Given a spatial pattern (point pattern, line segment pattern, etc)
this function makes a number of shifted copies of the pattern
and optionally combines them. The function \code{periodify} is
generic, with methods for various kinds of spatial objects.
The default is to make a 3 by 3 array of copies of \code{X} and
combine them into a single pattern of the same kind as \code{X}.
This can be used (for example) to compute toroidal or periodic
edge corrections for various operations on \code{X}.
If the arguments \code{nx}, \code{ny} are given
and other arguments are missing,
the original object will be copied \code{nx} times to the right
and \code{nx} times to the left, then \code{ny} times upward and
\code{ny} times downward, making \code{(2 * nx + 1) * (2 * ny + 1)}
copies altogether, arranged in a grid, centred on the original object.
If the arguments \code{ix}, \code{iy} or \code{ixy} are specified,
then these determine the grid positions of the copies of \code{X}
that will be made. For example \code{(ix,iy) = (1, 2)} means a
copy of \code{X} shifted by the vector \code{(ix * w, iy * h)} where
\code{w,h} are the width and height of the bounding rectangle of \code{X}.
If \code{combine=TRUE} (the default) the copies of \code{X} are
superimposed to create an object of the same kind as \code{X}.
If \code{combine=FALSE} the copies of \code{X} are returned as a list.
}
\value{
If \code{combine=TRUE}, an object of the same class as \code{X}.
If \code{combine=FALSE}, a list of objects of the same class as \code{X}.
}
\seealso{
\code{\link{shift}}
}
\examples{
data(cells)
plot(periodify(cells))
a <- lapply(periodify(cells$window, combine=FALSE),
plot, add=TRUE,lty=2)
}
\author{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}
and Rolf Turner \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{manip}
|
\name{DAVIDToolChoices}
\alias{DAVIDToolChoices}
\alias{DAVIDTypeChoices}
\alias{DAVIDAnnotChoices}
\alias{DAVIDAffyChipChoices}
\docType{data}
\title{ Choices for the DAVID query parameters }
\description{
DAVIDToolChoices, DAVIDTypeChoices, DAVIDAnnotChoices and DAVIDAffyChipChoices are data structures used to construct pick menus, when the corresponding arguments to DAVIDQuery are not provided.
}
\details{
The source of these lists can be found at
\url{http://david.abcc.ncifcrf.gov/content.jsp?file=DAVID_API.html#input_list}.
The DAVIDToolChoices list is hardcoded within the package and includes an additional item representing the DAVID gene ID conversion tool.
The DAVIDTypeChoices and DAVIDAnnotChoices lists are retrieved from DAVID web services at run time, so possible future alterations and additions
to these lists are likely to be handled automatically.
}
\source{
\url{http://david.abcc.ncifcrf.gov/content.jsp?file=DAVID_API.html#input_list}
}
\seealso{ \code{\link{DAVIDQuery}}, \code{\link{getAnnotationChoices}}, \code{\link{getIdConversionChoices}}, \code{\link{getAffyChipTypes}}}
\keyword{ database }
| /OtherPackages/DAVIDQuery/man/DAVIDToolChoices.Rd | no_license | rikenbit/PubMedQuery | R | false | false | 1,163 | rd | \name{DAVIDToolChoices}
\alias{DAVIDToolChoices}
\alias{DAVIDTypeChoices}
\alias{DAVIDAnnotChoices}
\alias{DAVIDAffyChipChoices}
\docType{data}
\title{ Choices for the DAVID query parameters }
\description{
DAVIDToolChoices, DAVIDTypeChoices, DAVIDAnnotChoices and DAVIDAffyChipChoices are data structures used to construct pick menus, when the corresponding arguments to DAVIDQuery are not provided.
}
\details{
The source of these lists can be found at
\url{http://david.abcc.ncifcrf.gov/content.jsp?file=DAVID_API.html#input_list}.
The DAVIDToolChoices list is hardcoded within the package and includes an additional item representing the DAVID gene ID conversion tool.
The DAVIDTypeChoices and DAVIDAnnotChoices lists are retrieved from DAVID web services at run time, so possible future alterations and additions
to these lists are likely to be handled automatically.
}
\source{
\url{http://david.abcc.ncifcrf.gov/content.jsp?file=DAVID_API.html#input_list}
}
\seealso{ \code{\link{DAVIDQuery}}, \code{\link{getAnnotationChoices}}, \code{\link{getIdConversionChoices}}, \code{\link{getAffyChipTypes}}}
\keyword{ database }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Exponential.R
\name{quantile.Exponential}
\alias{quantile.Exponential}
\title{Determine quantiles of a Exponential distribution}
\usage{
\method{quantile}{Exponential}(d, p, ...)
}
\arguments{
\item{d}{A \code{Exponential} object created by a call to \code{\link[=Exponential]{Exponential()}}.}
\item{p}{A vector of probabilities.}
\item{...}{Unused. Unevaluated arguments will generate a warning to
catch misspellings or other possible errors.}
}
\value{
A vector of quantiles, one for each element of \code{p}.
}
\description{
\code{quantile()} is the inverse of \code{cdf()}.
}
\examples{
set.seed(27)
X <- Exponential(5)
X
mean(X)
variance(X)
skewness(X)
kurtosis(X)
random(X, 10)
pdf(X, 2)
log_pdf(X, 2)
cdf(X, 4)
quantile(X, 0.7)
cdf(X, quantile(X, 0.7))
quantile(X, cdf(X, 7))
}
| /man/quantile.exponential.Rd | permissive | nfultz/distributions3 | R | false | true | 871 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Exponential.R
\name{quantile.Exponential}
\alias{quantile.Exponential}
\title{Determine quantiles of a Exponential distribution}
\usage{
\method{quantile}{Exponential}(d, p, ...)
}
\arguments{
\item{d}{A \code{Exponential} object created by a call to \code{\link[=Exponential]{Exponential()}}.}
\item{p}{A vector of probabilities.}
\item{...}{Unused. Unevaluated arguments will generate a warning to
catch misspellings or other possible errors.}
}
\value{
A vector of quantiles, one for each element of \code{p}.
}
\description{
\code{quantile()} is the inverse of \code{cdf()}.
}
\examples{
set.seed(27)
X <- Exponential(5)
X
mean(X)
variance(X)
skewness(X)
kurtosis(X)
random(X, 10)
pdf(X, 2)
log_pdf(X, 2)
cdf(X, 4)
quantile(X, 0.7)
cdf(X, quantile(X, 0.7))
quantile(X, cdf(X, 7))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/402.p-Confidence_p-Bias_BASE_All_Graph.R
\name{PlotpCOpBIEX}
\alias{PlotpCOpBIEX}
\title{Plots of p-confidence and p-bias of Exact method given n and alpha level}
\usage{
PlotpCOpBIEX(n, alp, e)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{e}{- Exact method indicator in [0, 1] {1: Clopper Pearson, 0.5: Mid P}
The input can also be a range of values between 0 and 1.}
}
\value{
A dataframe with
\describe{
\item{x1}{ Number of successes (positive samples)}
\item{pconf }{ p-Confidence}
\item{pbias }{ p-Bias}
}
}
\description{
Plots of p-confidence and p-bias of Exact method given n and alpha level
}
\details{
Evaluation of Confidence interval for \code{p} based on inverting equal-tailed
binomial tests with null hypothesis \eqn{H0: p = p0} using p-confidence and p-bias for
the \eqn{n + 1} intervals
}
\examples{
n=5; alp=0.05;e=0.5; # Mid-p
PlotpCOpBIEX(n,alp,e)
n=5; alp=0.05;e=1; #Clopper-Pearson
PlotpCOpBIEX(n,alp,e)
n=5; alp=0.05;e=c(0.1,0.5,0.95,1); #Range including Mid-p and Clopper-Pearson
PlotpCOpBIEX(n,alp,e)
}
\references{
[1] 2005 Vos PW and Hudson S.
Evaluation Criteria for Discrete Confidence Intervals: Beyond Coverage and Length.
The American Statistician: 59; 137 - 142.
}
\seealso{
Other p-confidence and p-bias of base methods: \code{\link{PlotpCOpBIAS}},
\code{\link{PlotpCOpBIAll}}, \code{\link{PlotpCOpBIBA}},
\code{\link{PlotpCOpBILR}}, \code{\link{PlotpCOpBILT}},
\code{\link{PlotpCOpBISC}}, \code{\link{PlotpCOpBITW}},
\code{\link{PlotpCOpBIWD}}, \code{\link{pCOpBIAS}},
\code{\link{pCOpBIAll}}, \code{\link{pCOpBIBA}},
\code{\link{pCOpBIEX}}, \code{\link{pCOpBILR}},
\code{\link{pCOpBILT}}, \code{\link{pCOpBISC}},
\code{\link{pCOpBITW}}, \code{\link{pCOpBIWD}}
}
| /man/PlotpCOpBIEX.Rd | no_license | cran/proportion | R | false | true | 1,920 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/402.p-Confidence_p-Bias_BASE_All_Graph.R
\name{PlotpCOpBIEX}
\alias{PlotpCOpBIEX}
\title{Plots of p-confidence and p-bias of Exact method given n and alpha level}
\usage{
PlotpCOpBIEX(n, alp, e)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{e}{- Exact method indicator in [0, 1] {1: Clopper Pearson, 0.5: Mid P}
The input can also be a range of values between 0 and 1.}
}
\value{
A dataframe with
\describe{
\item{x1}{ Number of successes (positive samples)}
\item{pconf }{ p-Confidence}
\item{pbias }{ p-Bias}
}
}
\description{
Plots of p-confidence and p-bias of Exact method given n and alpha level
}
\details{
Evaluation of Confidence interval for \code{p} based on inverting equal-tailed
binomial tests with null hypothesis \eqn{H0: p = p0} using p-confidence and p-bias for
the \eqn{n + 1} intervals
}
\examples{
n=5; alp=0.05;e=0.5; # Mid-p
PlotpCOpBIEX(n,alp,e)
n=5; alp=0.05;e=1; #Clopper-Pearson
PlotpCOpBIEX(n,alp,e)
n=5; alp=0.05;e=c(0.1,0.5,0.95,1); #Range including Mid-p and Clopper-Pearson
PlotpCOpBIEX(n,alp,e)
}
\references{
[1] 2005 Vos PW and Hudson S.
Evaluation Criteria for Discrete Confidence Intervals: Beyond Coverage and Length.
The American Statistician: 59; 137 - 142.
}
\seealso{
Other p-confidence and p-bias of base methods: \code{\link{PlotpCOpBIAS}},
\code{\link{PlotpCOpBIAll}}, \code{\link{PlotpCOpBIBA}},
\code{\link{PlotpCOpBILR}}, \code{\link{PlotpCOpBILT}},
\code{\link{PlotpCOpBISC}}, \code{\link{PlotpCOpBITW}},
\code{\link{PlotpCOpBIWD}}, \code{\link{pCOpBIAS}},
\code{\link{pCOpBIAll}}, \code{\link{pCOpBIBA}},
\code{\link{pCOpBIEX}}, \code{\link{pCOpBILR}},
\code{\link{pCOpBILT}}, \code{\link{pCOpBISC}},
\code{\link{pCOpBITW}}, \code{\link{pCOpBIWD}}
}
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
# Small, miscellaneous functions for use throughout PECAn
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
##' return MstMIP variable as ncvar
##'
##' returns a MstMIP variable as a ncvar based on name and other parameters
##' passed in.
##'
##' @title MstMIP variable
##' @export
##' @param name name of variable
##' @param lat latitude if dimension requests it
##' @param lon longitude if dimension requests it
##' @param time time if dimension requests it
##' @param nsoil nsoil if dimension requests it
##' @param silent logical; if TRUE, suppress informational logger messages
##' @return ncvar based on MstMIP definition
##' @author Rob Kooper
mstmipvar <- function(name, lat = NA, lon = NA, time = NA, nsoil = NA, silent = FALSE) {
  ## TRUE when a dimension argument was actually supplied.  The defaults are
  ## the scalar NA, while a supplied dimension is an ncdim4 object (a list);
  ## is.na() on a list is elementwise, so the original `!is.na(lon)` fed a
  ## length > 1 logical to `&&`/`if`, which is an error in modern R (>= 4.3).
  dim_given <- function(d) !(is.atomic(d) && length(d) == 1 && is.na(d))

  data(mstmip_vars, package = "PEcAn.utils")
  var <- mstmip_vars[mstmip_vars$Variable.Name == name, ]
  dims <- list()
  if (nrow(var) == 0) {
    ## Unknown standard variable: fall back to the locally-defined table,
    ## and failing that, emit a bare time-dimensioned variable.
    data(mstmip_local, package = "PEcAn.utils")
    var <- mstmip_local[mstmip_local$Variable.Name == name, ]
    if (nrow(var) == 0) {
      if (!silent) {
        logger.info("Don't know about variable", name, " in mstmip_vars in PEcAn.utils")
      }
      if (!dim_given(time)) {
        time <- ncdf4::ncdim_def(name = "time", units = "days since 1900-01-01 00:00:00",
                                 vals = 1:365, calendar = "standard", unlim = TRUE)
      }
      return(ncdf4::ncvar_def(name, "", list(time), -999, name))
    }
  }
  ## Build the dimension list in the order declared by columns dim1..dim4.
  for (i in 1:4) {
    vd <- var[[paste0("dim", i)]]
    if (vd == "lon" && dim_given(lon)) {
      dims[[length(dims) + 1]] <- lon
    } else if (vd == "lat" && dim_given(lat)) {
      dims[[length(dims) + 1]] <- lat
    } else if (vd == "time" && dim_given(time)) {
      dims[[length(dims) + 1]] <- time
    } else if (vd == "nsoil" && dim_given(nsoil)) {
      dims[[length(dims) + 1]] <- nsoil
    } else if (vd == "na") {
      # this dimension slot is unused for the variable; skip it
    } else {
      if (!silent) {
        logger.info("Don't know dimension for", vd, "for variable", name)
      }
    }
  }
  ncvar <- ncdf4::ncvar_def(name, as.character(var$Units), dims, -999)
  if (var$Long.name != "na") {
    ncvar$longname <- as.character(var$Long.name)
  }
  return(ncvar)
} # mstmipvar
#--------------------------------------------------------------------------------------------------#
##' left padded by zeros up to a given number of digits.
##'
##' returns a string representing a given number
##' @title Left Pad Zeros
##' @export
##' @param num number to be padded (integer)
##' @param digits number of digits to add
##' @return num with zeros to the left
##' @export
##' @author Carl Davidson
left.pad.zeros <- function(num, digits = 5) {
  # Build a zero-padded fixed-width format such as "%05.0f" and apply it.
  fmt <- sprintf("%%0%.0f.0f", digits)
  sprintf(fmt, num)
} # left.pad.zeros
##' Truncates vector at 0
##' @name zero.truncate
##' @title Zero Truncate
##' @param y numeric vector
##' @return numeric vector with all values less than 0 set to 0
##' @export
##' @author <unknown>
zero.truncate <- function(y) {
  # Negative values and NAs both collapse to zero.
  replace(y, y < 0 | is.na(y), 0)
} # zero.truncate
#--------------------------------------------------------------------------------------------------#
##' R implementation of rsync
##'
##' rsync is a file copying tool in bash
##' @title rsync
##' @param args rsync arguments (see man rsync)
##' @param from source
##' @param to destination
##' @param pattern file pattern to be matched
##' @return nothing, transfers files as a side effect
##' @export
##' @author David LeBauer
##' @author Shawn Serbin
#--------------------------------------------------------------------------------------------------#
rsync <- function(args, from, to, pattern = "") {
  logger.warn("NEED TO USE TUNNEL")
  # Note: pattern is appended directly to the source path with no separator.
  cmd <- paste("rsync", args, paste0(from, pattern), to)
  system(cmd, intern = TRUE)
} # rsync
#--------------------------------------------------------------------------------------------------#
##' R implementation of SSH
##'
##' @title SSH
##' @param host
##' @param ...
##' @param args
##' @export
#--------------------------------------------------------------------------------------------------#
ssh <- function(host, ..., args = "") {
  logger.warn("NEED TO USE TUNNEL")
  # Local commands run directly; remote ones are wrapped in `ssh -T host "..."`.
  command <- if (host == "localhost") {
    paste0(..., args)
  } else {
    paste0("ssh -T ", host, " \"", ..., "\" ", args)
  }
  system(command)
} # ssh
#--------------------------------------------------------------------------------------------------#
##' Convert vector to comma delimited string
##'
##' vecpaste, turns vector into comma delimited string fit for SQL statements.
##' @title vecpaste
##' @param x vector
##' @return comma delimited string
##' @export
vecpaste <- function(x) paste(paste0("'", x, "'"), collapse = ",")
#--------------------------------------------------------------------------------------------------#
##' returns an id representing a model run
##'
##' Provides a consistent method of naming runs; for use in model input files and indices
##' @title Get Run ID
##' @param run.type character, can be any character; currently 'SA' is used for sensitivity analysis, 'ENS' for ensemble run.
##' @param index unique index for different runs, e.g. integer counting members of an
##' ensemble or a quantile used to which a trait has been perturbed for sensitivity analysis
##' @param trait name of trait being sampled (for sensitivity analysis)
##' @param pft.name name of PFT (value from pfts.names field in database)
##' @return id representing a model run
##' @export
##' @examples
##' get.run.id('ENS', left.pad.zeros(1, 5))
##' get.run.id('SA', round(qnorm(-3),3), trait = 'Vcmax')
##' @author Carl Davidson, David LeBauer
#--------------------------------------------------------------------------------------------------#
get.run.id <- function(run.type, index, trait = NULL, pft.name = NULL) {
  # NULL components are dropped by c(), so only supplied pieces appear
  # in the hyphen-joined id.
  paste(c(run.type, pft.name, trait, index), collapse = "-")
} # get.run.id
##' Convert an R list (or other object) to an XML node tree
##'
##' S3 generic; dispatches on the class of \code{x}.  See
##' \code{listToXml.default} for the list/atomic implementation.
##' @param x object to convert
##' @param ... additional arguments passed to methods
##' @export
listToXml <- function(x, ...) {
  UseMethod("listToXml")
} # listToXml
#--------------------------------------------------------------------------------------------------#
##' Convert List to XML
##'
##' Can convert list or other object to an xml object using xmlNode
##' @title List to XML
##' @param item
##' @param tag xml tag
##' @return xmlNode
##' @export
##' @author David LeBauer, Carl Davidson, Rob Kooper
#--------------------------------------------------------------------------------------------------#
listToXml.default <- function(item, tag) {
  # Leaf case: a non-list item becomes either an empty node whose attributes
  # are the item's named elements (length > 1), or a simple text node.
  if (typeof(item) != "list") {
    if (length(item) > 1) {
      xml <- XML::xmlNode(tag)
      for (name in names(item)) {
        XML::xmlAttrs(xml)[[name]] <- item[[name]]
      }
      return(xml)
    } else {
      return(XML::xmlNode(tag, item))
    }
  }
  # create the node
  if (identical(names(item), c("text", ".attrs"))) {
    # special case a node with text and attributes
    xml <- XML::xmlNode(tag, item[["text"]])
  } else {
    # node with child nodes: recurse into every element except the reserved
    # ".attrs" entry, which holds the node's XML attributes
    xml <- XML::xmlNode(tag)
    for (i in seq_along(item)) {
      if (is.null(names(item)) || names(item)[i] != ".attrs") {
        xml <- XML::append.xmlNode(xml, listToXml(item[[i]], names(item)[i]))
      }
    }
  }
  # add attributes to node; a missing ".attrs" yields NULL and an empty loop
  attrs <- item[[".attrs"]]
  for (name in names(attrs)) {
    XML::xmlAttrs(xml)[[name]] <- attrs[[name]]
  }
  return(xml)
} # listToXml.default
#--------------------------------------------------------------------------------------------------#
##' Zero bounded density using log density transform
##'
##' Provides a zero bounded density estimate of a parameter.
##' Kernel Density Estimation used by the \code{\link{stats::density}} function will cause problems at the left hand end because it will put some weight on negative values. One useful approach is to transform to logs, estimate the density using KDE, and then transform back.
##' @title Zero Bounded Density
##' @param x
##' @param bw The smoothing bandwidth to be used. See 'bw.nrd'
##' @return data frame with back-transformed log density estimate
##' @author \href{http://stats.stackexchange.com/q/6588/2750}{Rob Hyndman}
##' @references M. P. Wand, J. S. Marron and D. Ruppert, 1991. Transformations in Density Estimation. Journal of the American Statistical Association. 86(414):343-353 \url{http://www.jstor.org/stable/2290569}
zero.bounded.density <- function(x, bw = "SJ", n = 1001) {
  # Estimate the density of log(x), then map back to the original scale.
  log.fit <- density(log(x), bw = bw, n = n)
  back <- exp(log.fit$x)
  # Jacobian of the log transform: f_X(x) = f_Y(log x) / x; prepend the
  # (0, 0) point so the curve is anchored at the origin.
  log.fit$y <- c(0, log.fit$y / back)
  log.fit$x <- c(0, back)
  log.fit
} # zero.bounded.density
#--------------------------------------------------------------------------------------------------#
##' Summarize results of replicate observations in trait data query
##'
##' @title Summarize Results
##' @param result dataframe with results of trait data query
##' @return result with replicate observations summarized
##' @export
##' @author David LeBauer
summarize.result <- function(result) {
  # Rows with n == 1 are individual observations: collapse replicates that
  # share the same citation/site/treatment/covariate combination into a
  # single row with the replicate mean and its standard error.
  # NOTE(review): relies on plyr non-standard evaluation; the grouping
  # column names must exist verbatim in `result`.
  ans1 <- plyr::ddply(result[result$n == 1, ],
                      plyr::.(citation_id, site_id, trt_id, control, greenhouse,
                              date, time, cultivar_id, specie_id),
                      plyr::summarise, n = length(n),
                      mean = mean(mean),
                      statname = ifelse(length(n) == 1, "none", "SE"),
                      stat = sd(mean) / sqrt(length(n)))
  # Rows already summarized upstream (n != 1) pass through unchanged,
  # restricted to the columns produced above so rbind() lines up.
  ans2 <- result[result$n != 1, colnames(ans1)]
  return(rbind(ans1, ans2))
} # summarize.result
#--------------------------------------------------------------------------------------------------#
##' Further summarizes output from summary.mcmc
##'
##' @title Get stats for parameters in MCMC output
##' @param mcmc.summary
##' @param sample.size
##' @return list with summary statistics for parameters in an MCMC chain
##' @author David LeBauer
get.stats.mcmc <- function(mcmc.summary, sample.size) {
  # Collect formatted stats for the standard meta-analysis parameters;
  # parameters absent from the chain are reported as NA.
  stats <- list(n = sample.size)
  wanted <- c("beta.o", "sd.y", "sd.site", "sd.trt", "beta.ghs[2]")
  for (parm in wanted) {
    # strip the index from the greenhouse term for a cleaner output name
    out.name <- if (parm == "beta.ghs[2]") "beta.ghs" else parm
    if (parm %in% rownames(mcmc.summary$statistics)) {
      stats[[out.name]] <- get.parameter.stat(mcmc.summary, parameter = parm)
    } else {
      stats[[out.name]] <- NA
    }
  }
  unlist(stats)
} # get.stats.mcmc
#--------------------------------------------------------------------------------------------------#
##' A helper function for building a LaTex table.
##'
##' Used by \code{\link{get.parameter.stat}}.
##' @title Paste Stats
##' @name paste.stats
##' @param mcmc.summary
##' @param median
##' @param lcl
##' @param ucl
##' @param n
##' @export
##' @author David LeBauer
paste.stats <- function(mcmc.summary, median, lcl, ucl, n = 2) {
  # Format "$median(lcl,ucl)$" for inclusion in a LaTeX table.
  # NOTE(review): mcmc.summary is unused here; kept for interface
  # compatibility with existing callers.
  med.str <- tabnum(median, n)
  interval <- paste0("(", tabnum(lcl, n), ",", tabnum(ucl, n), ")")
  paste0("$", med.str, interval, "$")
} # paste.stats
#--------------------------------------------------------------------------------------------------#
##' Gets statistics for LaTeX - formatted table
##'
##' @title Get Parameter Statistics
##' @param mcmc.summary
##' @param parameter
##' @return table with parameter statistics
##' @author David LeBauer
##' @export
##' @examples
##' \dontrun{get.parameter.stat(mcmc.summaries[[1]], 'beta.o')}
get.parameter.stat <- function(mcmc.summary, parameter) {
  # Pull the median and 95% interval for one parameter and format it
  # with paste.stats().
  q <- mcmc.summary$quantiles
  paste.stats(median = q[parameter, "50%"],
              lcl = q[parameter, "2.5%"],
              ucl = q[parameter, "97.5%"],
              n = 2)
} # get.parameter.stat
#--------------------------------------------------------------------------------------------------#
##' Calculate mean, variance statistics, and CI from a known distribution
##'
##' @title Probability Distirbution Function Statistics
##' @param distn name of distribution used by R (beta, f, gamma, lnorm, norm, weibull)
##' @param A first parameter
##' @param B second parameter
##' @return list with mean, variance, and 95 CI
##' @author David LeBauer
## in future, perhaps create S3 functions: get.stats.pdf <- pdf.stats
pdf.stats <- function(distn, A, B) {
  # Closed-form mean/variance where available; the F distribution falls back
  # to a Monte Carlo estimate when the moment does not exist in closed form.
  distn <- as.character(distn)
  mean <- switch(distn,
    gamma   = A / B,
    lnorm   = exp(A + 1/2 * B^2),
    beta    = A / (A + B),
    weibull = B * gamma(1 + 1/A),
    norm    = A,
    f       = ifelse(B > 2, B / (B - 2), mean(rf(10000, A, B))))
  var <- switch(distn,
    gamma   = A / B^2,
    lnorm   = exp(2 * A + B^2) * (exp(B^2) - 1),
    beta    = A * B / ((A + B)^2 * (A + B + 1)),
    weibull = B^2 * (gamma(1 + 2/A) - gamma(1 + 1/A)^2),
    norm    = B^2,
    f       = ifelse(B > 4,
                     2 * B^2 * (A + B - 2) / (A * (B - 2)^2 * (B - 4)),
                     var(rf(1e+05, A, B))))
  # 95% interval from the matching quantile function, e.g. qnorm for "norm".
  qci <- get(paste0("q", distn))
  ci <- qci(c(0.025, 0.975), A, B)
  unlist(list(mean = mean, var = var, lcl = ci[1], ucl = ci[2]))
} # pdf.stats
#--------------------------------------------------------------------------------------------------#
##' Dictionary of terms used to identify traits in ed, filenames, and figures
##'
##' @return a dataframe with id, the name used by ED and PEcAn database for a parameter; fileid, an abbreviated
##' name used for files; figid, the parameter name written out as best known in english for figures
##' and tables.
##'
##' @param traits a vector of trait names, if traits = NULL, all of the traits will be returned.
##' @export
##' @examples
##' # convert parameter name to a string appropriate for end-use plotting
##' \dontrun{
##' trait.lookup('growth_resp_factor')
##' trait.lookup('growth_resp_factor')$figid
##'
##' # get a list of all traits and units in dictionary
##' trait.lookup()[,c('figid', 'units')]
##' }
trait.lookup <- function(traits = NULL) {
  # HACK: shameless hack Ultimately we'll want this to be read once at the start of
  # run time This could also be represented in the database, but because it is used
  # to determine which parameters to feed to the model, it could be argued that
  # it's conceptually model specific
  # NOTE(review): data() loads trait.dictionary into the global environment;
  # assumes the PEcAn.utils data set is available on the search path.
  data(trait.dictionary)
  if (is.null(traits)) {
    # no filter requested: return the full dictionary
    trait.defs <- trait.dictionary
  } else {
    # match() preserves the order of the requested traits; unknown trait
    # ids produce NA rows
    trait.defs <- trait.dictionary[match(traits, trait.dictionary$id), ]
  }
  return(trait.defs)
} # trait.lookup
#--------------------------------------------------------------------------------------------------#
##' Convert number to n significant digits
##'
##' @title Table numbers
##' @param x numeric value or vector
##' @param n number of significant figures
##' @export
##' @author David LeBauer
##' @return x rounded to n significant figures
##' @examples
##' tabnum(1.2345)
##' tabnum(1.2345, n = 4)
tabnum <- function(x, n = 3) {
  # signif() may return values with attributes; as.numeric() strips them,
  # so the original names are re-attached explicitly.
  setNames(as.numeric(signif(x, n)), names(x))
} # tabnum
#--------------------------------------------------------------------------------------------------#
##' Scale temperature dependent trait from measurement temperature to reference temperature
##'
##' @title Arrhenius scaling
##' @param observed.value observed value of temperature dependent trait, e.g. Vcmax, root respiration rate
##' @param old.temp temperature at which measurement was taken or previously scaled to
##' @param new.temp temperature to be scaled to, default = 25 C
##' @return numeric value at reference temperature
##' @export
##' @author unknown
arrhenius.scaling <- function(observed.value, old.temp, new.temp = 25) {
  # Arrhenius-type temperature correction with a fixed 3000 K activation term;
  # temperatures are supplied in Celsius and converted to Kelvin here.
  to.kelvin <- 273.15
  scale.factor <- exp(3000 * (1 / (to.kelvin + new.temp) - 1 / (to.kelvin + old.temp)))
  observed.value / scale.factor
} # arrhenius.scaling
#--------------------------------------------------------------------------------------------------#
##' Capitalize a string
##'
##' @title Capitalize a string
##' @param x string
##' @return x, capitalized
##' @author David LeBauer
#--------------------------------------------------------------------------------------------------#
capitalize <- function(x) {
  # Uppercase the first letter of each space-separated word.
  # Only the first element of x is processed (scalar contract).
  words <- strsplit(as.character(x), " ")[[1]]
  first <- toupper(substring(words, 1, 1))
  rest <- substring(words, 2)
  paste0(first, rest, collapse = " ")
} # capitalize
isFALSE <- function(x) !isTRUE(x)
#--------------------------------------------------------------------------------------------------#
##' New xtable
##'
##' utility to properly escape the '%' sign for latex
##' @title newxtable
##' @param x data.frame to be converted to latex table
##' @param environment can be 'table'; 'sidewaystable' if using latex rotating package
##' @param table.placement
##' @param label
##' @param caption
##' @param caption.placement
##' @param align
##' @return Latex version of table, with percentages properly formatted
##' @author David LeBauer
newxtable <- function(x, environment = "table", table.placement = "ht", label = NULL,
                      caption = NULL, caption.placement = NULL, align = NULL) {
  # NOTE(review): xtable() is called unqualified -- assumes the xtable package
  # is attached by the caller; consider xtable::xtable().
  print(xtable(x, label = label, caption = caption, align = align),
        floating.environment = environment,
        table.placement = table.placement,
        caption.placement = caption.placement,
        # sanitize.text.function = function(x) gsub("%", "\\\\%", x),
        # blank out row names instead of printing them
        sanitize.rownames.function = function(x) paste(''))
} # newxtable
#--------------------------------------------------------------------------------------------------#
##' Convert author, year, title to bibtex citation format
##'
##' Converts author year title to author1999abc format
##' @title bibtexify
##' @param author name of first author
##' @param year year of publication
##' @param title manuscript title
##' @return bibtex citation
#--------------------------------------------------------------------------------------------------#
bibtexify <- function(author, year, title) {
  # e.g. "smith" + 1999 + "Some Title" -> "smith1999<3-char acronym>"
  title.code <- abbreviate(title, minlength = 3, strict = TRUE)
  paste0(author, year, title.code)
} # bibtexify
#--------------------------------------------------------------------------------------------------#
##' Convert categorical variable into sequential integers
##'
##' Turns any categorical variable into a sequential integer.
##' This transformation is required for using data in BUGS/JAGS
##' @title as.sequence
##' @param x categorical variable as vector
##' @param na.rm logical: return NA's or replace with max(x) + 1
##' @return sequence from 1:length(unique(x))
##' @export
##' @author David LeBauer
##' Convert categorical variable into sequential integers
##'
##' Turns any categorical variable into a sequential integer, numbered in
##' order of first appearance.  Required for using data in BUGS/JAGS.
##' @param x categorical variable as vector
##' @param na.rm logical: if TRUE, NA entries are replaced with max(x2) + 1
##' @return integer codes, one per element of x
as.sequence <- function(x, na.rm = TRUE) {
  # factor levels in order of first appearance -> codes follow input order
  x2 <- as.integer(factor(x, unique(x)))
  if (all(is.na(x2))) {
    # input was entirely NA: code everything as category 1
    x2 <- rep(1, length(x2))
  }
  # isTRUE() instead of `na.rm == TRUE`: the latter yields NA (and an
  # if() error) when na.rm itself is NA
  if (isTRUE(na.rm)) {
    x2[is.na(x2)] <- max(x2, na.rm = TRUE) + 1
  }
  return(x2)
} # as.sequence
#--------------------------------------------------------------------------------------------------#
##' Test ssh access
##'
##' Test to determine if access to a remote server is available.
##' Can be used to exclude / include tests or to prevent / identify access errors
##' @title Test Remote
##' @param host
##' @return logical - TRUE if remote connection is available
##' @author Rob Kooper
test.remote <- function(host) {
  # Runs /bin/true on the remote host; exit status 0 means the connection
  # works.  try() keeps connection errors from propagating: a failed try()
  # returns a "try-error" object, which compares unequal to 0 and so yields
  # FALSE.  NOTE(review): remote.execute.cmd is defined elsewhere in PEcAn;
  # assumed to return the command's exit status -- confirm.
  return(try(remote.execute.cmd(host, "/bin/true")) == 0)
} # test.remote
##' Create a temporary settings file
##'
##' Uses \code{\link{tempfile}} function to provide a valid temporary file (OS independent)
##' Useful for testing functions that depend on settings file
##' Reference: http://stackoverflow.com/a/12940705/199217
##' @title temp.settings
##' @param settings.txt
##' @return character vector written to and read from a temporary file
##' @export
##' @author David LeBauer
temp.settings <- function(settings.txt) {
  # Round-trip the text through a temp file (removed on exit) so callers
  # get exactly what a settings file on disk would contain.
  tmp <- tempfile()
  on.exit(unlink(tmp))
  writeLines(settings.txt, con = tmp)
  readLines(tmp)
} # temp.settings
##' Test if function gives an error
##'
##' adaptation of try that returns a logical value (FALSE if error)
##' @title tryl
##' @param FUN function to be evaluated for error
##' @return FALSE if function returns error; else TRUE
##' @export
##' @examples
##' tryl(1+1)
##' # TRUE
##' tryl(sum('a'))
##' # FALSE
##' @author David LeBauer
##' Test if function gives an error
##'
##' Adaptation of try() that returns a logical value (FALSE on error).
##' @param FUN expression to be evaluated for error (evaluated lazily
##'   inside tryCatch, so errors raised during evaluation are caught)
##' @return FALSE if evaluating FUN raises an error; else TRUE
tryl <- function(FUN) {
  out <- tryCatch(FUN, error = function(e) e)
  # inherits() is the idiomatic class test (same semantics as
  # any(class(out) == "error"), which this replaces)
  !inherits(out, "error")
} # tryl
##' load model package
##' @title Load model package
##' @param model name of model
##' @return FALSE if function returns error; else TRUE
##' @export
##' @examples
##' \dontrun{require.modelpkg(BioCro)}
##' @author David LeBauer
load.modelpkg <- function(model) {
  # Model interface packages follow the naming convention PEcAn.<model>.
  pecan.modelpkg <- paste0("PEcAn.", model)
  # Skip if the package is already attached to the session.
  if (!pecan.modelpkg %in% names(sessionInfo()$otherPkgs)) {
    if (pecan.modelpkg %in% rownames(installed.packages())) {
      # do.call so the package name can be passed as a character string
      do.call(require, args = list(pecan.modelpkg))
    } else {
      logger.error("I can't find a package for the ", model,
                   "model; I expect it to be named ", pecan.modelpkg)
    }
  }
} # load.modelpkg
##' conversion function for the unit conversions that udunits cannot handle but often needed in PEcAn calculations
##' @title misc.convert
##' @export
##' @param x convertible values
##' @param u1 unit to be converted from, character
##' @param u2 unit to be converted to, character
##' @return val converted values
##' @author Istem Fer, Shawn Serbin
misc.convert <- function(x, u1, u2) {
  # Physical constants used for mole <-> mass conversions.
  amC <- PeriodicTable::mass("C") # atomic mass of carbon
  mmH2O <- sum(PeriodicTable::mass(c("H", "H", "O"))) # molar mass of H2O, g/mol
  # Hand-rolled conversions for unit pairs udunits cannot handle directly
  # (they require the substance's molar mass).
  if (u1 == "umol C m-2 s-1" & u2 == "kg C m-2 s-1") {
    val <- udunits2::ud.convert(x, "ug", "kg") * amC
  } else if (u1 == "kg C m-2 s-1" & u2 == "umol C m-2 s-1") {
    val <- udunits2::ud.convert(x, "kg", "ug") / amC
  } else if (u1 == "mol H2O m-2 s-1" & u2 == "kg H2O m-2 s-1") {
    val <- udunits2::ud.convert(x, "g", "kg") * mmH2O
  } else if (u1 == "kg H2O m-2 s-1" & u2 == "mol H2O m-2 s-1") {
    val <- udunits2::ud.convert(x, "kg", "g") / mmH2O
  } else if (u1 == "Mg ha-1" & u2 == "kg C m-2") {
    val <- x * udunits2::ud.convert(1, "Mg", "kg") * udunits2::ud.convert(1, "ha-1", "m-2")
  } else if (u1 == "kg C m-2" & u2 == "Mg ha-1") {
    val <- x * udunits2::ud.convert(1, "kg", "Mg") * udunits2::ud.convert(1, "m-2", "ha-1")
  } else {
    # Fallback: rewrite "gC" as "g*12" (grams times carbon's atomic mass)
    # so carbon-mass units parse, then let udunits do the conversion.
    u1 <- gsub("gC","g*12",u1)
    u2 <- gsub("gC","g*12",u2)
    val <- udunits2::ud.convert(x,u1,u2)
    # logger.severe(paste("Unknown units", u1, u2))
  }
  return(val)
} # misc.convert
##' function to check whether units are convertible by misc.convert function
##' @title misc.are.convertible
##' @export
##' @param u1 unit to be converted from, character
##' @param u2 unit to be converted to, character
##' @return logical
##' @author Istem Fer, Shawn Serbin
##' Check whether misc.convert() has a special-cased conversion for a unit pair
##'
##' @param u1 unit to be converted from, character
##' @param u2 unit to be converted to, character
##' @return TRUE iff (u1, u2) is one of the supported conversion pairs
misc.are.convertible <- function(u1, u2) {
  # Paired by position: units.from[i] converts to units.to[i].
  units.from <- c("umol C m-2 s-1", "kg C m-2 s-1",
                  "mol H2O m-2 s-1", "kg H2O m-2 s-1",
                  "Mg ha-1", "kg C m-2")
  units.to <- c("kg C m-2 s-1", "umol C m-2 s-1",
                "kg H2O m-2 s-1", "mol H2O m-2 s-1",
                "kg C m-2", "Mg ha-1")
  i <- match(u1, units.from)
  j <- match(u2, units.to)
  # scalar && (the original used vectorized & inside if()) short-circuits,
  # so the i == j comparison is never reached with NA indices
  !is.na(i) && !is.na(j) && i == j
} # misc.are.convertible
##' Convert expression to variable names
##' @title convert.expr
##' @param expression expression string
##' @return list
##' @export
##' @author Istem Fer
convert.expr <- function(expression) {
  # Split "LHS=RHS" into the derived-variable name and the derivation
  # equation, then pull the variable names out of the RHS.
  lhs <- gsub("=.*$", "", expression)  # name of the derived variable
  rhs <- gsub(".*=", "", expression)   # derivation equation
  # any character other than [a-zA-Z_.] acts as a delimiter between names
  delim.pos <- gregexpr('[^a-zA-Z_.]', rhs)
  delims <- unlist(regmatches(rhs, delim.pos))
  if (length(delims) != 0) {
    pattern <- paste0("[", paste0(delims, collapse = ""), "]")
    vars <- unlist(strsplit(rhs, pattern))
    vars <- vars[vars != ""]  # drop empties from adjacent delimiters
  } else {
    # no operators at all: the RHS is itself a single variable name
    vars <- rhs
  }
  list(variable.drv = lhs,
       variable.eqn = list(variables = vars, expression = rhs))
} # convert.expr
##' Simple function to use ncftpget for FTP downloads behind a firewall.
##' Requires ncftpget and a properly formatted config file in the users
##' home directory
##' @title download.file
##' @param url complete URL for file download
##' @param filename destination file name
##' @param method Method of file retrieval. Can set this using the options(download.ftp.method=[method]) in your Rprofile.
##' example options(download.ftp.method="ncftpget")
##'
##' @examples
##' download.file("http://lib.stat.cmu.edu/datasets/csb/ch11b.txt","~/test.download.txt")
##'
##' @examples
##' \dontrun{
##' download.file("ftp://ftp.cdc.noaa.gov/Datasets/NARR/monolevel/pres.sfc.2000.nc", "~/pres.sfc.2000.nc")
##' }
##'
##' @export
##'
##' @author Shawn Serbin, Rob Kooper
##' Download a file, optionally via ncftpget for FTP behind a firewall
##'
##' NOTE(review): masks utils::download.file; non-FTP URLs fall through to it.
##' @param url complete URL for file download
##' @param filename destination file name
##' @param method file retrieval method; defaults to
##'   getOption("download.ftp.method") for FTP URLs, e.g.
##'   options(download.ftp.method = "ncftpget")
download.file <- function(url, filename, method) {
  if (startsWith(url, "ftp://")) {
    # BUG FIX: the original `method <- if (missing(method)) ...` assigned
    # NULL whenever method WAS supplied (if without else returns NULL),
    # making the comparison below fail with a length-zero condition.
    if (missing(method)) {
      method <- getOption("download.ftp.method", default = "auto")
    }
    if (method == "ncftpget") {
      logger.debug(paste0("FTP Method: ",method))
      #system2("ncftpget", c("-c", "url", ">", filename))
      # NOTE(review): url/filename are interpolated into a shell command
      # unquoted -- verify inputs are trusted before calling.
      system(paste(method,"-c",url,">",filename,sep=" "))
    } else {
      utils::download.file(url, filename, method)
    }
  } else {
    utils::download.file(url, filename)
  }
}
####################################################################################################
### EOF. End of R script file.
####################################################################################################
| /utils/R/utils.R | permissive | serbinsh/pecan | R | false | false | 26,618 | r | #-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
# Small, miscellaneous functions for use throughout PECAn
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
##' return MstMIP variable as ncvar
##'
##' returns a MstMIP variable as a ncvar based on name and other parameters
##' passed in.
##'
##' @title MstMIP variable
##' @export
##' @param name name of variable
##' @param lat latitude if dimension requests it
##' @param lon longitude if dimension requests it
##' @param time time if dimension requests it
##' @param nsoil nsoil if dimension requests it
##' @return ncvar based on MstMIP definition
##' @author Rob Kooper
mstmipvar <- function(name, lat = NA, lon = NA, time = NA, nsoil = NA, silent = FALSE) {
  # Look the variable up in the standard MsTMIP table shipped with PEcAn.utils.
  # NOTE(review): data() loads into the caller's global environment, not this
  # function's frame -- works because the lookup follows immediately, but
  # confirm that is intended.
  data(mstmip_vars, package = "PEcAn.utils")
  var <- mstmip_vars[mstmip_vars$Variable.Name == name, ]
  dims <- list()
  if (nrow(var) == 0) {
    # Not a standard MsTMIP variable: fall back to the PEcAn-local table.
    data(mstmip_local, package = "PEcAn.utils")
    var <- mstmip_local[mstmip_local$Variable.Name == name, ]
    if (nrow(var) == 0) {
      # Unknown in both tables: return a bare variable with only a time
      # dimension and no units.
      if (!silent) {
        logger.info("Don't know about variable", name, " in mstmip_vars in PEcAn.utils")
      }
      if (is.na(time)) {
        # Default time axis: daily values starting 1900-01-01, unlimited.
        time <- ncdf4::ncdim_def(name = "time", units = "days since 1900-01-01 00:00:00",
                                 vals = 1:365, calendar = "standard", unlim = TRUE)
      }
      return(ncdf4::ncvar_def(name, "", list(time), -999, name))
    }
  }
  # Assemble the dimension list from the up-to-four dim columns of the table.
  # NOTE(review): is.na() on an ncdim object (a list) returns a vector, and
  # `&&` errors on length > 1 conditions in R >= 4.3 -- verify callers only
  # pass NA or a scalar-safe value here.
  for (i in 1:4) {
    vd <- var[[paste0("dim", i)]]
    if (vd == "lon" && !is.na(lon)) {
      dims[[length(dims) + 1]] <- lon
    } else if (vd == "lat" && !is.na(lat)) {
      dims[[length(dims) + 1]] <- lat
    } else if (vd == "time" && !is.na(time)) {
      dims[[length(dims) + 1]] <- time
    } else if (vd == "nsoil" && !is.na(nsoil)) {
      dims[[length(dims) + 1]] <- nsoil
    } else if (vd == "na") {
      # "na" marks an unused dimension slot; skip
    } else {
      if (!silent) {
        logger.info("Don't know dimension for", vd, "for variable", name)
      }
    }
  }
  # -999 is the conventional MsTMIP missing value.
  ncvar <- ncdf4::ncvar_def(name, as.character(var$Units), dims, -999)
  if (var$Long.name != "na") {
    ncvar$longname <- as.character(var$Long.name)
  }
  return(ncvar)
} # mstimipvar
#--------------------------------------------------------------------------------------------------#
##' left padded by zeros up to a given number of digits.
##'
##' returns a string representing a given number
##' @title Left Pad Zeros
##' @export
##' @param num number to be padded (integer)
##' @param digits number of digits to add
##' @return num with zeros to the left
##' @export
##' @author Carl Davidson
left.pad.zeros <- function(num, digits = 5) {
  # Build a zero-padded fixed-width format such as "%05.0f" and apply it.
  fmt <- sprintf("%%0%.0f.0f", digits)
  sprintf(fmt, num)
} # left.pad.zeros
##' Truncates vector at 0
##' @name zero.truncate
##' @title Zero Truncate
##' @param y numeric vector
##' @return numeric vector with all values less than 0 set to 0
##' @export
##' @author <unknown>
zero.truncate <- function(y) {
  # Negative values and NAs both collapse to zero.
  replace(y, y < 0 | is.na(y), 0)
} # zero.truncate
#--------------------------------------------------------------------------------------------------#
##' R implementation of rsync
##'
##' rsync is a file copying tool in bash
##' @title rsync
##' @param args rsync arguments (see man rsync)
##' @param from source
##' @param to destination
##' @param pattern file pattern to be matched
##' @return nothing, transfers files as a side effect
##' @export
##' @author David LeBauer
##' @author Shawn Serbin
#--------------------------------------------------------------------------------------------------#
rsync <- function(args, from, to, pattern = "") {
  logger.warn("NEED TO USE TUNNEL")
  # Note: pattern is appended directly to the source path with no separator.
  cmd <- paste("rsync", args, paste0(from, pattern), to)
  system(cmd, intern = TRUE)
} # rsync
#--------------------------------------------------------------------------------------------------#
##' R implementation of SSH
##'
##' @title SSH
##' @param host
##' @param ...
##' @param args
##' @export
#--------------------------------------------------------------------------------------------------#
ssh <- function(host, ..., args = "") {
  logger.warn("NEED TO USE TUNNEL")
  # Local commands run directly; remote ones are wrapped in `ssh -T host "..."`.
  command <- if (host == "localhost") {
    paste0(..., args)
  } else {
    paste0("ssh -T ", host, " \"", ..., "\" ", args)
  }
  system(command)
} # ssh
#--------------------------------------------------------------------------------------------------#
##' Convert vector to comma delimited string
##'
##' vecpaste, turns vector into comma delimited string fit for SQL statements.
##' @title vecpaste
##' @param x vector
##' @return comma delimited string
##' @export
vecpaste <- function(x) paste(paste0("'", x, "'"), collapse = ",")
#--------------------------------------------------------------------------------------------------#
##' returns an id representing a model run
##'
##' Provides a consistent method of naming runs; for use in model input files and indices
##' @title Get Run ID
##' @param run.type character, can be any character; currently 'SA' is used for sensitivity analysis, 'ENS' for ensemble run.
##' @param index unique index for different runs, e.g. integer counting members of an
##' ensemble or a quantile used to which a trait has been perturbed for sensitivity analysis
##' @param trait name of trait being sampled (for sensitivity analysis)
##' @param pft.name name of PFT (value from pfts.names field in database)
##' @return id representing a model run
##' @export
##' @examples
##' get.run.id('ENS', left.pad.zeros(1, 5))
##' get.run.id('SA', round(qnorm(-3),3), trait = 'Vcmax')
##' @author Carl Davidson, David LeBauer
#--------------------------------------------------------------------------------------------------#
get.run.id <- function(run.type, index, trait = NULL, pft.name = NULL) {
  # NULL components are dropped by c(), so only supplied pieces appear
  # in the hyphen-joined id.
  paste(c(run.type, pft.name, trait, index), collapse = "-")
} # get.run.id
##' Convert an R list (or other object) to an XML node tree
##'
##' S3 generic; dispatches on the class of \code{x}.  See
##' \code{listToXml.default} for the list/atomic implementation.
##' @param x object to convert
##' @param ... additional arguments passed to methods
##' @export
listToXml <- function(x, ...) {
  UseMethod("listToXml")
} # listToXml
#--------------------------------------------------------------------------------------------------#
##' Convert List to XML
##'
##' Can convert list or other object to an xml object using xmlNode
##' @title List to XML
##' @param item
##' @param tag xml tag
##' @return xmlNode
##' @export
##' @author David LeBauer, Carl Davidson, Rob Kooper
#--------------------------------------------------------------------------------------------------#
listToXml.default <- function(item, tag) {
  # Leaf case: a non-list item becomes either an empty node whose attributes
  # are the item's named elements (length > 1), or a simple text node.
  if (typeof(item) != "list") {
    if (length(item) > 1) {
      xml <- XML::xmlNode(tag)
      for (name in names(item)) {
        XML::xmlAttrs(xml)[[name]] <- item[[name]]
      }
      return(xml)
    } else {
      return(XML::xmlNode(tag, item))
    }
  }
  # create the node
  if (identical(names(item), c("text", ".attrs"))) {
    # special case a node with text and attributes
    xml <- XML::xmlNode(tag, item[["text"]])
  } else {
    # node with child nodes: recurse into every element except the reserved
    # ".attrs" entry, which holds the node's XML attributes
    xml <- XML::xmlNode(tag)
    for (i in seq_along(item)) {
      if (is.null(names(item)) || names(item)[i] != ".attrs") {
        xml <- XML::append.xmlNode(xml, listToXml(item[[i]], names(item)[i]))
      }
    }
  }
  # add attributes to node; a missing ".attrs" yields NULL and an empty loop
  attrs <- item[[".attrs"]]
  for (name in names(attrs)) {
    XML::xmlAttrs(xml)[[name]] <- attrs[[name]]
  }
  return(xml)
} # listToXml.default
#--------------------------------------------------------------------------------------------------#
##' Zero bounded density using log density transform
##'
##' Provides a zero bounded density estimate of a parameter.
##' Kernel Density Estimation used by the \code{\link{stats::density}} function will cause problems at the left hand end because it will put some weight on negative values. One useful approach is to transform to logs, estimate the density using KDE, and then transform back.
##' @title Zero Bounded Density
##' @param x
##' @param bw The smoothing bandwidth to be used. See 'bw.nrd'
##' @return data frame with back-transformed log density estimate
##' @author \href{http://stats.stackexchange.com/q/6588/2750}{Rob Hyndman}
##' @references M. P. Wand, J. S. Marron and D. Ruppert, 1991. Transformations in Density Estimation. Journal of the American Statistical Association. 86(414):343-353 \url{http://www.jstor.org/stable/2290569}
zero.bounded.density <- function(x, bw = "SJ", n = 1001) {
  # Estimate the density of log(x), then map back to the original scale.
  log.fit <- density(log(x), bw = bw, n = n)
  back <- exp(log.fit$x)
  # Jacobian of the log transform: f_X(x) = f_Y(log x) / x; prepend the
  # (0, 0) point so the curve is anchored at the origin.
  log.fit$y <- c(0, log.fit$y / back)
  log.fit$x <- c(0, back)
  log.fit
} # zero.bounded.density
#--------------------------------------------------------------------------------------------------#
##' Summarize results of replicate observations in trait data query
##'
##' @title Summarize Results
##' @param result dataframe with results of trait data query
##' @return result with replicate observations summarized
##' @export
##' @author David LeBauer
summarize.result <- function(result) {
  # Rows with n == 1 are individual observations: collapse replicates that
  # share the same citation/site/treatment/covariate combination into a
  # single row with the replicate mean and its standard error.
  # NOTE(review): relies on plyr non-standard evaluation; the grouping
  # column names must exist verbatim in `result`.
  ans1 <- plyr::ddply(result[result$n == 1, ],
                      plyr::.(citation_id, site_id, trt_id, control, greenhouse,
                              date, time, cultivar_id, specie_id),
                      plyr::summarise, n = length(n),
                      mean = mean(mean),
                      statname = ifelse(length(n) == 1, "none", "SE"),
                      stat = sd(mean) / sqrt(length(n)))
  # Rows already summarized upstream (n != 1) pass through unchanged,
  # restricted to the columns produced above so rbind() lines up.
  ans2 <- result[result$n != 1, colnames(ans1)]
  return(rbind(ans1, ans2))
} # summarize.result
#--------------------------------------------------------------------------------------------------#
##' Further summarizes output from summary.mcmc
##'
##' @title Get stats for parameters in MCMC output
##' @param mcmc.summary
##' @param sample.size
##' @return list with summary statistics for parameters in an MCMC chain
##' @author David LeBauer
get.stats.mcmc <- function(mcmc.summary, sample.size) {
  # Collect formatted stats for the standard meta-analysis parameters;
  # parameters absent from the chain are reported as NA.
  stats <- list(n = sample.size)
  wanted <- c("beta.o", "sd.y", "sd.site", "sd.trt", "beta.ghs[2]")
  for (parm in wanted) {
    # strip the index from the greenhouse term for a cleaner output name
    out.name <- if (parm == "beta.ghs[2]") "beta.ghs" else parm
    if (parm %in% rownames(mcmc.summary$statistics)) {
      stats[[out.name]] <- get.parameter.stat(mcmc.summary, parameter = parm)
    } else {
      stats[[out.name]] <- NA
    }
  }
  unlist(stats)
} # get.stats.mcmc
#--------------------------------------------------------------------------------------------------#
##' A helper function for building a LaTex table.
##'
##' Used by \code{\link{get.parameter.stat}}.
##' @title Paste Stats
##' @name paste.stats
##' @param mcmc.summary
##' @param median
##' @param lcl
##' @param ucl
##' @param n
##' @export
##' @author David LeBauer
paste.stats <- function(mcmc.summary, median, lcl, ucl, n = 2) {
  # Format "$median(lcl,ucl)$" for inclusion in a LaTeX table.
  # NOTE(review): mcmc.summary is unused here; kept for interface
  # compatibility with existing callers.
  med.str <- tabnum(median, n)
  interval <- paste0("(", tabnum(lcl, n), ",", tabnum(ucl, n), ")")
  paste0("$", med.str, interval, "$")
} # paste.stats
#--------------------------------------------------------------------------------------------------#
##' Gets statistics for LaTeX - formatted table
##'
##' @title Get Parameter Statistics
##' @param mcmc.summary
##' @param parameter
##' @return table with parameter statistics
##' @author David LeBauer
##' @export
##' @examples
##' \dontrun{get.parameter.stat(mcmc.summaries[[1]], 'beta.o')}
get.parameter.stat <- function(mcmc.summary, parameter) {
  # Pull the median and 95% interval for one parameter and format it
  # with paste.stats().
  q <- mcmc.summary$quantiles
  paste.stats(median = q[parameter, "50%"],
              lcl = q[parameter, "2.5%"],
              ucl = q[parameter, "97.5%"],
              n = 2)
} # get.parameter.stat
#--------------------------------------------------------------------------------------------------#
##' Calculate mean, variance statistics, and CI from a known distribution
##'
##' @title Probability Distirbution Function Statistics
##' @param distn name of distribution used by R (beta, f, gamma, lnorm, norm, weibull)
##' @param A first parameter
##' @param B second parameter
##' @return list with mean, variance, and 95 CI
##' @author David LeBauer
## in future, perhaps create S3 functions: get.stats.pdf <- pdf.stats
pdf.stats <- function(distn, A, B) {
  # Closed-form mean/variance where available; the F distribution falls back
  # to a Monte Carlo estimate when the moment does not exist in closed form.
  distn <- as.character(distn)
  mean <- switch(distn,
    gamma   = A / B,
    lnorm   = exp(A + 1/2 * B^2),
    beta    = A / (A + B),
    weibull = B * gamma(1 + 1/A),
    norm    = A,
    f       = ifelse(B > 2, B / (B - 2), mean(rf(10000, A, B))))
  var <- switch(distn,
    gamma   = A / B^2,
    lnorm   = exp(2 * A + B^2) * (exp(B^2) - 1),
    beta    = A * B / ((A + B)^2 * (A + B + 1)),
    weibull = B^2 * (gamma(1 + 2/A) - gamma(1 + 1/A)^2),
    norm    = B^2,
    f       = ifelse(B > 4,
                     2 * B^2 * (A + B - 2) / (A * (B - 2)^2 * (B - 4)),
                     var(rf(1e+05, A, B))))
  # 95% interval from the matching quantile function, e.g. qnorm for "norm".
  qci <- get(paste0("q", distn))
  ci <- qci(c(0.025, 0.975), A, B)
  unlist(list(mean = mean, var = var, lcl = ci[1], ucl = ci[2]))
} # pdf.stats
#--------------------------------------------------------------------------------------------------#
##' Dictionary of terms used to identify traits in ed, filenames, and figures
##'
##' @return a dataframe with id, the name used by ED and PEcAn database for a parameter; fileid, an abbreviated
##' name used for files; figid, the parameter name written out as best known in english for figures
##' and tables.
##'
##' @param traits a vector of trait names, if traits = NULL, all of the traits will be returned.
##' @export
##' @examples
##' # convert parameter name to a string appropriate for end-use plotting
##' \dontrun{
##' trait.lookup('growth_resp_factor')
##' trait.lookup('growth_resp_factor')$figid
##'
##' # get a list of all traits and units in dictionary
##' trait.lookup()[,c('figid', 'units')]
##' }
trait.lookup <- function(traits = NULL) {
  # HACK: the dictionary ships as package data and is re-loaded on every call.
  # Ultimately it should be read once at the start of run time.  It could also
  # live in the database, but because it determines which parameters are fed
  # to the model, it is arguably model specific.
  data(trait.dictionary)
  if (is.null(traits)) {
    return(trait.dictionary)
  }
  trait.dictionary[match(traits, trait.dictionary$id), ]
} # trait.lookup
#--------------------------------------------------------------------------------------------------#
##' Convert number to n significant digits
##'
##' @title Table numbers
##' @param x numeric value or vector
##' @param n number of significant figures
##' @export
##' @author David LeBauer
##' @return x rounded to n significant figures, with any names preserved
##' @examples
##' tabnum(1.2345)
##' tabnum(1.2345, n = 4)
tabnum <- function(x, n = 3) {
  # as.numeric() strips attributes after rounding, so re-attach the names.
  stats::setNames(as.numeric(signif(x, n)), names(x))
} # tabnum
#--------------------------------------------------------------------------------------------------#
##' Scale temperature dependent trait from measurement temperature to reference temperature
##'
##' @title Arrhenius scaling
##' @param observed.value observed value of temperature dependent trait, e.g. Vcmax, root respiration rate
##' @param old.temp temperature (degrees C) at which measurement was taken or previously scaled to
##' @param new.temp temperature (degrees C) to be scaled to, default = 25 C
##' @return numeric value at reference temperature
##' @export
##' @author unknown
arrhenius.scaling <- function(observed.value, old.temp, new.temp = 25) {
  # Arrhenius-type correction with a fixed constant of 3000; temperatures
  # are converted from Celsius to Kelvin before taking reciprocals.
  inv.kelvin.new <- 1 / (273.15 + new.temp)
  inv.kelvin.old <- 1 / (273.15 + old.temp)
  observed.value / exp(3000 * (inv.kelvin.new - inv.kelvin.old))
} # arrhenius.scaling
#--------------------------------------------------------------------------------------------------#
##' Capitalize a string
##'
##' Upper-cases the first letter of every space-separated word.
##' @title Capitalize a string
##' @param x string (only the first element is used)
##' @return x, capitalized
##' @author David LeBauer
capitalize <- function(x) {
  words <- strsplit(as.character(x), " ")[[1]]
  capped <- paste0(toupper(substring(words, 1, 1)), substring(words, 2))
  paste(capped, collapse = " ")
} # capitalize
# Compatibility shim: TRUE whenever x is not exactly TRUE.  Note this is
# looser than base::isFALSE (R >= 3.5), which only accepts a scalar FALSE.
isFALSE <- function(x) {
  !isTRUE(x)
}
#--------------------------------------------------------------------------------------------------#
##' New xtable
##'
##' Utility to print a data frame as a LaTeX table with blanked-out row names.
##' @title newxtable
##' @param x data.frame to be converted to latex table
##' @param environment can be 'table'; 'sidewaystable' if using latex rotating package
##' @param table.placement LaTeX float placement specifier, e.g. 'ht'
##' @param label LaTeX label passed to xtable
##' @param caption table caption passed to xtable
##' @param caption.placement where the caption is placed ('top' or 'bottom')
##' @param align column alignment specification passed to xtable
##' @return Latex version of table, with percentages properly formatted
##' @author David LeBauer
newxtable <- function(x, environment = "table", table.placement = "ht", label = NULL,
                      caption = NULL, caption.placement = NULL, align = NULL) {
  tab <- xtable(x, label = label, caption = caption, align = align)
  print(tab,
        floating.environment = environment,
        table.placement = table.placement,
        caption.placement = caption.placement,
        # sanitize.text.function = function(x) gsub("%", "\\\\%", x),
        sanitize.rownames.function = function(x) paste(''))
} # newxtable
#--------------------------------------------------------------------------------------------------#
##' Convert author, year, title to bibtex citation format
##'
##' Converts author year title to author1999abc format, where the trailing
##' acronym is a 3-character abbreviation of the title.
##' @title bibtexify
##' @param author name of first author
##' @param year year of publication
##' @param title manuscript title
##' @return bibtex citation
bibtexify <- function(author, year, title) {
  title.acronym <- abbreviate(title, minlength = 3, strict = TRUE)
  paste0(author, year, title.acronym)
} # bibtexify
#--------------------------------------------------------------------------------------------------#
##' Convert categorical variable into sequential integers
##'
##' Turns any categorical variable into a sequential integer, numbering
##' levels in order of first appearance.  This transformation is required
##' for using data in BUGS/JAGS.
##' @title as.sequence
##' @param x categorical variable as vector
##' @param na.rm logical: return NA's or replace with max(x) + 1
##' @return sequence from 1:length(unique(x))
##' @export
##' @author David LeBauer
as.sequence <- function(x, na.rm = TRUE) {
  # First distinct value maps to 1, the second to 2, and so on.
  ids <- as.integer(factor(x, unique(x)))
  if (all(is.na(ids))) {
    # All-NA input: fall back to a constant sequence of 1s.
    ids <- rep(1, length(ids))
  }
  if (na.rm == TRUE) {
    # Recode remaining NAs to one more than the largest id.
    ids[is.na(ids)] <- max(ids, na.rm = TRUE) + 1
  }
  return(ids)
} # as.sequence
#--------------------------------------------------------------------------------------------------#
##' Test ssh access
##'
##' Test to determine if access to a remote server is available.
##' Can be used to exclude / include tests or to prevent / identify access errors
##' @title Test Remote
##' @param host remote host specification passed to \code{remote.execute.cmd}
##' @return logical - TRUE if remote connection is available
##' @author Rob Kooper
test.remote <- function(host) {
  # /bin/true exits with status 0, so a reachable host yields 0 here.
  status <- try(remote.execute.cmd(host, "/bin/true"))
  return(status == 0)
} # test.remote
##' Create a temporary settings file
##'
##' Uses \code{\link{tempfile}} function to provide a valid temporary file (OS independent)
##' Useful for testing functions that depend on settings file
##' Reference: http://stackoverflow.com/a/12940705/199217
##' @title temp.settings
##' @param settings.txt character vector of lines to round-trip through a file
##' @return character vector written to and read from a temporary file
##' @export
##' @author David LeBauer
temp.settings <- function(settings.txt) {
  path <- tempfile()
  # Remove the scratch file even if reading fails part-way.
  on.exit(unlink(path))
  writeLines(settings.txt, con = path)
  readLines(path)
} # temp.settings
##' Test if function gives an error
##'
##' adaptation of try that returns a logical value (FALSE if error)
##' @title tryl
##' @param FUN expression to be evaluated for error
##' @return FALSE if function returns error; else TRUE
##' @export
##' @examples
##' tryl(1+1)
##' # TRUE
##' tryl(sum('a'))
##' # FALSE
##' @author David LeBauer
tryl <- function(FUN) {
  # FUN is evaluated lazily inside tryCatch, so any error is captured here.
  result <- tryCatch(FUN, error = function(e) e)
  !inherits(result, "error")
} # tryl
##' load model package
##'
##' Attaches the PEcAn model package for \code{model} (named PEcAn.<model>)
##' if it is installed and not already loaded; logs an error otherwise.
##' @title Load model package
##' @param model name of model
##' @return result of require() if the package was attached, invisibly
##' @export
##' @examples
##' \dontrun{load.modelpkg("BioCro")}
##' @author David LeBauer
load.modelpkg <- function(model) {
  pecan.modelpkg <- paste0("PEcAn.", model)
  already.attached <- pecan.modelpkg %in% names(sessionInfo()$otherPkgs)
  if (!already.attached) {
    if (pecan.modelpkg %in% rownames(installed.packages())) {
      do.call(require, args = list(pecan.modelpkg))
    } else {
      logger.error("I can't find a package for the ", model,
                   "model; I expect it to be named ", pecan.modelpkg)
    }
  }
} # load.modelpkg
##' conversion function for the unit conversions that udunits cannot handle but often needed in PEcAn calculations
##' @title misc.convert
##' @export
##' @param x convertible values
##' @param u1 unit to be converted from, character
##' @param u2 unit to be converted to, character
##' @return val converted values
##' @author Istem Fer, Shawn Serbin
misc.convert <- function(x, u1, u2) {
  carbon.mass <- PeriodicTable::mass("C") # atomic mass of carbon
  water.mass <- sum(PeriodicTable::mass(c("H", "H", "O"))) # molar mass of H2O, g/mol
  # Each branch handles one hard-coded (from, to) pair; keep in sync with
  # misc.are.convertible().
  if (u1 == "umol C m-2 s-1" & u2 == "kg C m-2 s-1") {
    converted <- udunits2::ud.convert(x, "ug", "kg") * carbon.mass
  } else if (u1 == "kg C m-2 s-1" & u2 == "umol C m-2 s-1") {
    converted <- udunits2::ud.convert(x, "kg", "ug") / carbon.mass
  } else if (u1 == "mol H2O m-2 s-1" & u2 == "kg H2O m-2 s-1") {
    converted <- udunits2::ud.convert(x, "g", "kg") * water.mass
  } else if (u1 == "kg H2O m-2 s-1" & u2 == "mol H2O m-2 s-1") {
    converted <- udunits2::ud.convert(x, "kg", "g") / water.mass
  } else if (u1 == "Mg ha-1" & u2 == "kg C m-2") {
    converted <- x * udunits2::ud.convert(1, "Mg", "kg") * udunits2::ud.convert(1, "ha-1", "m-2")
  } else if (u1 == "kg C m-2" & u2 == "Mg ha-1") {
    converted <- x * udunits2::ud.convert(1, "kg", "Mg") * udunits2::ud.convert(1, "m-2", "ha-1")
  } else {
    # Fall through to udunits itself, rewriting "gC" as grams times 12.
    u1 <- gsub("gC", "g*12", u1)
    u2 <- gsub("gC", "g*12", u2)
    converted <- udunits2::ud.convert(x, u1, u2)
    # logger.severe(paste("Unknown units", u1, u2))
  }
  return(converted)
} # misc.convert
##' function to check whether units are convertible by misc.convert function
##'
##' Returns TRUE only for the exact (from, to) conversion pairs that
##' \code{misc.convert} special-cases; all other combinations return FALSE.
##' @title misc.are.convertible
##' @export
##' @param u1 unit to be converted from, character
##' @param u2 unit to be converted to, character
##' @return logical
##' @author Istem Fer, Shawn Serbin
misc.are.convertible <- function(u1, u2) {
  # Paired by position: units.from[i] is convertible to units.to[i] only.
  units.from <- c("umol C m-2 s-1", "kg C m-2 s-1",
                  "mol H2O m-2 s-1", "kg H2O m-2 s-1",
                  "Mg ha-1", "kg C m-2")
  units.to <- c("kg C m-2 s-1", "umol C m-2 s-1",
                "kg H2O m-2 s-1", "mol H2O m-2 s-1",
                "kg C m-2", "Mg ha-1")
  # One positional test replaces the previous nested %in%/which() logic,
  # which also mixed the vectorized `&` into a scalar if() condition.
  any(units.from == u1 & units.to == u2)
}
##' Convert expression to variable names
##'
##' Splits a derivation expression such as "GPP=NPP+Ra" into the name of the
##' derived variable (left of '=') and the variable names on the right-hand
##' side, for use with read.output.
##' @title convert.expr
##' @param expression expression string of the form "derived=f(vars)"
##' @return list with variable.drv (derived name) and variable.eqn
##'   (RHS variable names plus the RHS expression string)
##' @export
##' @author Istem Fer
convert.expr <- function(expression) {
  # Split the equation into left- and right-hand sides.
  derived.name <- gsub("=.*$", "", expression)
  rhs <- gsub(".*=", "", expression)
  # Characters other than a-z, A-Z, "_" and "." delimit variable names.
  delim.match <- gregexpr('[^a-zA-Z_.]', rhs)
  delims <- unlist(regmatches(rhs, delim.match))
  if (length(delims) != 0) {
    split.regex <- paste0("[", noquote(paste0(delims, collapse = "")), "]")
    vars <- unlist(strsplit(rhs, split.regex))
    vars <- vars[vars != ""] # drop empty tokens between adjacent delimiters
  } else {
    # No operators present: the RHS is a single variable name.
    vars <- rhs
  }
  return(list(variable.drv = derived.name,
              variable.eqn = list(variables = vars, expression = rhs)))
}
##' Simple function to use ncftpget for FTP downloads behind a firewall.
##' Requires ncftpget and a properly formatted config file in the users
##' home directory
##' @title download.file
##' @param url complete URL for file download
##' @param filename destination file name
##' @param method Method of file retrieval. Can set this using the options(download.ftp.method=[method]) in your Rprofile.
##' example options(download.ftp.method="ncftpget")
##'
##' @examples
##' \dontrun{
##' download.file("http://lib.stat.cmu.edu/datasets/csb/ch11b.txt", "~/test.download.txt")
##' download.file("ftp://ftp.cdc.noaa.gov/Datasets/NARR/monolevel/pres.sfc.2000.nc", "~/pres.sfc.2000.nc")
##' }
##'
##' @export
##'
##' @author Shawn Serbin, Rob Kooper
download.file <- function(url, filename, method) {
  if (startsWith(url, "ftp://")) {
    # Bug fix: the old code did `method <- if (missing(method)) ...`, which
    # overwrote a user-supplied method with NULL and made the comparison
    # below fail with "argument is of length zero".
    if (missing(method)) {
      method <- getOption("download.ftp.method", default = "auto")
    }
    if (method == "ncftpget") {
      logger.debug(paste0("FTP Method: ", method))
      # Quote url and destination so shell metacharacters in either cannot
      # break (or inject into) the constructed command line.
      system(paste("ncftpget", "-c", shQuote(url), ">", shQuote(filename)))
    } else {
      utils::download.file(url, filename, method)
    }
  } else {
    utils::download.file(url, filename)
  }
}
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core_use_compendium.R
\name{use_compendium}
\alias{use_compendium}
\title{Creates an R package suitable to use as a research compendium, and
switches to the working directory of this new package, ready to work}
\usage{
use_compendium(
path = getwd(),
fields = getOption("usethis.description"),
rstudio = rstudioapi::isAvailable(),
open = FALSE,
quiet = FALSE,
simple = TRUE
)
}
\arguments{
\item{path}{location to create new package. The last component of the path will be used as the package name}
\item{fields}{list of description values to override default values or add additional values}
\item{rstudio}{create an RStudio project file? (with \code{usethis::use_rstudio})}
\item{open}{if TRUE and in RStudio, the new project is opened in a new instance. If TRUE and not in RStudio, the working directory is set to the new project}
\item{quiet}{if FALSE, the default, prints informative messages}
\item{simple}{if TRUE, the default, the R/ directory is not created, because it's not necessary for many if not most research repositories}
}
\description{
This is usethis::create_package() with some additional messages to simplify the transition into the new project setting
}
| /man/use_compendium.Rd | permissive | jimsforks/rrtools | R | false | true | 1,272 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core_use_compendium.R
\name{use_compendium}
\alias{use_compendium}
\title{Creates an R package suitable to use as a research compendium, and
switches to the working directory of this new package, ready to work}
\usage{
use_compendium(
path = getwd(),
fields = getOption("usethis.description"),
rstudio = rstudioapi::isAvailable(),
open = FALSE,
quiet = FALSE,
simple = TRUE
)
}
\arguments{
\item{path}{location to create new package. The last component of the path will be used as the package name}
\item{fields}{list of description values to override default values or add additional values}
\item{rstudio}{create an RStudio project file? (with \code{usethis::use_rstudio})}
\item{open}{if TRUE and in RStudio, the new project is opened in a new instance. If TRUE and not in RStudio, the working directory is set to the new project}
\item{quiet}{if FALSE, the default, prints informative messages}
\item{simple}{if TRUE, the default, the R/ directory is not created, because it's not necessary for many if not most research repositories}
}
\description{
This is usethis::create_package() with some additional messages to simplify the transition into the new project setting
}
|
#!/usr/bin/env Rscript
# Plot, per sample, the proportion of mapped reads falling in each genomic
# region, split by read type (plus a combined "all" type), optionally faceted
# by a metadata field.  Input is the output of read.genome.coverage.py.
#options(stringsAsFactors=F)
# Declared for downstream use; not referenced below.
pseudocount = 1e-04
# Colorblind-friendly palette (declared but not used in this script).
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
##################
# OPTION PARSING
##################
suppressPackageStartupMessages(library("optparse"))
option_list <- list(
make_option(c("-i", "--input_matrix"),
	help="the matrix you want to analyze. It must have not a header. It is the output of read.genome.coverage.py"),
make_option(c("-m", "--metadata"), help="tsv file with metadata on matrix experiment"),
make_option(c("-M", "--merge_mdata_on"), help="which field corresponds to the ids in the summary file [default=%default]", default="labExpId"),
make_option(c("-o", "--output"), help="output file name (without extension) [default=%default]", default="summary.out"),
make_option(c("-H", "--height"), default=6,
	help="height of the plot in inches [default=%default]"),
make_option(c("-W", "--width"), default=8,
	help="width of the plot in inches [default=%default]"),
make_option(c("--facet_nrow"), type="integer",
	help="number of rows when faceting"),
make_option(c("-f", "--facet"), help="dashboard field by which the individuals are faceted"),
make_option(c("-v", "--verbose"), action="store_true", default=FALSE,
	help="verbose output [default=%default]")
)
parser <- OptionParser(usage = "%prog [options] file", option_list=option_list)
arguments <- parse_args(parser, positional_arguments = TRUE)
opt <- arguments$options
if( opt$verbose) {print(opt)}
##------------
## LIBRARIES
##------------
cat("Loading libraries... ")
suppressPackageStartupMessages(library(reshape2))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(plyr))
cat("DONE\n\n")
##--------------------##
## BEGIN ##
##--------------------##
# read the matrix from the command line
m = read.table(opt$input_matrix, h=F, col.names=c("labExpId", "type", "region", "nb_reads"))
# read the metadata from the metadata file
if (!is.null(opt$metadata)) {
mdata = read.table(opt$metadata, h=T, sep='\t')
# Commas in the merge key are replaced by dots to match the matrix ids.
mdata[opt$merge_mdata_on] <- sapply(mdata[opt$merge_mdata_on], function(x) gsub(",", ".", x))
}
if (opt$verbose) {print(head(m))}
# separate total from the rest
# Attach each sample's total read count as an extra column.
df = merge(m, setNames(subset(m, type=="total")[c(1,4)], c("labExpId", "total")), by="labExpId")
# merge split and continuous
# Sum read counts across types per region, then re-attach totals as type "all".
all = merge(aggregate(nb_reads~labExpId+region, subset(df, region!="total"), sum), subset(m, type=="total")[c(1,2,4)], by="labExpId")
colnames(all)[c(3,5)] <- c("nb_reads", "total")
all$type <- "all"
df = rbind(df, all)
if (opt$verbose) {print(head(df))}
# attach the metadata
if (!is.null(opt$metadata)) {
mdata_header = unique(c(opt$facet, opt$merge_mdata_on))
df = merge(df, unique(mdata[mdata_header]), by.x='labExpId', by.y=opt$merge_mdata_on)
}
if (opt$verbose) {print(head(df))}
# ----------------- ggplot options ------------------------------
theme_set(theme_bw(base_size=18))
# Boxplots of the percentage of mapped reads per region, colored by read type.
gp = ggplot(subset(df, type!="total"), aes(y=nb_reads/total*100, x=region))
gp = gp + geom_boxplot(aes(color=type, fill=type), alpha=0.5)
if (!is.null(opt$metadata)) {
gp = gp + facet_wrap(as.formula(sprintf("~%s", opt$facet)), nrow=opt$facet_nrow)
# gp = gp + facet_grid(as.formula(sprintf("~%s", opt$facet)))
}
gp = gp + labs(y='Proportion of mapped reads (%)', x="")
gp = gp + theme(axis.text = element_text(size=13, angle=45, h=1))
gp = gp + scale_color_brewer(palette="Set1")
# Save the last plot in three formats at the requested size (inches).
w = opt$width
h = opt$height
ggsave(filename=sprintf("%s.pdf", opt$output), h=h, w=w)
ggsave(filename=sprintf("%s.png", opt$output), h=h, w=w)
ggsave(filename=sprintf("%s.eps", opt$output), h=h, w=w)
q(save='no')
| /Coverage/read.genomic.coverage.R | permissive | ahalfpen727/Bioconductor-Resources | R | false | false | 3,675 | r | #!/usr/bin/env Rscript
#options(stringsAsFactors=F)
pseudocount = 1e-04
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
##################
# OPTION PARSING
##################
suppressPackageStartupMessages(library("optparse"))
option_list <- list(
make_option(c("-i", "--input_matrix"),
help="the matrix you want to analyze. It must have not a header. It is the output of read.genome.coverage.py"),
make_option(c("-m", "--metadata"), help="tsv file with metadata on matrix experiment"),
make_option(c("-M", "--merge_mdata_on"), help="which field corresponds to the ids in the summary file [default=%default]", default="labExpId"),
make_option(c("-o", "--output"), help="output file name (without extension) [default=%default]", default="summary.out"),
make_option(c("-H", "--height"), default=6,
help="height of the plot in inches [default=%default]"),
make_option(c("-W", "--width"), default=8,
help="width of the plot in inches [default=%default]"),
make_option(c("--facet_nrow"), type="integer",
help="number of rows when faceting"),
make_option(c("-f", "--facet"), help="dashboard field by which the individuals are faceted"),
make_option(c("-v", "--verbose"), action="store_true", default=FALSE,
help="verbose output [default=%default]")
)
parser <- OptionParser(usage = "%prog [options] file", option_list=option_list)
arguments <- parse_args(parser, positional_arguments = TRUE)
opt <- arguments$options
if( opt$verbose) {print(opt)}
##------------
## LIBRARIES
##------------
cat("Loading libraries... ")
suppressPackageStartupMessages(library(reshape2))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(plyr))
cat("DONE\n\n")
##--------------------##
## BEGIN ##
##--------------------##
# read the matrix from the command line
m = read.table(opt$input_matrix, h=F, col.names=c("labExpId", "type", "region", "nb_reads"))
# read the metadata from the metadata file
if (!is.null(opt$metadata)) {
mdata = read.table(opt$metadata, h=T, sep='\t')
mdata[opt$merge_mdata_on] <- sapply(mdata[opt$merge_mdata_on], function(x) gsub(",", ".", x))
}
if (opt$verbose) {print(head(m))}
# separate total from the rest
df = merge(m, setNames(subset(m, type=="total")[c(1,4)], c("labExpId", "total")), by="labExpId")
# merge split and continuous
all = merge(aggregate(nb_reads~labExpId+region, subset(df, region!="total"), sum), subset(m, type=="total")[c(1,2,4)], by="labExpId")
colnames(all)[c(3,5)] <- c("nb_reads", "total")
all$type <- "all"
df = rbind(df, all)
if (opt$verbose) {print(head(df))}
# attach the metadata
if (!is.null(opt$metadata)) {
mdata_header = unique(c(opt$facet, opt$merge_mdata_on))
df = merge(df, unique(mdata[mdata_header]), by.x='labExpId', by.y=opt$merge_mdata_on)
}
if (opt$verbose) {print(head(df))}
# ----------------- ggplot options ------------------------------
theme_set(theme_bw(base_size=18))
gp = ggplot(subset(df, type!="total"), aes(y=nb_reads/total*100, x=region))
gp = gp + geom_boxplot(aes(color=type, fill=type), alpha=0.5)
if (!is.null(opt$metadata)) {
gp = gp + facet_wrap(as.formula(sprintf("~%s", opt$facet)), nrow=opt$facet_nrow)
# gp = gp + facet_grid(as.formula(sprintf("~%s", opt$facet)))
}
gp = gp + labs(y='Proportion of mapped reads (%)', x="")
gp = gp + theme(axis.text = element_text(size=13, angle=45, h=1))
gp = gp + scale_color_brewer(palette="Set1")
w = opt$width
h = opt$height
ggsave(filename=sprintf("%s.pdf", opt$output), h=h, w=w)
ggsave(filename=sprintf("%s.png", opt$output), h=h, w=w)
ggsave(filename=sprintf("%s.eps", opt$output), h=h, w=w)
q(save='no')
|
### sbracalbca bottom calculations
# Assemble a goodness-of-fit table (DIC = Dbar + pD) for the BrCa/LBCa model
# from posterior summaries saved in modelsbrcalbca.csv.  pD is estimated as
# half the squared posterior sd of the deviance; Dbar is its posterior mean.
modelAFT = read.csv(file ='C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelsbrcalbca.csv')
gof=matrix(0,nrow=3,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","LBCa","Total")
gof[1:2,3]=modelAFT[98:99,2]#D1,D2 means
# sd of D2 is Zero, issue in calculation
gof[1:2,2]=.5*modelAFT[98:99,3]^2#D1, D2 sd's squared
gof[1:2,1]=gof[1:2,3]+gof[1:2,2]
gof[3,]=gof[1,]+gof[2,]
#write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelsbrcalbca.csv",col.names=F,append=T,sep=",")
### need to rerun both temporal models ###
### sbrcacrcaprca bottom calculations
# Same DIC assembly for the three-cancer model (BrCa/CrCa/PrCa); deviance
# summaries are assumed to sit on rows 112-114 -- flagged below for checking.
modelAFT = read.csv(file ="C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelsbrcacrcaprca.csv")
gof=matrix(0,nrow=4,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","CrCa","PrCa","Total")
gof[1:3,3]=modelAFT[112:114,2]#D1,D2 means DOUBLE CHECK THESE ROWS
gof[1:3,2]=.5*modelAFT[112:114,3]^2#D1, D2 sd's squared
gof[1:3,1]=gof[1:3,3]+gof[1:3,2]
gof[4,]=gof[1,]+gof[2,]+gof[3,]
write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelsbrcacrcaprca.csv",col.names=F,append=T,sep=",")
### stbrcalbca
# "st" model variant (presumably spatio-temporal -- TODO confirm); deviance
# summaries on rows 126-127.
modelAFT = read.csv(file = 'C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelstbrcalbca.csv')
gof=matrix(0,nrow=3,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","LBCa","Total")
gof[1:2,3]=modelAFT[126:127,2]#D1,D2 means DOUBLE CHECK THESE ROWS!
gof[1:2,2]=.5*modelAFT[126:127,3]^2#D1, D2 sd's squared
gof[1:2,1]=gof[1:2,3]+gof[1:2,2]
gof[3,]=gof[1,]+gof[2,]
#write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelstbrcalbca.csv",col.names=F,append=T,sep=",")
### stbrcacrcaprca bottom calculations
# "st" three-cancer variant.
modelAFT = read.csv(file ="C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelstbrcacrcaprca.csv")
gof=matrix(0,nrow=4,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","CrCa","PrCa","Total")
gof[1:3,3]=modelAFT[112:114,2]#D1,D2 means DOUBLE CHECK THESE ROWS
gof[1:3,2]=.5*modelAFT[112:114,3]^2#D1, D2 sd's squared
gof[1:3,1]=gof[1:3,3]+gof[1:3,2]
gof[4,]=gof[1,]+gof[2,]+gof[3,]
#write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelstbrcacrcaprca.csv",col.names=F,append=T,sep=",")
### STCPbrcalbca bottom calculations
# "STCP" model variant (naming suggests a change-point extension -- confirm).
modelAFT = read.csv(file ="C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelSTCPbrcalbca.csv")
gof=matrix(0,nrow=3,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","LbCa","Total")
gof[1:2,3]=modelAFT[126:127,2]#D1,D2 means DOUBLE CHECK THESE ROWS
gof[1:2,2]=.5*modelAFT[126:127,3]^2#D1, D2 sd's squared
gof[1:2,1]=gof[1:2,3]+gof[1:2,2]
gof[3,]=gof[1,]+gof[2,]
write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelSTCPbrcalbca.csv",col.names=F,append=T,sep=",")
| /code/CodeForJC/CheckDIC.R | no_license | carrollrm/sptl_temp_cancer | R | false | false | 2,914 | r |
### sbracalbca bottom calculations
modelAFT = read.csv(file ='C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelsbrcalbca.csv')
gof=matrix(0,nrow=3,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","LBCa","Total")
gof[1:2,3]=modelAFT[98:99,2]#D1,D2 means
# sd of D2 is Zero, issue in calculation
gof[1:2,2]=.5*modelAFT[98:99,3]^2#D1, D2 sd's squared
gof[1:2,1]=gof[1:2,3]+gof[1:2,2]
gof[3,]=gof[1,]+gof[2,]
#write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelsbrcalbca.csv",col.names=F,append=T,sep=",")
### need to rerun both temporal models ###
### sbrcacrcaprca bottom calculations
modelAFT = read.csv(file ="C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelsbrcacrcaprca.csv")
gof=matrix(0,nrow=4,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","CrCa","PrCa","Total")
gof[1:3,3]=modelAFT[112:114,2]#D1,D2 means DOUBLE CHECK THESE ROWS
gof[1:3,2]=.5*modelAFT[112:114,3]^2#D1, D2 sd's squared
gof[1:3,1]=gof[1:3,3]+gof[1:3,2]
gof[4,]=gof[1,]+gof[2,]+gof[3,]
write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelsbrcacrcaprca.csv",col.names=F,append=T,sep=",")
### stbrcalbca
modelAFT = read.csv(file = 'C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelstbrcalbca.csv')
gof=matrix(0,nrow=3,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","LBCa","Total")
gof[1:2,3]=modelAFT[126:127,2]#D1,D2 means DOUBLE CHECK THESE ROWS!
gof[1:2,2]=.5*modelAFT[126:127,3]^2#D1, D2 sd's squared
gof[1:2,1]=gof[1:2,3]+gof[1:2,2]
gof[3,]=gof[1,]+gof[2,]
#write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelstbrcalbca.csv",col.names=F,append=T,sep=",")
### stbrcacrcaprca bottom calculations
modelAFT = read.csv(file ="C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelstbrcacrcaprca.csv")
gof=matrix(0,nrow=4,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","CrCa","PrCa","Total")
gof[1:3,3]=modelAFT[112:114,2]#D1,D2 means DOUBLE CHECK THESE ROWS
gof[1:3,2]=.5*modelAFT[112:114,3]^2#D1, D2 sd's squared
gof[1:3,1]=gof[1:3,3]+gof[1:3,2]
gof[4,]=gof[1,]+gof[2,]+gof[3,]
#write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelstbrcacrcaprca.csv",col.names=F,append=T,sep=",")
### STCPbrcalbca bottom calculations
modelAFT = read.csv(file ="C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelSTCPbrcalbca.csv")
gof=matrix(0,nrow=3,ncol=3)
colnames(gof)=c("DIC","pD","Dbar")
rownames(gof)=c("BrCa","LbCa","Total")
gof[1:2,3]=modelAFT[126:127,2]#D1,D2 means DOUBLE CHECK THESE ROWS
gof[1:2,2]=.5*modelAFT[126:127,3]^2#D1, D2 sd's squared
gof[1:2,1]=gof[1:2,3]+gof[1:2,2]
gof[3,]=gof[1,]+gof[2,]
write.table(gof,"C:/Users/twili/OneDrive/Documents/Classes/MAT 596/code/Results/modelSTCPbrcalbca.csv",col.names=F,append=T,sep=",")
|
# Stub: plotly renderer for the Erlang distribution.  The body has not been
# implemented; presumably the parameters mirror the sibling
# plotly*Distribution helpers in this project (plot range, shiny input,
# distribution type, probability range) -- TODO confirm before implementing.
plotlyErlangDistribution <- function(plotrange, input, distType, probrange) {
}
| /plotlyFunctions/Erlang.R | no_license | SOCR/ProbDistCalc_RShiny | R | false | false | 80 | r | plotlyErlangDistribution <- function(plotrange, input, distType, probrange) {
}
|
##############################################################################
# Copyright (c) 2012-2016 Russell V. Lenth #
# #
# This file is part of the lsmeans package for R (*lsmeans*) #
# #
# *lsmeans* is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# *lsmeans* is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with R and *lsmeans*. If not, see #
# <https://www.r-project.org/Licenses/> and/or #
# <http://www.gnu.org/licenses/>. #
##############################################################################
# lsmeans support for aovlist objects
# Recover the data used to fit an aovlist (multi-stratum aov) model.
# Strips every Error() stratum from the model terms, then delegates to the
# recover.data generic with the original call and na.action.
recover.data.aovlist = function(object, ...) {
    fcall = attr(object, "call")
    trms = terms(object)
    # Build an update formula ". ~ . - Error(a) - Error(b) ..." that drops
    # each Error term found among the term labels.
    term.labels = attr(trms, "term.labels")
    error.terms = term.labels[grep("^Error\\(", term.labels)]
    newf = as.formula(paste(c(".~.", error.terms), collapse = "-"))
    trms = terms(update(trms, newf))
    recover.data(fcall, delete.response(trms), na.action = attr(object, "na.action"), ...)
}
# This works great for balanced experiments, and goes horribly wrong
# even for slightly unbalanced ones. So I abort on these kinds of cases
# Build the least-squares-means basis for an aovlist (multi-stratum aov)
# model.  Arguments follow the lsm.basis generic: trms/xlev/grid describe the
# reference grid; vcov., if supplied, overrides the covariance matrix.
# Returns a list with model matrix X, coefficients bhat, estimability basis,
# covariance V, and a Satterthwaite df function plus its arguments.
# NOTE(review): only intra-block estimates are used (weights are all 0/1),
# and per the warning below results are biased for unbalanced designs.
lsm.basis.aovlist = function (object, trms, xlev, grid, vcov., ...) {
    m = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
    contr = attr(object, "contrasts")
    X = model.matrix(trms, m, contrasts.arg = contr)
    xnms = dimnames(X)[[2]]
    # Check for situations we can't handle...
    # Non-intercept columns that do not sum to (near) zero indicate
    # predictors correlated with the intercept.
    colsums = apply(X[, setdiff(xnms, "(Intercept)"), drop=FALSE], 2, sum)
    if (any(round(colsums,3) != 0))
        warning("Some predictors are correlated with the intercept - results are biased.\n",
                "May help to re-fit with different contrasts, e.g. 'contr.sum'")
    if (length(unlist(lapply(object, function(x) names(coef(x))))) > length(xnms))
        message("NOTE: Results are based on intra-block estimates.")
    # initialize arrays
    nonint = setdiff(names(object), "(Intercept)")
    k = length(xnms)
    bhat = rep(NA, k) # I'll use NAs to track which slots I've filled
    V = matrix(0, nrow=k, ncol=k)
    names(bhat) = xnms
    dimnames(V) = list(xnms, xnms)
    empty.list = as.list(nonint)
    names(empty.list) = nonint
    # Per-stratum covariance blocks (Vmats), coefficient index sets (Vidx),
    # and degrees of freedom (Vdf), consumed later by .aovlist.dffun.
    Vmats = Vidx = Vdf = empty.list
    wts = matrix(0, nrow = length(nonint), ncol = k)
    dimnames(wts) = list(nonint, xnms)
    # NOTE: At present, I just do intra-block analysis: wts are all 0 and 1
    btemp = bhat #++ temp for tracking indexes
    #++Work thru strata in reverse order
    # (later strata take priority; earlier strata only fill leftover slots)
    for (nm in rev(nonint)) {
        x = object[[nm]]
        bi = coef(x)
        bi = bi[!is.na(bi)]
        ii = match(names(bi), xnms)
        Vidx[[nm]] = use = setdiff(ii, which(!is.na(bhat))) #++ omit elts already filled
        if(length(use) > 0) {
            ii.left = seq_along(ii)[!is.na(match(ii,use))]
            wts[nm, use] = 1
            bhat[use] = bi[ii.left]
            Vi = vcov(x)[ii.left, ii.left, drop=FALSE]
            Vmats[[nm]] = Vi
            V[use,use] = Vi
        }
        else {
            Vmats[[nm]] = matrix(0, nrow=0, ncol=0)
        }
        # Any cases with 0 df will have NaN for covariances. I make df = -1
        # in those cases so I don't divide by 0 later in Satterthwaite calcs
        Vdf[[nm]] = ifelse(x$df > 0, x$df, -1)
    }
    x <- object[["(Intercept)"]]
    if (!is.null(x)) {
        # The intercept belongs in the 1st error stratum
        # So have to add a row and column to its covariance matrix
        bhat[1] = x$coefficients[1]
        wts[1,1] = 1
        Vidx[[1]] = ii = c(1, Vidx[[1]])
        k = length(ii)
        vv = matrix(0, nrow=k, ncol=k)
        if (k > 1) vv[2:k,2:k] = Vmats[[1]]
        # Variance of intercept is EMS of this stratum divided by N
        # Here I'm assuming there are no weights
        N = sum(sapply(object, function(x) length(x$residuals)))
        V[1,1] = vv[1,1] = sum(object[[2]]$residuals^2) / object[[2]]$df / N
        #dimnames(vv) = list(c(xnms[ii], xnms[ii]))
        Vmats[[1]] = vv
    }
    # override V if vcov. is supplied
    # (no df information is available in that case, so dffun returns NA)
    if(!missing(vcov.)) {
        V = .my.vcov(object, vcov.)
        dfargs = list()
        dffun = function(k, dfargs) NA
    }
    else {
        dfargs = list(Vmats=Vmats, Vidx=Vidx, Vdf=unlist(Vdf), wts = wts)
        dffun = function(k, dfargs) {
            lsmeans::.aovlist.dffun(k, dfargs)
        }
    }
    nbasis = estimability::all.estble # Consider this further?
    misc = list()
    list(X = X, bhat = bhat, nbasis = nbasis, V = V, dffun = dffun,
        dfargs = dfargs, misc = misc)
}
.aovlist.dffun = function(k, dfargs) {
if(is.matrix(k) && (nrow(k) > 1)) {
dfs = apply(k, 1, .aovlist.dffun, dfargs)
min(dfs)
}
else {
v = sapply(seq_along(dfargs$Vdf), function(j) {
ii = dfargs$Vidx[[j]]
kk = (k * dfargs$wts[j, ])[ii]
#sum(kk * .mat.times.vec(dfargs$Vmats[[j]], kk))
.qf.non0(dfargs$Vmats[[j]], kk)
})
sum(v)^2 / sum(v^2 / dfargs$Vdf) # Good ole Satterthwaite
}
} | /R/aovlist-support.R | no_license | jonathon-love/lsmeans | R | false | false | 6,243 | r | ##############################################################################
# Copyright (c) 2012-2016 Russell V. Lenth #
# #
# This file is part of the lsmeans package for R (*lsmeans*) #
# #
# *lsmeans* is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# *lsmeans* is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with R and *lsmeans*. If not, see #
# <https://www.r-project.org/Licenses/> and/or #
# <http://www.gnu.org/licenses/>. #
##############################################################################
# lsmeans support for aovlist objects
recover.data.aovlist = function(object, ...) {
fcall = attr(object, "call")
trms = terms(object)
# Find the Error terms
lbls = attr(trms, "term.labels")
err.idx = grep("^Error\\(", lbls)
newf = as.formula(paste(c(".~.", lbls[err.idx]), collapse = "-"))
trms = terms(update(trms, newf))
recover.data(fcall, delete.response(trms), na.action = attr(object, "na.action"), ...)
}
# This works great for balanced experiments, and goes horribly wrong
# even for slightly unbalanced ones. So I abort on these kinds of cases
lsm.basis.aovlist = function (object, trms, xlev, grid, vcov., ...) {
m = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
contr = attr(object, "contrasts")
X = model.matrix(trms, m, contrasts.arg = contr)
xnms = dimnames(X)[[2]]
# Check for situations we can't handle...
colsums = apply(X[, setdiff(xnms, "(Intercept)"), drop=FALSE], 2, sum)
if (any(round(colsums,3) != 0))
warning("Some predictors are correlated with the intercept - results are biased.\n",
"May help to re-fit with different contrasts, e.g. 'contr.sum'")
if (length(unlist(lapply(object, function(x) names(coef(x))))) > length(xnms))
message("NOTE: Results are based on intra-block estimates.")
# initialize arrays
nonint = setdiff(names(object), "(Intercept)")
k = length(xnms)
bhat = rep(NA, k) # I'll use NAs to track which slots I've filled
V = matrix(0, nrow=k, ncol=k)
names(bhat) = xnms
dimnames(V) = list(xnms, xnms)
empty.list = as.list(nonint)
names(empty.list) = nonint
Vmats = Vidx = Vdf = empty.list
wts = matrix(0, nrow = length(nonint), ncol = k)
dimnames(wts) = list(nonint, xnms)
# NOTE: At present, I just do intra-block analysis: wts are all 0 and 1
btemp = bhat #++ temp for tracking indexes
#++Work thru strata in reverse order
for (nm in rev(nonint)) {
x = object[[nm]]
bi = coef(x)
bi = bi[!is.na(bi)]
ii = match(names(bi), xnms)
Vidx[[nm]] = use = setdiff(ii, which(!is.na(bhat))) #++ omit elts already filled
if(length(use) > 0) {
ii.left = seq_along(ii)[!is.na(match(ii,use))]
wts[nm, use] = 1
bhat[use] = bi[ii.left]
Vi = vcov(x)[ii.left, ii.left, drop=FALSE]
Vmats[[nm]] = Vi
V[use,use] = Vi
}
else {
Vmats[[nm]] = matrix(0, nrow=0, ncol=0)
}
# Any cases with 0 df will have NaN for covariances. I make df = -1
# in those cases so I don't divide by 0 later in Satterthwaite calcs
Vdf[[nm]] = ifelse(x$df > 0, x$df, -1)
}
x <- object[["(Intercept)"]]
if (!is.null(x)) {
# The intercept belongs in the 1st error stratum
# So have to add a row and column to its covariance matrix
bhat[1] = x$coefficients[1]
wts[1,1] = 1
Vidx[[1]] = ii = c(1, Vidx[[1]])
k = length(ii)
vv = matrix(0, nrow=k, ncol=k)
if (k > 1) vv[2:k,2:k] = Vmats[[1]]
# Variance of intercept is EMS of this stratum divided by N
# Here I'm assuming there are no weights
N = sum(sapply(object, function(x) length(x$residuals)))
V[1,1] = vv[1,1] = sum(object[[2]]$residuals^2) / object[[2]]$df / N
#dimnames(vv) = list(c(xnms[ii], xnms[ii]))
Vmats[[1]] = vv
}
# override V if vcov. is supplied
if(!missing(vcov.)) {
V = .my.vcov(object, vcov.)
dfargs = list()
dffun = function(k, dfargs) NA
}
else {
dfargs = list(Vmats=Vmats, Vidx=Vidx, Vdf=unlist(Vdf), wts = wts)
dffun = function(k, dfargs) {
lsmeans::.aovlist.dffun(k, dfargs)
}
}
nbasis = estimability::all.estble # Consider this further?
misc = list()
list(X = X, bhat = bhat, nbasis = nbasis, V = V, dffun = dffun,
dfargs = dfargs, misc = misc)
}
.aovlist.dffun = function(k, dfargs) {
if(is.matrix(k) && (nrow(k) > 1)) {
dfs = apply(k, 1, .aovlist.dffun, dfargs)
min(dfs)
}
else {
v = sapply(seq_along(dfargs$Vdf), function(j) {
ii = dfargs$Vidx[[j]]
kk = (k * dfargs$wts[j, ])[ii]
#sum(kk * .mat.times.vec(dfargs$Vmats[[j]], kk))
.qf.non0(dfargs$Vmats[[j]], kk)
})
sum(v)^2 / sum(v^2 / dfargs$Vdf) # Good ole Satterthwaite
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_model.R
\name{run_model}
\alias{run_model}
\title{Run the model over time}
\usage{
run_model(steps, initLand, managInt = c(0, 0, 0, 0), RCP = 0,
stoch = TRUE, cores = 1, outputLand = NA, rangeLimitOccup = NULL,
stateOccup = FALSE, saveOutput = FALSE, fileOutput = NULL,
folderOutput = NULL)
}
\arguments{
\item{steps}{numeric, the total number of steps to run the model. Here 1 step is equivalent to 5 years.}
\item{initLand}{output object from the \code{\link{create_virtual_landscape}} or \code{\link{create_real_landscape}} function}
\item{managInt}{vector, intensity of the four ordered management practices: plantation, harvest, thinning and enrichment plantation. Values must be bounded between \code{0} and \code{1}, where \code{0} means the natural dynamics without forest management.}
\item{RCP}{Numeric, \href{https://en.wikipedia.org/wiki/Representative_Concentration_Pathway}{Representative Concentration Pathway}. Five scenarios of RCP are available: \code{0}, \code{2.6}, \code{4.5}, \code{6} and \code{8.5}}
\item{stoch}{logical, if \code{TRUE}, the prevalence of each cell will depend in a probabilistic random generator. Otherwise the prevalence will be deterministic.}
\item{cores}{numeric, the number of cores to be used in a parallel computation. The parallel is computed with the \code{mclapply} function. If \code{cores = 1}, a loop for will be used instead.}
\item{outputLand}{vector, an integer vector to define the time steps to be saved at the end of the simulation. This argument is useful when we only need to compare the first and last time step \code{outputLand = c(1, steps)}, or when the size of the landscape is too big so we need to reduce memory usage.}
\item{rangeLimitOccup}{numeric between 0 and 1. If not \code{NULL}, the function will calculate the landscape position of the boreal trailing edge and the temperate leading edge for each time step. The defined value betwen 0 and 1 determines the minimum occupancy a row of the landscape must be occupied by a specific forest state to be considered part of the state range. E.g. if \code{rangeLimitOccup = 0.8}, the furthest row of the landscape with a proportion less than 0.8 will be considered the range limit of the state. Default is \code{0.85}, but values ranging from \code{0.7} to \code{0.95} does not have much effect on migration rate (see \href{https://github.com/willvieira/STManaged/issues/3}{figure 3} of sensitivity analysis). It returns a data frame.}
\item{stateOccup}{logical, calculate the proportion of the four forest states for each row of the landscape for all time steps. This argument is useful when you need the information from each time step but cannot save all landscapes because of memory limitation. Returns a list with a data frame for each time step}
\item{saveOutput}{logical, if \code{TRUE} it will save the output list in the 'output' directory with an automatic name with the main information from the simulation}
\item{fileOutput, }{character, if not \code{NULL}, define the name of the file output}
\item{folderOutput, }{character, if not \code{NULL}, define the name of the folder other than the default 'output'}
}
\value{
a list with the (i) landscape configuration for each step, (ii) scaled temperature gradient, (iii) steps, (iv) management intensity, (v) RCP scenario, (vi) landscape dimensions and (vii) range limit data frame
}
\description{
This function generates the spatiotemporal dynamics based in the initial landscape, climate change and forest management
}
\examples{
\dontrun{
initLand = create_virtual_landscape(cellSize = 5)
lands <- run_model(steps = 10, initLand,
managInt = c(0.15, 0, 0, 0),
RCP = 4.5,
rangeLimitOccup = 0.75)
}
}
| /man/run_model.Rd | permissive | willvieira/STManaged | R | false | true | 3,838 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_model.R
\name{run_model}
\alias{run_model}
\title{Run the model over time}
\usage{
run_model(steps, initLand, managInt = c(0, 0, 0, 0), RCP = 0,
stoch = TRUE, cores = 1, outputLand = NA, rangeLimitOccup = NULL,
stateOccup = FALSE, saveOutput = FALSE, fileOutput = NULL,
folderOutput = NULL)
}
\arguments{
\item{steps}{numeric, the total number of steps to run the model. Here 1 step is equivalent to 5 years.}
\item{initLand}{output object from the \code{\link{create_virtual_landscape}} or \code{\link{create_real_landscape}} function}
\item{managInt}{vector, intensity of the four ordered management practices: plantation, harvest, thinning and enrichment plantation. Values must be bounded between \code{0} and \code{1}, where \code{0} means the natural dynamics without forest management.}
\item{RCP}{Numeric, \href{https://en.wikipedia.org/wiki/Representative_Concentration_Pathway}{Representative Concentration Pathway}. Five scenarios of RCP are available: \code{0}, \code{2.6}, \code{4.5}, \code{6} and \code{8.5}}
\item{stoch}{logical, if \code{TRUE}, the prevalence of each cell will depend in a probabilistic random generator. Otherwise the prevalence will be deterministic.}
\item{cores}{numeric, the number of cores to be used in a parallel computation. The parallel is computed with the \code{mclapply} function. If \code{cores = 1}, a loop for will be used instead.}
\item{outputLand}{vector, an integer vector to define the time steps to be saved at the end of the simulation. This argument is useful when we only need to compare the first and last time step \code{outputLand = c(1, steps)}, or when the size of the landscape is too big so we need to reduce memory usage.}
\item{rangeLimitOccup}{numeric between 0 and 1. If not \code{NULL}, the function will calculate the landscape position of the boreal trailing edge and the temperate leading edge for each time step. The defined value betwen 0 and 1 determines the minimum occupancy a row of the landscape must be occupied by a specific forest state to be considered part of the state range. E.g. if \code{rangeLimitOccup = 0.8}, the furthest row of the landscape with a proportion less than 0.8 will be considered the range limit of the state. Default is \code{0.85}, but values ranging from \code{0.7} to \code{0.95} does not have much effect on migration rate (see \href{https://github.com/willvieira/STManaged/issues/3}{figure 3} of sensitivity analysis). It returns a data frame.}
\item{stateOccup}{logical, calculate the proportion of the four forest states for each row of the landscape for all time steps. This argument is useful when you need the information from each time step but cannot save all landscapes because of memory limitation. Returns a list with a data frame for each time step}
\item{saveOutput}{logical, if \code{TRUE} it will save the output list in the 'output' directory with an automatic name with the main information from the simulation}
\item{fileOutput, }{character, if not \code{NULL}, define the name of the file output}
\item{folderOutput, }{character, if not \code{NULL}, define the name of the folder other than the default 'output'}
}
\value{
a list with the (i) landscape configuration for each step, (ii) scaled temperature gradient, (iii) steps, (iv) management intensity, (v) RCP scenario, (vi) landscape dimensions and (vii) range limit data frame
}
\description{
This function generates the spatiotemporal dynamics based in the initial landscape, climate change and forest management
}
\examples{
\dontrun{
initLand = create_virtual_landscape(cellSize = 5)
lands <- run_model(steps = 10, initLand,
managInt = c(0.15, 0, 0, 0),
RCP = 4.5,
rangeLimitOccup = 0.75)
}
}
|
# for "html tables a la rstudio::conf(2018)", see:
# https://beta.rstudioconnect.com/content/3105/
rstudio <- tibble(
name = list("bash ninja", "html tables a la rstudio::conf(2018)",
"flesh out google sheets as a backend for shiny app data",
"look into shiny.collections",
"pivot tables", "grable", "toolbars", "reactlog",
"drill-down", "drill-through", "react + shiny", "d3 + shiny",
"flex and shinydashboard theme", "revamp shinydashboard"),
description = as.list(letters[1:length(name)]),
links = as.list(letters[1:length(name)]),
priority = as.list(letters[1:length(name)]),
selected = rep(NA, length(name))
)
rstudioModUI <- function(id) {
ns <- NS(id)
tagList(
"List of work-inspired projects I'd like to do",
checkboxGroupInput(ns("projects"), "Select the ones in progress",
unique(rstudio$name), unique(rstudio$selected)),
DT::dataTableOutput(ns("table"))
)
}
rstudioMod <- function(input, output, session) {} | /modules/rstudio.R | no_license | bborgesr/sandbox | R | false | false | 979 | r |
# for "html tables a la rstudio::conf(2018)", see:
# https://beta.rstudioconnect.com/content/3105/
rstudio <- tibble(
name = list("bash ninja", "html tables a la rstudio::conf(2018)",
"flesh out google sheets as a backend for shiny app data",
"look into shiny.collections",
"pivot tables", "grable", "toolbars", "reactlog",
"drill-down", "drill-through", "react + shiny", "d3 + shiny",
"flex and shinydashboard theme", "revamp shinydashboard"),
description = as.list(letters[1:length(name)]),
links = as.list(letters[1:length(name)]),
priority = as.list(letters[1:length(name)]),
selected = rep(NA, length(name))
)
rstudioModUI <- function(id) {
ns <- NS(id)
tagList(
"List of work-inspired projects I'd like to do",
checkboxGroupInput(ns("projects"), "Select the ones in progress",
unique(rstudio$name), unique(rstudio$selected)),
DT::dataTableOutput(ns("table"))
)
}
rstudioMod <- function(input, output, session) {} |
#' Updated function to add plot to table
#'
#' @param x Numeric vectors for position coord.
#' @param y Numeric vectors for position coord.
#' @param table Data table to add.
#' @param lwd,bty,bg,cex,xjust,yjust,xpad,ypad See par for details.
#' @param box.col,text.col,col.rowname,col.colname Details for color of bg and text for different aspects
#' @param display.colnames,display.rownames Logical vectors for whether row and column names should be displayed.
#' @param hlines,vlines Logical vectors for whether table should have horizontal and vertical lines.
#' @param title Value for table header.
#' @param text.font See par for details on font.
#' @export
#'
#' @seealso plotrix
#'
addtable2plot.new=function (x, y = NULL, table, lwd = par("lwd"), bty = "n", bg = par("bg"),
cex = 1, xjust= 0, yjust = 1, xpad = 0.1, ypad = 0.5, box.col = par("fg"),
text.col = par("fg"),col.rowname = par("fg"),col.colname = par("fg"), display.colnames = TRUE, display.rownames = FALSE,
hlines = FALSE, vlines = FALSE, title = NULL, text.font=NULL)
{
require(plotrix)
if (dev.cur() == 1)
stop("Cannot add table unless a graphics device is open")
if (is.null(y)) {
if (is.character(x)) {
tablepos <- get.tablepos(x)
x <- tablepos$x
y <- tablepos$y
xjust <- tablepos$xjust
yjust <- tablepos$yjust
}
else {
if (is.null(x$y))
stop("both x and y coordinates must be given")
y <- x$y
x <- x$x
}
}
droptop <- ifelse(any(c("topleft", "top", "topright") %in%
x), 1, 0)
tabdim <- dim(table)
if (tabdim[1] == 1)
hlines <- FALSE
if (tabdim[2] == 1)
vlines <- FALSE
if (is.null(dim(bg)))
bg <- matrix(bg, nrow = tabdim[1], ncol = tabdim[2])
if (is.null(dim(text.col)))
text.col <- matrix(text.col, nrow = tabdim[1], ncol = tabdim[2])
column.names <- colnames(table)
if (is.null(column.names) && display.colnames)
column.names <- 1:tabdim[2]
row.names <- rownames(table)
if (is.null(row.names) && display.rownames)
row.names <- 1:tabdim[1]
if (par("xlog"))
x <- log10(x)
cellwidth <- rep(0, tabdim[2])
if (display.colnames) {
for (column in 1:tabdim[2]) cellwidth[column] <- max(strwidth(c(column.names[column],
format(table[, column])), cex = cex)) * (1 + xpad)
nvcells <- tabdim[1] + 1
}
else {
nvcells <- tabdim[1]
for (column in 1:tabdim[2]) cellwidth[column] <- max(strwidth(format(table[,
column]), cex = cex)) * (1 + xpad)
}
if (display.rownames) {
nhcells <- tabdim[2] + 1
rowname.width <- max(strwidth(row.names, cex = cex)) *
(1 + xpad)
}
else {
nhcells <- tabdim[2]
rowname.width <- 0
}
if (par("ylog"))
y <- log10(y)
cellheight <- max(strheight(c(column.names, row.names, as.vector(unlist(table))),
cex = cex)) * (1 + ypad)
if (!is.null(title) & droptop)
y <- y - cellheight
ytop <- y + yjust * nvcells * cellheight
oldpar <- par(xlog = FALSE, ylog = FALSE, xpd = TRUE)
if (display.colnames) {
xleft <- x + display.rownames * rowname.width - xjust *
(sum(cellwidth) + rowname.width)
for (column in 1:tabdim[2]) {
text(xleft + cellwidth[column] * 0.5, ytop - 0.5 *
cellheight, column.names[column], cex = cex,
col = col.colname, font=text.font)
xleft <- xleft + cellwidth[column]
}
}
for (row in 1:tabdim[1]) {
xleft <- x - xjust * (sum(cellwidth) + rowname.width)
if (display.rownames) {
text(xleft + 0.5 * rowname.width, ytop - (row + display.colnames -
0.5) * cellheight, row.names[row], cex = cex,
col = col.rowname, font=text.font)
xleft <- xleft + rowname.width
}
for (column in 1:tabdim[2]) {
rect(xleft, ytop - (row + display.colnames - 1) *
cellheight, xleft + cellwidth[column], ytop -
(row + display.colnames) * cellheight, col = bg[row,
column], border = bg[row, column])
text(xleft + 0.5 * cellwidth[column], ytop - (row +
display.colnames - 0.5) * cellheight, table[row,
column], cex = cex, col = text.col[row,column], font=text.font)
xleft <- xleft + cellwidth[column]
}
}
if (vlines) {
xleft <- x + display.rownames * rowname.width - xjust *
(sum(cellwidth) + rowname.width)
segments(xleft + cumsum(cellwidth[-tabdim[2]]), ytop -
display.colnames * cellheight, xleft + cumsum(cellwidth[-tabdim[2]]),
ytop - (display.colnames + tabdim[1]) * cellheight)
}
if (hlines) {
xleft <- x + display.rownames * rowname.width - xjust *
(sum(cellwidth) + rowname.width)
segments(xleft, ytop - display.colnames * cellheight -
cumsum(rep(cellheight, tabdim[1] - 1)), xleft + sum(cellwidth),
ytop - display.colnames * cellheight - cumsum(rep(cellheight,
tabdim[1] - 1)))
}
if (!is.null(title)) {
xleft <- x - xjust * (sum(cellwidth) + rowname.width)
text(xleft + (rowname.width + sum(cellwidth))/2, ytop +
cellheight/2, title, cex = cex, col = text.col, font=text.font)
}
if (bty == "o") {
xleft <- x + display.rownames * rowname.width - xjust *
(sum(cellwidth) + rowname.width)
rect(xleft, ytop - (tabdim[1] + display.colnames) * cellheight,
xleft + sum(cellwidth), ytop - display.colnames *
cellheight)
}
par(oldpar)
}
| /R/addtable2plot.new.R | no_license | foramashar/fashaR | R | false | false | 5,875 | r | #' Updated function to add plot to table
#'
#' @param x Numeric vectors for position coord.
#' @param y Numeric vectors for position coord.
#' @param table Data table to add.
#' @param lwd,bty,bg,cex,xjust,yjust,xpad,ypad See par for details.
#' @param box.col,text.col,col.rowname,col.colname Details for color of bg and text for different aspects
#' @param display.colnames,display.rownames Logical vectors for whether row and column names should be displayed.
#' @param hlines,vlines Logical vectors for whether table should have horizontal and vertical lines.
#' @param title Value for table header.
#' @param text.font See par for details on font.
#' @export
#'
#' @seealso plotrix
#'
addtable2plot.new=function (x, y = NULL, table, lwd = par("lwd"), bty = "n", bg = par("bg"),
cex = 1, xjust= 0, yjust = 1, xpad = 0.1, ypad = 0.5, box.col = par("fg"),
text.col = par("fg"),col.rowname = par("fg"),col.colname = par("fg"), display.colnames = TRUE, display.rownames = FALSE,
hlines = FALSE, vlines = FALSE, title = NULL, text.font=NULL)
{
require(plotrix)
if (dev.cur() == 1)
stop("Cannot add table unless a graphics device is open")
if (is.null(y)) {
if (is.character(x)) {
tablepos <- get.tablepos(x)
x <- tablepos$x
y <- tablepos$y
xjust <- tablepos$xjust
yjust <- tablepos$yjust
}
else {
if (is.null(x$y))
stop("both x and y coordinates must be given")
y <- x$y
x <- x$x
}
}
droptop <- ifelse(any(c("topleft", "top", "topright") %in%
x), 1, 0)
tabdim <- dim(table)
if (tabdim[1] == 1)
hlines <- FALSE
if (tabdim[2] == 1)
vlines <- FALSE
if (is.null(dim(bg)))
bg <- matrix(bg, nrow = tabdim[1], ncol = tabdim[2])
if (is.null(dim(text.col)))
text.col <- matrix(text.col, nrow = tabdim[1], ncol = tabdim[2])
column.names <- colnames(table)
if (is.null(column.names) && display.colnames)
column.names <- 1:tabdim[2]
row.names <- rownames(table)
if (is.null(row.names) && display.rownames)
row.names <- 1:tabdim[1]
if (par("xlog"))
x <- log10(x)
cellwidth <- rep(0, tabdim[2])
if (display.colnames) {
for (column in 1:tabdim[2]) cellwidth[column] <- max(strwidth(c(column.names[column],
format(table[, column])), cex = cex)) * (1 + xpad)
nvcells <- tabdim[1] + 1
}
else {
nvcells <- tabdim[1]
for (column in 1:tabdim[2]) cellwidth[column] <- max(strwidth(format(table[,
column]), cex = cex)) * (1 + xpad)
}
if (display.rownames) {
nhcells <- tabdim[2] + 1
rowname.width <- max(strwidth(row.names, cex = cex)) *
(1 + xpad)
}
else {
nhcells <- tabdim[2]
rowname.width <- 0
}
if (par("ylog"))
y <- log10(y)
cellheight <- max(strheight(c(column.names, row.names, as.vector(unlist(table))),
cex = cex)) * (1 + ypad)
if (!is.null(title) & droptop)
y <- y - cellheight
ytop <- y + yjust * nvcells * cellheight
oldpar <- par(xlog = FALSE, ylog = FALSE, xpd = TRUE)
if (display.colnames) {
xleft <- x + display.rownames * rowname.width - xjust *
(sum(cellwidth) + rowname.width)
for (column in 1:tabdim[2]) {
text(xleft + cellwidth[column] * 0.5, ytop - 0.5 *
cellheight, column.names[column], cex = cex,
col = col.colname, font=text.font)
xleft <- xleft + cellwidth[column]
}
}
for (row in 1:tabdim[1]) {
xleft <- x - xjust * (sum(cellwidth) + rowname.width)
if (display.rownames) {
text(xleft + 0.5 * rowname.width, ytop - (row + display.colnames -
0.5) * cellheight, row.names[row], cex = cex,
col = col.rowname, font=text.font)
xleft <- xleft + rowname.width
}
for (column in 1:tabdim[2]) {
rect(xleft, ytop - (row + display.colnames - 1) *
cellheight, xleft + cellwidth[column], ytop -
(row + display.colnames) * cellheight, col = bg[row,
column], border = bg[row, column])
text(xleft + 0.5 * cellwidth[column], ytop - (row +
display.colnames - 0.5) * cellheight, table[row,
column], cex = cex, col = text.col[row,column], font=text.font)
xleft <- xleft + cellwidth[column]
}
}
if (vlines) {
xleft <- x + display.rownames * rowname.width - xjust *
(sum(cellwidth) + rowname.width)
segments(xleft + cumsum(cellwidth[-tabdim[2]]), ytop -
display.colnames * cellheight, xleft + cumsum(cellwidth[-tabdim[2]]),
ytop - (display.colnames + tabdim[1]) * cellheight)
}
if (hlines) {
xleft <- x + display.rownames * rowname.width - xjust *
(sum(cellwidth) + rowname.width)
segments(xleft, ytop - display.colnames * cellheight -
cumsum(rep(cellheight, tabdim[1] - 1)), xleft + sum(cellwidth),
ytop - display.colnames * cellheight - cumsum(rep(cellheight,
tabdim[1] - 1)))
}
if (!is.null(title)) {
xleft <- x - xjust * (sum(cellwidth) + rowname.width)
text(xleft + (rowname.width + sum(cellwidth))/2, ytop +
cellheight/2, title, cex = cex, col = text.col, font=text.font)
}
if (bty == "o") {
xleft <- x + display.rownames * rowname.width - xjust *
(sum(cellwidth) + rowname.width)
rect(xleft, ytop - (tabdim[1] + display.colnames) * cellheight,
xleft + sum(cellwidth), ytop - display.colnames *
cellheight)
}
par(oldpar)
}
|
#'@title Multiply two vectors
#'@description
#'This function multiplies two numeric vectors and returns
#'a numeric vector
#'
#'@param x a numeric vector
#'@param y a numeric vector
#'
#'@return a numeric vector
#'
#'@export
#'
#'@examples
#'multiply(2, 3)
#'multiply(mtcars$mpg, mtcars$hp)
multiply <- function(x, y){
x*y
}
| /R/multiply.R | permissive | marginal-latte/mathR | R | false | false | 329 | r | #'@title Multiply two vectors
#'@description
#'This function multiplies two numeric vectors and returns
#'a numeric vector
#'
#'@param x a numeric vector
#'@param y a numeric vector
#'
#'@return a numeric vector
#'
#'@export
#'
#'@examples
#'multiply(2, 3)
#'multiply(mtcars$mpg, mtcars$hp)
multiply <- function(x, y){
x*y
}
|
library(gofMC)
### Name: KS_D
### Title: KS_D
### Aliases: KS_D
### ** Examples
KS_D(c(1,2,3,4,5),c(1,2,3,4,4))
KS_D(matrix(runif(100),ncol=7),matrix(runif(100),ncol=7))
| /data/genthat_extracted_code/gofMC/examples/KS_D.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 178 | r | library(gofMC)
### Name: KS_D
### Title: KS_D
### Aliases: KS_D
### ** Examples
KS_D(c(1,2,3,4,5),c(1,2,3,4,4))
KS_D(matrix(runif(100),ncol=7),matrix(runif(100),ncol=7))
|
####################################################
###### The volatility updating rule ##
####################################################
ht<-function(para_h,Data.ret){
rt=Data.ret$rt/250 #### Interest rate Data : Data.BSJ$rt
ret =Data.ret$ret #### Returns : Data.BSJ$ret
Z1=length(ret)
# para_h<-c() set up the parameters of the model
a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[4]
# Parameter under the physical probability
lamda0star= -(1/2)
gamastar= gama+lamda0+(1/2)
h_star = c() #### A vector containing h from the model,
h_star[1]=(a0 + a1)/(1 - b1 - a1*(gamastar)^2 ) #### The first value for h,
for (i in 2:Z1){
h_star[i]=a0 +b1*h_star[i-1]+a1*(((ret[i-1]-rt[i-1]-lamda0star*(h_star[i-1]))/(sqrt(h_star[i-1]))) - gamastar*(sqrt(h_star[i-1])))^2
}
drapeau=0
if (a0<=0){drapeau=1}
if (a1<=0){drapeau=1}
if (gama<=0){drapeau=1}
if (b1<=0){drapeau=1}
if (lamda0<=0){drapeau=1}
if (drapeau==0){
resultat=h_star
}else{
resultat=rep(NA, Z1)
}
return(resultat)
}
####################################################
###### Step 4 : Compution the RMSR ##
####################################################
###### Computation of the Vega ##
####################################################
###### Black-Scholes Function for call ##
####################################################
C_BS <- function(S, K, T, r, sig,d, type="C"){
d1 <- (log(S/K) + (r -d + sig^2/2)*T) / (sig*sqrt(T))
d2 <- d1 - sig*sqrt(T)
if(type=="C"){
value <- S*pnorm(d1) - K*exp(-r*T)*pnorm(d2)
}
if(type=="P"){
value <- K*exp(-r*T)*pnorm(-d2) - S*pnorm(-d1)
}
return(value)
}
####################################################
###### BS Implied Vol using Bisection Method ##
####################################################
implied.vol <- function(S, K, T, r, C,d, type="C"){
sig <- 0.20
sig.up <- 1
sig.down <- 0.000001
count <- 0
C_market <- C
err <- C_BS(S, K, T, r, sig,0 ,type="C") - C_market
## repeat until error is sufficiently small or counter hits 1000
while(abs(err) > 0.000001 && count<10000){
if(err < 0){
sig.down <- sig
sig <- (sig.up + sig)/2
}else{
sig.up <- sig
sig <- (sig.down + sig)/2
}
err <- C_BS(S, K, T, r, sig,0, type) - C_market
count <- count + 1
}
## return NA if counter hit 1000
if(count==10000){
return(-1)
}else{
return(sig)
}
}
####################################################
###### To compute vega ##
####################################################
V<-function(S, K, T, r, C,d, type="C"){
sig<-implied.vol(S, K, T, r, C,d, type="C") ## Function to find BS Implied Vol using Bisection Method
d1 <- (log(S/K) + (r-d + sig^2/2)*T) / (sig*sqrt(T))
if(sig==-1){
return(V=10^6)
}else{
return(V=(1.0/sqrt(2*pi))*(S*exp(-r*T))*(exp(-((d1^2))))*sqrt(T))
}
}
Vega <- function(Data.N, type="C")
{
T=Data.N$T*250 #### Time to maturity expressed in terms of years in terms of days
S=Data.N$S #### Prix du sous-jacent: Data.contract$S
K=Data.N$K #### Strike Prix d'exercice: data$strike
r=Data.N$r/250 #### Interest rate Data.contract$r
C=Data.N$C #### Call price
d=Data.N$d*0 #### Call dividende
vega <- rep(NA, length(C))
for (i in 1:length(C)){
vega[i] = V(S[i], K[i], T[i], r[i], C[i], d[i], type="C")
}
return(vega)
}
############################################################
#### Function that returns Root Mean Squared Error ##
############################################################
RMSEsim <- function(para_h,Data.ret,Data.N)
{
C=Data.N$C #### Call price
P<-Pricer(N,para_h,Data.N)$P
V<-Vega(Data.N=Data.N, type="C")
error <- rep(NA, length(C))
for (i in 1:length(C)){
error[i] = ((P[i] - C[i])/V[i])^2
}
rmse<-sqrt((mean(error)))
return(list(rmse=rmse,P=P,error=error))
}
| /Simulation_juin2018/Estimation Paper 2 juin 2016/HN - verification/VIX Heston N/HN MLE/RMSE VIX HN.R | no_license | Fanirisoa/dynamic_pricing | R | false | false | 4,289 | r | ####################################################
###### The volatility updating rule ##
####################################################
ht<-function(para_h,Data.ret){
rt=Data.ret$rt/250 #### Interest rate Data : Data.BSJ$rt
ret =Data.ret$ret #### Returns : Data.BSJ$ret
Z1=length(ret)
# para_h<-c() set up the parameters of the model
a0=para_h[1]; a1=para_h[2]; gama=para_h[3]; b1= para_h[4] ; lamda0= para_h[4]
# Parameter under the physical probability
lamda0star= -(1/2)
gamastar= gama+lamda0+(1/2)
h_star = c() #### A vector containing h from the model,
h_star[1]=(a0 + a1)/(1 - b1 - a1*(gamastar)^2 ) #### The first value for h,
for (i in 2:Z1){
h_star[i]=a0 +b1*h_star[i-1]+a1*(((ret[i-1]-rt[i-1]-lamda0star*(h_star[i-1]))/(sqrt(h_star[i-1]))) - gamastar*(sqrt(h_star[i-1])))^2
}
drapeau=0
if (a0<=0){drapeau=1}
if (a1<=0){drapeau=1}
if (gama<=0){drapeau=1}
if (b1<=0){drapeau=1}
if (lamda0<=0){drapeau=1}
if (drapeau==0){
resultat=h_star
}else{
resultat=rep(NA, Z1)
}
return(resultat)
}
####################################################
###### Step 4 : Compution the RMSR ##
####################################################
###### Computation of the Vega ##
####################################################
###### Black-Scholes Function for call ##
####################################################
C_BS <- function(S, K, T, r, sig,d, type="C"){
d1 <- (log(S/K) + (r -d + sig^2/2)*T) / (sig*sqrt(T))
d2 <- d1 - sig*sqrt(T)
if(type=="C"){
value <- S*pnorm(d1) - K*exp(-r*T)*pnorm(d2)
}
if(type=="P"){
value <- K*exp(-r*T)*pnorm(-d2) - S*pnorm(-d1)
}
return(value)
}
####################################################
###### BS Implied Vol using Bisection Method ##
####################################################
# Black-Scholes implied volatility via bisection.
#
# S, K, T, r, d : option/market inputs, passed straight through to C_BS
# C             : observed market price to invert
# type          : "C" (call) or "P" (put)
# Returns the implied volatility, or -1 as a failure sentinel when the
# bisection does not converge within the iteration cap.
implied.vol <- function(S, K, T, r, C, d, type = "C") {
  sig <- 0.20
  sig.up <- 1
  sig.down <- 0.000001
  count <- 0
  C_market <- C
  # BUG FIX: the initial error previously hard-coded type = "C" and d = 0,
  # ignoring the caller's option type and dividend yield; the in-loop
  # error also hard-coded d = 0.  Both now use the actual arguments.
  err <- C_BS(S, K, T, r, sig, d, type) - C_market
  ## repeat until the error is sufficiently small or the counter hits 10000
  while (abs(err) > 0.000001 && count < 10000) {
    if (err < 0) {
      # model price too low -> true vol is above sig: raise the lower bound
      sig.down <- sig
      sig <- (sig.up + sig) / 2
    } else {
      # model price too high -> true vol is below sig: lower the upper bound
      sig.up <- sig
      sig <- (sig.down + sig) / 2
    }
    err <- C_BS(S, K, T, r, sig, d, type) - C_market
    count <- count + 1
  }
  ## return the -1 sentinel if the counter hit the cap without converging
  if (count == 10000) {
    return(-1)
  } else {
    return(sig)
  }
}
####################################################
###### To compute vega ##
####################################################
# Black-Scholes vega of an option, evaluated at the market-implied
# volatility.
#
# Inverts the market price C for sigma via implied.vol(), then returns
# dPrice/dSigma.  If the implied-vol search fails (sentinel -1), a large
# vega (1e6) is returned so the contract gets near-zero weight in the
# vega-weighted RMSE computed by the callers.
V <- function(S, K, T, r, C, d, type = "C") {
  # BUG FIX: the `type` argument was previously hard-coded to "C" here.
  sig <- implied.vol(S, K, T, r, C, d, type = type)  ## BS implied vol (bisection)
  d1 <- (log(S / K) + (r - d + sig^2 / 2) * T) / (sig * sqrt(T))
  if (sig == -1) {
    return(V = 10^6)
  } else {
    # vega = S * exp(-r*T) * dnorm(d1) * sqrt(T)
    # BUG FIX: the normal-density exponent was exp(-d1^2); the standard
    # normal pdf requires exp(-d1^2 / 2).
    # NOTE(review): the e^{-rT} discount matches a Black-76 (futures)
    # vega; for a spot underlying with dividend yield d it would be
    # e^{-dT} -- confirm which convention is intended.
    return(V = (1.0 / sqrt(2 * pi)) * (S * exp(-r * T)) * exp(-(d1^2) / 2) * sqrt(T))
  }
}
# Vega for every contract in a data set of option quotes.
#
# Data.N : data.frame with columns T (maturity in years), S (underlying),
#          K (strike), r (rate), C (market price) and d (dividend yield,
#          zeroed out below).
# type   : option type, forwarded to V().
# Returns a numeric vector of per-contract vegas.
Vega <- function(Data.N, type = "C")
{
  T <- Data.N$T * 250   #### Maturity converted from years to days
  S <- Data.N$S         #### Underlying price: Data.contract$S
  K <- Data.N$K         #### Strike: data$strike
  r <- Data.N$r / 250   #### Daily interest rate: Data.contract$r
  C <- Data.N$C         #### Call price
  d <- Data.N$d * 0     #### Dividend yield, forced to zero

  vega <- rep(NA_real_, length(C))
  # seq_along() instead of 1:length(C): the latter iterates over c(1, 0)
  # when C is empty.  `type` is now forwarded instead of hard-coded "C".
  for (i in seq_along(C)) {
    vega[i] <- V(S[i], K[i], T[i], r[i], C[i], d[i], type = type)
  }
  return(vega)
}
############################################################
#### Function that returns Root Mean Squared Error ##
############################################################
# Vega-weighted root mean squared pricing error.
#
# para_h   : model parameter vector (forwarded to the pricer)
# Data.ret : returns data (kept for interface compatibility; unused here)
# Data.N   : option quotes, with market prices in $C
# Returns a list with the RMSE, the model prices P and the squared
# per-contract vega-weighted errors.
# NOTE(review): Pricer() is called with a global `N` that is not an
# argument of this function -- confirm N is defined in the calling
# environment before use.
RMSEsim <- function(para_h, Data.ret, Data.N)
{
  C <- Data.N$C                        #### Market call prices
  P <- Pricer(N, para_h, Data.N)$P     #### Model prices
  V <- Vega(Data.N = Data.N, type = "C")

  # Vectorised squared vega-weighted errors (replaces the 1:length(C)
  # loop, which misbehaves when C is empty).
  error <- ((P - C) / V)^2
  rmse <- sqrt(mean(error))
  return(list(rmse = rmse, P = P, error = error))
}
|
# AUTO_DETECT_NEWVAR <- FALSE
# Swirl lesson-completion hook: asks the student for their name and a
# recipient address, then opens a pre-filled notification email.
# Reads the swirl state environment `e` from the caller's frame; returns
# TRUE so swirl treats the lesson as finished and returns to the menu.
notify <- function() {
  e <- get("e", parent.frame())
  # Student chose not to notify anyone
  if(e$val == "No") return(TRUE)
  good <- FALSE
  # Loop until the student confirms the details they typed in
  while(!good) {
    # Get info
    name <- readline_clean("What is your full name? ")
    address <- readline_clean("What is the email address of the person you'd like to notify? ")
    # Repeat back to them
    message("\nDoes everything look good?\n")
    message("Your name: ", name, "\n", "Send to: ", address)
    yn <- select.list(c("Yes", "No"), graphics = FALSE)
    if(yn == "Yes") good <- TRUE
  }
  # Get course and lesson names from the swirl lesson object
  course_name <- attr(e$les, "course_name")
  lesson_name <- attr(e$les, "lesson_name")
  subject <- paste(name, "just completed", course_name, "-", lesson_name)
  body = ""
  # Send email via swirl's unexported helper (opens the user's mail client)
  swirl:::email(address, subject, body)
  hrule()
  message("I just tried to create a new email with the following info:\n")
  message("To: ", address)
  message("Subject: ", subject)
  message("Body: <empty>")
  message("\nIf it didn't work, you can send the same email manually.")
  hrule()
  # Return TRUE to satisfy swirl and return to course menu
  TRUE
}
# Display a prompt wrapped to the console width, prefixing every line
# with "| " (swirl's house style), then read one line of user input.
readline_clean <- function(prompt = "") {
  width_limit <- getOption("width") - 2
  lines <- strwrap(prompt, width = width_limit)
  message(stringr::str_c("| ", lines, collapse = "\n"))
  readline()
}
hrule <- function() {
message("\n", paste0(rep("#", getOption("width") - 2), collapse = ""), "\n")
} | /Looking_at_Data/customTests.R | no_license | angieluis/Swirl_TheoreticalEcology | R | false | false | 1,450 | r | # AUTO_DETECT_NEWVAR <- FALSE
notify <- function() {
e <- get("e", parent.frame())
if(e$val == "No") return(TRUE)
good <- FALSE
while(!good) {
# Get info
name <- readline_clean("What is your full name? ")
address <- readline_clean("What is the email address of the person you'd like to notify? ")
# Repeat back to them
message("\nDoes everything look good?\n")
message("Your name: ", name, "\n", "Send to: ", address)
yn <- select.list(c("Yes", "No"), graphics = FALSE)
if(yn == "Yes") good <- TRUE
}
# Get course and lesson names
course_name <- attr(e$les, "course_name")
lesson_name <- attr(e$les, "lesson_name")
subject <- paste(name, "just completed", course_name, "-", lesson_name)
body = ""
# Send email
swirl:::email(address, subject, body)
hrule()
message("I just tried to create a new email with the following info:\n")
message("To: ", address)
message("Subject: ", subject)
message("Body: <empty>")
message("\nIf it didn't work, you can send the same email manually.")
hrule()
# Return TRUE to satisfy swirl and return to course menu
TRUE
}
readline_clean <- function(prompt = "") {
wrapped <- strwrap(prompt, width = getOption("width") - 2)
mes <- stringr::str_c("| ", wrapped, collapse = "\n")
message(mes)
readline()
}
hrule <- function() {
message("\n", paste0(rep("#", getOption("width") - 2), collapse = ""), "\n")
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{lines_to_board}
\alias{lines_to_board}
\title{lines_to_board, helper function for read_boards}
\usage{
lines_to_board(line)
}
\arguments{
\item{line}{A character vector of text lines to be converted into a board.}
}
\value{
A board object constructed from the input lines.
}
\description{
lines_to_board, helper function for read_boards
}
| /man/lines_to_board.Rd | no_license | rebeccapei16/percolate | R | false | true | 373 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{lines_to_board}
\alias{lines_to_board}
\title{lines_to_board, helper function for read_boards}
\usage{
lines_to_board(line)
}
\arguments{
\item{line}{turn the lines into boards}
}
\value{
turn the lines into boards
}
\description{
lines_to_board, helper function for read_boards
}
|
#Are nestlings fledging at a lower weight than they did in the past?
#I'm interested in knowing this because it could help explain why we see
#differences in recruitment across years. It could also explain why local
#weather conditions only affect survival when the population is declining. If
#the juveniles are poor quality they don't have much buffer and both they and
#their parents may have to work harder to feed themselves in poor weather.
library(tidyverse)
library(lme4)
library(MuMIn)
dat <- read.csv("~/Masters Thesis Project/Tree Swallow Data/Amelia TRES data 1975-2016/Extracted Data for Analysis/Nestling Measurements for Analysis 1975-2017.csv", as.is=T)
#There look to be some points in there that must be typos or misrecordings because there's no way the bird was that size.
ggplot(dat %>% filter(mass<30, age>0, age<20), aes(x=age, y=mass))+
geom_point()+
geom_smooth(method="lm", formula=y~poly(x, 3))
dat2 <- dat %>% filter(mass<30, age>2, age<17)
ggplot(dat2 %>% filter(age>=10), aes(x=age, y=mass))+
geom_point()+
geom_smooth(method="lm")+
ggthemes::theme_few()
#looks like even at the very coarse population status level, we may be seeing
#differences in mass for the older birds.
#OK let's try modelling changes in nestling mass through time. If we model all
#the ages we probably need to include something to deal with the polynomial
#nature of the data. Instead, perhaps it's better to only look at the last
#measurements (day 10 and on), when they are closer to fledging. I used only one
#measurement per nestling (the one closest to 12 days old)
# One measurement per nestling: the one taken closest to 12 days old.
# BUG FIX: which.min(abs(age) - 12) is minimised at the smallest age
# (subtracting 12 shifts every value equally); abs(age - 12) selects the
# measurement nearest day 12, as the comment above intends.
dat3 <- dat2 %>%
  filter(!is.na(year) & !is.na(age) & age > 9 & age <= 16) %>%
  group_by(nestlingID) %>%
  slice(which.min(abs(age - 12))) %>%
  mutate(year2 = (year - 1977) / 10,  # decades since 1977
         age2 = age - 10)

# Nest-level means of the same per-nestling measurements.
dat4 <- dat2 %>%
  filter(!is.na(year) & !is.na(age) & age > 9 & age <= 16) %>%
  group_by(nestlingID) %>%
  slice(which.min(abs(age - 12))) %>%
  group_by(nestID, year) %>%
  summarise(mmass = mean(mass),
            age = mean(age)) %>%
  mutate(year2 = (year - 1977) / 10,
         age2 = age - 10)
length(unique(dat3$nestID))
#Data set has 28, 577 rows, and 2,915 unique nests, 13,842 unique nestlings.
ggplot(dat3, aes(x=age, y=mass))+
geom_point()+
geom_smooth(method="lm", formula=y~x)
#The problem for the temporal autocorrelation is that it's unclear WHAT to
#temporally autocorrelate (e.g. nests cannot be temporally autocorrelated
#because each nest only appears within a single year).
mod1 <- lmer(mass~age*year2 + (1|nestID), data=dat3, na.action="na.fail")
mod1 <- lme(mass~age*year2,
random=~1|nestID,
data=dat3,
na.action="na.fail")
dat3$res <- residuals(mod1, type = "response")
dat4 <- dat3 %>% mutate(epsilon = lag(res))
summary(mod1)
plot(ACF(mod1))
mod1 <- lme(mass~age*year2+epsilon,
random=~1|nestID,
data=dat3,
na.action="na.fail")
plot(mod1)
qqnorm(mod1)
hist(resid(mod1))
plot(resid(mod1)~dat3$age)
plot(resid(mod1)~dat3$year2)
#This model looks really good.
#Need to keep the random nest effect
summary(mod)
dredge(mod)
anova(mod, test="F")
mod1_c <- lme(mass~age*year2,
random=~1|nestID,
correlation = corAR1(form=~year2|nestID), #CAN"T have a crouping because there's nothing
data=dat3,
na.action="na.fail")
mod1_c <- gls(mmass~age*year2,
correlation = corCAR1(form=~year2), #CAN"T have a crouping because there's nothing
data=dat4,
na.action="na.fail")
#We should keep all terms
AICc(mod1, mod1_c) #Better not to have the correlation structure.
mam <- lmer(mass~age*year2 + (1|nestID), data=dat3, na.action="na.fail")
summary(mam)
#We'll center age at 10 and 15
lmerTest
library(lmerTest)
anova(mam, test="F")
newdata <- data.frame(age=c(rep(10, 41 ), rep(15, 41)),
year2=rep(seq(0.2,4.2 , 0.1), 2),
age2= c(rep(0, 41 ), rep(5, 41)),
year=rep(seq(1977,2017 , 1), 2),
predicted=NA,
lcl=NA,
ucl=NA)
#calculate predicted values.
newdata$predicted <- predict(mam, newdata, re.form=NA)
#age= 16 for the ready to fledge birds and 12 for the not
#bootstrap confidence intervales based on https://github.com/lme4/lme4/issues/388
## param only (not including individual variation or anything like that)
b3 <- bootMer(mam,FUN=function(x) predict(x,newdata=newdata,re.form=~0),
## re.form=~0 is equivalent to use.u=FALSE
nsim=100,seed=101)
# Summarise a bootMer result: for each prediction (column of x$t) return
# the bootstrap mean and the 2.5% / 97.5% percentile interval.
# `ext` is unused but kept for interface compatibility with callers.
bootsum <- function(x, ext = "_1") {
  summarise_col <- function(draws) {
    c(mean(draws), quantile(draws, c(0.025, 0.975)))
  }
  d <- t(data.frame(apply(x$t, 2, summarise_col)))
  colnames(d) <- c("bpred", "lwr", "upr")
  return(d)
}
newdata[, 5:7] <- bootsum(b3,"_3")
newdata$age <- as.factor(newdata$age)
ggplot(newdata, aes(x=year))+
geom_ribbon(aes( ymin=lcl, ymax=ucl, fill=age), alpha=0.3)+
geom_line(aes(y=predicted, linetype=age), color="black")+
labs(y="Body mass (g)", x="Year", linetype="Age (days)", fill="Age (days)")+
xlim(1975,2017)+
scale_fill_grey()+
scale_color_grey()+
#geom_point(data=dat3 %>% filter(age==15 | age==10), aes(x=year, y=mass, color= factor(age)), alpha=0.5)+
theme_classic(base_size = 16, base_family="serif")+
theme(legend.position = c(0.85, 0.8))
ggsave(filename="~/Masters Thesis Project/Weather determined growth and mortality paper/Plots/Nestling mass through time.jpeg", units="in", width=5, height=4, device="jpeg")
ggsave(filename="~/Masters Thesis Project/Weather determined growth and mortality paper/Plots/Nestling mass through time.pdf", units="in", width=5, height=4, device="pdf")
ggplot(newdata, aes(x=year))+
geom_ribbon(aes( ymin=lcl, ymax=ucl, fill=age), alpha=0.3, show.legend = F)+
geom_line(aes(y=predicted, linetype=age), color="black", show.legend = F)+
geom_point(data=dat3 %>% filter(age==10 | age==15), aes(x=year, y=mass), shape=1)+
labs(y="Body mass (g)", x="Year", linetype="Age (days)", fill="Age (days)")+
xlim(1975,2017)+
scale_fill_grey()+
scale_color_grey()+
#geom_point(data=dat3 %>% filter(age==15 | age==10), aes(x=year, y=mass, color= factor(age)), alpha=0.5)+
theme_classic(base_size = 16, base_family="serif")+
facet_grid(~age )
ggsave(filename="~/Masters Thesis Project/Weather determined growth and mortality paper/Plots/Nestling mass through time with points.jpeg", units="in", width=8, height=4, device="jpeg")
####Presentation quality graphs
ggplot(newdata, aes(x=year))+
geom_ribbon(aes( ymin=lcl, ymax=ucl, fill=age), alpha=0.3)+
geom_line(aes(y=predicted, linetype=age), color="black")+
labs(y="Body mass (g)", x="Year", linetype="Age (days)", fill="Age (days)")+
xlim(1975,2017)+
#geom_point(data=dat3 %>% filter(age==15 | age==10), aes(x=year, y=mass, color= factor(age)), alpha=0.5)+
theme_classic(base_size = 20)+
theme(legend.position = c(0.85, 0.8),
axis.title.y=element_text(angle=0, vjust=0.5))
ggsave(filename="~/Masters Thesis Project/NACCB Conference/Presentation Figures/Nestling mass through time.jpeg", units="in", width=8, height=5, device="jpeg")
##################Need to double check that nestlings haven't just gotten smaller.
dat4 <- dat %>% filter(!is.na(year) & !is.na(ninprim) & !is.na(age) & ninprim<80 & age>9 & age<16 & year< 2007) %>% group_by(nestlingID) %>% slice(which.min(abs(age)-12))
ggplot(dat4, aes(x=age, y=ninprim))+
geom_point()+
geom_smooth()
ggplot(dat4, aes(x=year, y=age))+
geom_point()+
geom_smooth()+
ylim(10,16)
dat4$year2 <- (dat4$year-1975) /10
wmod <- lmer(ninprim~age*year2 + (1|nestID), data=dat4, na.action="na.fail")
plot(wmod)
hist(resid(wmod))
plot(resid(wmod)~dat4$year)
plot(resid(wmod)~dat4$age)
#looks good. Although there is a fairly large chunk of time where we are missing measurements
summary(wmod)
dredge(wmod)
anova(wmod)
wmam <- lmer(ninprim~age*year2 + (1|nestID), data=dat4, na.action="na.fail")
summary(wmam)
anova(wmam)
wnewdata <- data.frame(age=c(rep(12, 30 ), rep(15, 30)),
year2=rep(seq(1.3,4.2 , 0.1), 2),
year=rep(seq(1988,2017 , 1), 2),
predicted=NA,
lcl=NA,
ucl=NA)
#calculate predicted values.
wnewdata$predicted <- predict(wmam, wnewdata, re.form=NA)
#age= 16 for the ready to fledge birds and 12 for the not
#bootstrap confidence intervales based on https://github.com/lme4/lme4/issues/388
## param only (not including individual variation or anything like that)
b3 <- bootMer(wmam,FUN=function(x) predict(x,newdata=wnewdata,re.form=~0),
## re.form=~0 is equivalent to use.u=FALSE
nsim=100,seed=101)
wnewdata[4:6] <- bootsum(b3,"_3")
wnewdata$age <- as.factor(wnewdata$age)
ggplot(wnewdata, aes(x=year))+
geom_line(aes(y=predicted, color=age))+
geom_ribbon(aes( ymin=lcl, ymax=ucl, fill=age), alpha=0.3)+
labs(y="Wing chord (mm)", x="Year", color="Age (days)", fill="Age (days)")+
xlim(1975,2017)+
scale_fill_manual(values=c("steelblue","orchid3"))+
scale_color_manual(values=c("steelblue","orchid3"))+
#geom_point(data=dat4 %>% filter(age==15 | age==10), aes(x=year, y=ninprim, color= factor(age)), alpha=0.5)+
theme_classic(base_size = 16)
| /Nestling Mass through time.R | no_license | 11arc4/Weather-related-mortality-and-growth | R | false | false | 9,383 | r | #Are nestlings fledging at a lower weight than they did in the past?
#I'm interested in knowing this because it could help explain why we see
#differences in recruitment across years. It could also explain why local
#weather conditions only affect survival when the population is declining. If
#the juveniles are poor quality they don't have much buffer and both they and
#their parents may have to work harder to feed themselves in poor weather.
library(tidyverse)
library(lme4)
library(MuMIn)
dat <- read.csv("~/Masters Thesis Project/Tree Swallow Data/Amelia TRES data 1975-2016/Extracted Data for Analysis/Nestling Measurements for Analysis 1975-2017.csv", as.is=T)
#There look to be some points in there that must be typos or misrecordings because there's no way the bird was that size.
ggplot(dat %>% filter(mass<30, age>0, age<20), aes(x=age, y=mass))+
geom_point()+
geom_smooth(method="lm", formula=y~poly(x, 3))
dat2 <- dat %>% filter(mass<30, age>2, age<17)
ggplot(dat2 %>% filter(age>=10), aes(x=age, y=mass))+
geom_point()+
geom_smooth(method="lm")+
ggthemes::theme_few()
#looks like even at the very coarse population status level, we may be seeing
#differences in mass for the older birds.
#OK let's try modelling changes in nestling mass through time. If we model all
#the ages we probably need to include something to deal with the polynomial
#nature of the data. Instead, perhaps it's better to only look at the last
#measurements (day 10 and on), when they are closer to fledging. I used only one
#measurement per nestling (the one closest to 12 days old)
dat3 <- dat2 %>%
filter(!is.na(year) & !is.na(age) & age>9 & age<=16 ) %>%
group_by(nestlingID) %>%
slice(which.min(abs(age)-12)) %>%
mutate(year2 =(year-1977)/10,
age2=age-10)
dat4 <- dat2 %>%
filter(!is.na(year) & !is.na(age) & age>9 & age<=16 ) %>%
group_by(nestlingID) %>%
slice(which.min(abs(age)-12))%>%
group_by(nestID, year) %>%
summarise(mmass=mean(mass),
age=mean(age)) %>%
mutate(year2 =(year-1977)/10,
age2=age-10)
length(unique(dat3$nestID))
#Data set has 28, 577 rows, and 2,915 unique nests, 13,842 unique nestlings.
ggplot(dat3, aes(x=age, y=mass))+
geom_point()+
geom_smooth(method="lm", formula=y~x)
#The problem for the temporal autocorrelation is that it's unclear WHAT to
#temporally autocorrelate. (e.g. it knows that it can't temporally
#autocorrelate nests because they only show up within one year but it's unclear)
mod1 <- lmer(mass~age*year2 + (1|nestID), data=dat3, na.action="na.fail")
mod1 <- lme(mass~age*year2,
random=~1|nestID,
data=dat3,
na.action="na.fail")
dat3$res <- residuals(mod1, type = "response")
dat4 <- dat3 %>% mutate(epsilon = lag(res))
summary(mod1)
plot(ACF(mod1))
mod1 <- lme(mass~age*year2+epsilon,
random=~1|nestID,
data=dat3,
na.action="na.fail")
plot(mod1)
qqnorm(mod1)
hist(resid(mod1))
plot(resid(mod1)~dat3$age)
plot(resid(mod1)~dat3$year2)
#This model looks really good.
#Need to keep the random nest effect
summary(mod)
dredge(mod)
anova(mod, test="F")
mod1_c <- lme(mass~age*year2,
random=~1|nestID,
correlation = corAR1(form=~year2|nestID), #CAN"T have a crouping because there's nothing
data=dat3,
na.action="na.fail")
mod1_c <- gls(mmass~age*year2,
correlation = corCAR1(form=~year2), #CAN"T have a crouping because there's nothing
data=dat4,
na.action="na.fail")
#We should keep all terms
AICc(mod1, mod1_c) #Better not to have the correlation structure.
mam <- lmer(mass~age*year2 + (1|nestID), data=dat3, na.action="na.fail")
summary(mam)
#We'll center age at 10 and 15
lmerTest
library(lmerTest)
anova(mam, test="F")
newdata <- data.frame(age=c(rep(10, 41 ), rep(15, 41)),
year2=rep(seq(0.2,4.2 , 0.1), 2),
age2= c(rep(0, 41 ), rep(5, 41)),
year=rep(seq(1977,2017 , 1), 2),
predicted=NA,
lcl=NA,
ucl=NA)
#calculate predicted values.
newdata$predicted <- predict(mam, newdata, re.form=NA)
#age= 16 for the ready to fledge birds and 12 for the not
#bootstrap confidence intervales based on https://github.com/lme4/lme4/issues/388
## param only (not including individual variation or anything like that)
b3 <- bootMer(mam,FUN=function(x) predict(x,newdata=newdata,re.form=~0),
## re.form=~0 is equivalent to use.u=FALSE
nsim=100,seed=101)
bootsum <- function(x,ext="_1") {
d <- t(data.frame(apply(x$t,2,
function(x) c(mean(x),quantile(x,c(0.025,0.975))))))
colnames(d) <- c("bpred","lwr","upr")
return(d)
}
newdata[, 5:7] <- bootsum(b3,"_3")
newdata$age <- as.factor(newdata$age)
ggplot(newdata, aes(x=year))+
geom_ribbon(aes( ymin=lcl, ymax=ucl, fill=age), alpha=0.3)+
geom_line(aes(y=predicted, linetype=age), color="black")+
labs(y="Body mass (g)", x="Year", linetype="Age (days)", fill="Age (days)")+
xlim(1975,2017)+
scale_fill_grey()+
scale_color_grey()+
#geom_point(data=dat3 %>% filter(age==15 | age==10), aes(x=year, y=mass, color= factor(age)), alpha=0.5)+
theme_classic(base_size = 16, base_family="serif")+
theme(legend.position = c(0.85, 0.8))
ggsave(filename="~/Masters Thesis Project/Weather determined growth and mortality paper/Plots/Nestling mass through time.jpeg", units="in", width=5, height=4, device="jpeg")
ggsave(filename="~/Masters Thesis Project/Weather determined growth and mortality paper/Plots/Nestling mass through time.pdf", units="in", width=5, height=4, device="pdf")
ggplot(newdata, aes(x=year))+
geom_ribbon(aes( ymin=lcl, ymax=ucl, fill=age), alpha=0.3, show.legend = F)+
geom_line(aes(y=predicted, linetype=age), color="black", show.legend = F)+
geom_point(data=dat3 %>% filter(age==10 | age==15), aes(x=year, y=mass), shape=1)+
labs(y="Body mass (g)", x="Year", linetype="Age (days)", fill="Age (days)")+
xlim(1975,2017)+
scale_fill_grey()+
scale_color_grey()+
#geom_point(data=dat3 %>% filter(age==15 | age==10), aes(x=year, y=mass, color= factor(age)), alpha=0.5)+
theme_classic(base_size = 16, base_family="serif")+
facet_grid(~age )
ggsave(filename="~/Masters Thesis Project/Weather determined growth and mortality paper/Plots/Nestling mass through time with points.jpeg", units="in", width=8, height=4, device="jpeg")
####Presentation quality graphs
ggplot(newdata, aes(x=year))+
geom_ribbon(aes( ymin=lcl, ymax=ucl, fill=age), alpha=0.3)+
geom_line(aes(y=predicted, linetype=age), color="black")+
labs(y="Body mass (g)", x="Year", linetype="Age (days)", fill="Age (days)")+
xlim(1975,2017)+
#geom_point(data=dat3 %>% filter(age==15 | age==10), aes(x=year, y=mass, color= factor(age)), alpha=0.5)+
theme_classic(base_size = 20)+
theme(legend.position = c(0.85, 0.8),
axis.title.y=element_text(angle=0, vjust=0.5))
ggsave(filename="~/Masters Thesis Project/NACCB Conference/Presentation Figures/Nestling mass through time.jpeg", units="in", width=8, height=5, device="jpeg")
##################Need to double check that nestlings haven't just gotten smaller.
dat4 <- dat %>% filter(!is.na(year) & !is.na(ninprim) & !is.na(age) & ninprim<80 & age>9 & age<16 & year< 2007) %>% group_by(nestlingID) %>% slice(which.min(abs(age)-12))
ggplot(dat4, aes(x=age, y=ninprim))+
geom_point()+
geom_smooth()
ggplot(dat4, aes(x=year, y=age))+
geom_point()+
geom_smooth()+
ylim(10,16)
dat4$year2 <- (dat4$year-1975) /10
wmod <- lmer(ninprim~age*year2 + (1|nestID), data=dat4, na.action="na.fail")
plot(wmod)
hist(resid(wmod))
plot(resid(wmod)~dat4$year)
plot(resid(wmod)~dat4$age)
#looks good. Although there is a fairly large chunk of time where we are missing measurements
summary(wmod)
dredge(wmod)
anova(wmod)
wmam <- lmer(ninprim~age*year2 + (1|nestID), data=dat4, na.action="na.fail")
summary(wmam)
anova(wmam)
wnewdata <- data.frame(age=c(rep(12, 30 ), rep(15, 30)),
year2=rep(seq(1.3,4.2 , 0.1), 2),
year=rep(seq(1988,2017 , 1), 2),
predicted=NA,
lcl=NA,
ucl=NA)
#calculate predicted values.
wnewdata$predicted <- predict(wmam, wnewdata, re.form=NA)
#age= 16 for the ready to fledge birds and 12 for the not
#bootstrap confidence intervales based on https://github.com/lme4/lme4/issues/388
## param only (not including individual variation or anything like that)
b3 <- bootMer(wmam,FUN=function(x) predict(x,newdata=wnewdata,re.form=~0),
## re.form=~0 is equivalent to use.u=FALSE
nsim=100,seed=101)
wnewdata[4:6] <- bootsum(b3,"_3")
wnewdata$age <- as.factor(wnewdata$age)
ggplot(wnewdata, aes(x=year))+
geom_line(aes(y=predicted, color=age))+
geom_ribbon(aes( ymin=lcl, ymax=ucl, fill=age), alpha=0.3)+
labs(y="Wing chord (mm)", x="Year", color="Age (days)", fill="Age (days)")+
xlim(1975,2017)+
scale_fill_manual(values=c("steelblue","orchid3"))+
scale_color_manual(values=c("steelblue","orchid3"))+
#geom_point(data=dat4 %>% filter(age==15 | age==10), aes(x=year, y=ninprim, color= factor(age)), alpha=0.5)+
theme_classic(base_size = 16)
|
# Bayesian sample-size determination for the difference between two normal
# means under the mixed Bayesian/likelihood (MBL) criterion with a
# "modified worst outcome" (woc) guarantee: searches for the smallest n
# such that an interval of total length `len` attains coverage `level`
# even at the worst `worst.level` quantile of simulated total variances.
#
# len            : target total length of the posterior interval
# alpha1, beta1  : gamma prior parameters for the first group's precision
# alpha2, beta2  : gamma prior parameters for the second group's precision
# level          : required coverage of the interval
# worst.level    : quantile of simulated variances defining "worst outcome"
# m              : number of Monte Carlo draws per candidate n
# mcs            : max consecutive search steps in one direction before
#                  the search double-checks itself
# Returns c(n1, n2): equal sample sizes for the two groups.
# NOTE(review): relies on mudiff.freq() and rgg() from the same package.
`mudiff.mblmodwoc` <-
function(len,alpha1,beta1,alpha2,beta2,level=0.95,worst.level=0.95,m=50000,mcs=3)
{
  min.for.possible.return <- 2^ceiling(1.5*mcs)
  # If we always allow a return, there is a risk of making bad steps
  # when we are close to the answer.
  # Thus, we should not allow any return once some arbitrary 'step' (which is
  # 'min.for.possible.return') is reached.
  #vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
  #**************************************************************************
  # Define initial step
  # (as a function of any frequentist sample size estimate)
  step <- ceiling(log(mudiff.freq(len,alpha1/beta1,alpha2/beta2,level)[1])/log(2))
  # Also define the threshold to cross for the quantity under study (the
  # length or the coverage probability of an HPD region)
  threshold <- level
  # and define a factor, which is +/- 1, depending on if the quantity under
  # study is (almost) surely too large or too small when making no
  # observations [-1 if the quantity to measure is DEcreasing with n
  #               +1 if the quantity to measure is INcreasing with n]
  #
  #        [ -1 if threshold_len,  +1 if thresold_level ]
  factor <- +1
  #**************************************************************************
  #^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  # Starting value guaranteed to be on the wrong side of the threshold,
  # so the search always begins by moving.
  quantity.to.measure <- ifelse(factor == +1,0,2*threshold)
  step <- 2^step
  # Histories of visited n's and of the signed steps that led to them;
  # consulted when the search suspects it made a Monte Carlo mistake.
  history.ns <- 0
  history.steps <- 0
  #  history.cons.steps_0
  n1 <- 0
  max.cons.steps.same.dir <- mcs
  found.upper.bound <- FALSE
  possible.to.move.back <- TRUE
  cons.steps.same.dir <- 0
  direction <- +1
  # Outer loop: halve the step after each direction reversal until the
  # step size drops below 1.
  while(step>=1)
  {
    # Inner loop: keep stepping in the current direction while the
    # measured quantity is still on the wrong side of the threshold.
    while(sign(factor*(threshold-quantity.to.measure)) == direction && step >= 1)
    {
      step[found.upper.bound] <- max(1,step/2)
      possible.to.move.back[step < min.for.possible.return &&
                            found.upper.bound] <- FALSE
      n1 <- n1+direction*step
      # Clamp at the minimum usable sample size of 2 per group.
      if(n1 <= 2) {
        found.lower.bound <- TRUE
        n1 <- 2
      }
      cons.steps.same.dir <- cons.steps.same.dir+1
      history.ns <- c(n1,history.ns)
      history.steps <- c(step*direction,history.steps)
      #vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
      #************************************************************************
      # Define n2 from n1
      n2 <- n1
      #*********************************
      # Let total.var=n1*s21/n1/(n1-1)+n2*s22/n2/(n2-1)
      # Monte Carlo draws of the posterior total variance of the mean
      # difference for the candidate sample sizes.
      total.var <- rgg(m,alpha1,2*beta1/n1/(n1-1),(n1-1)/2)+
                   rgg(m,alpha2,2*beta2/n2/(n2-1),(n2-1)/2)
      # Empirical worst.level-quantile of the simulated variances.
      worst.var <- sort(total.var)[worst.level*m]
      # Coverage of an interval of length len at the worst-case variance.
      quantity.to.measure <- 2*pnorm(len/2/sqrt(worst.var))-1
      #************************************************************************
      #^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      if(found.upper.bound &&
         cons.steps.same.dir == max.cons.steps.same.dir+1 &&
         possible.to.move.back)
      {
        if(sign(factor*(threshold-quantity.to.measure)) == direction)
        {
          # There was (most likely) a mistake, look for n's in the other direction
          at.n <- seq(along=history.ns)[history.ns == n1 ]
          hs.an <- history.steps[at.n]
          step <- abs(hs.an[sign(hs.an) != direction][1])
          cons.steps.same.dir <- 0
          # and if there has never been a step coming from the other direction
          step[is.na(step)] <- max(abs(hs.an))
        }
        else
        {
          # There was (most likely) no mistake; keep looking around the same n's
          direction <- -direction
          cons.steps.same.dir <- 0
        }
      }
      if(found.upper.bound &&
         cons.steps.same.dir==max.cons.steps.same.dir &&
         sign(factor*(threshold-quantity.to.measure))==direction &&
         possible.to.move.back)
      {
        # Too many consecutive steps the same way: enlarge the step to
        # escape what may be a Monte Carlo plateau.
        step <- 2*step
      }
    }
    found.upper.bound <- TRUE
    direction <- -direction
    cons.steps.same.dir <- 0
    step[step==1] <- 0
  }
  direction[n1==0] <- 0
  # If the search stopped while moving upward, n1 itself failed the
  # criterion, so the answer is the next integer up.
  n1[direction==+1] <- n1+1
  # Return
  c(n1,n1)
}
| /R/mudiff.mblmodwoc.R | no_license | cran/SampleSizeMeans | R | false | false | 4,285 | r | `mudiff.mblmodwoc` <-
function(len,alpha1,beta1,alpha2,beta2,level=0.95,worst.level=0.95,m=50000,mcs=3)
{
min.for.possible.return <- 2^ceiling(1.5*mcs)
# If we always allow a return, there is a risk of making bad steps
# when we are close to the answer.
# Thus, we should not allow any return once some arbitrary 'step' (which is
# 'min.for.possible.return') is reached.
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
#**************************************************************************
# Define initial step
# (as a function of any frequentist sample size estimate)
step <- ceiling(log(mudiff.freq(len,alpha1/beta1,alpha2/beta2,level)[1])/log(2))
# Also define the threshold to cross for the quantity under study (the
# length or the coverage probability of an HPD region)
threshold <- level
# and define a factor, which is +/- 1, depending on if the quantity under
# study is (almost) surely too large or too small when making no
# observations [-1 if the quantity to measure is DEcreasing with n
# +1 if the quantity to measure is INcreasing with n]
#
# [ -1 if threshold_len, +1 if thresold_level ]
factor <- +1
#**************************************************************************
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
quantity.to.measure <- ifelse(factor == +1,0,2*threshold)
step <- 2^step
history.ns <- 0
history.steps <- 0
# history.cons.steps_0
n1 <- 0
max.cons.steps.same.dir <- mcs
found.upper.bound <- FALSE
possible.to.move.back <- TRUE
cons.steps.same.dir <- 0
direction <- +1
while(step>=1)
{
while(sign(factor*(threshold-quantity.to.measure)) == direction && step >= 1)
{
step[found.upper.bound] <- max(1,step/2)
possible.to.move.back[step < min.for.possible.return &&
found.upper.bound] <- FALSE
n1 <- n1+direction*step
if(n1 <= 2) {
found.lower.bound <- TRUE
n1 <- 2
}
cons.steps.same.dir <- cons.steps.same.dir+1
history.ns <- c(n1,history.ns)
history.steps <- c(step*direction,history.steps)
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
#************************************************************************
# Define n2 from n1
n2 <- n1
#*********************************
# Let total.var=n1*s21/n1/(n1-1)+n2*s22/n2/(n2-1)
total.var <- rgg(m,alpha1,2*beta1/n1/(n1-1),(n1-1)/2)+
rgg(m,alpha2,2*beta2/n2/(n2-1),(n2-1)/2)
worst.var <- sort(total.var)[worst.level*m]
quantity.to.measure <- 2*pnorm(len/2/sqrt(worst.var))-1
#************************************************************************
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if(found.upper.bound &&
cons.steps.same.dir == max.cons.steps.same.dir+1 &&
possible.to.move.back)
{
if(sign(factor*(threshold-quantity.to.measure)) == direction)
{
# There was (most likely) a mistake, look for n's in the other direction
at.n <- seq(along=history.ns)[history.ns == n1 ]
hs.an <- history.steps[at.n]
step <- abs(hs.an[sign(hs.an) != direction][1])
cons.steps.same.dir <- 0
# and if there has never been a step coming from the other direction
step[is.na(step)] <- max(abs(hs.an))
}
else
{
# There was (most likely) no mistake; keep looking around the same n's
direction <- -direction
cons.steps.same.dir <- 0
}
}
if(found.upper.bound &&
cons.steps.same.dir==max.cons.steps.same.dir &&
sign(factor*(threshold-quantity.to.measure))==direction &&
possible.to.move.back)
{
step <- 2*step
}
}
found.upper.bound <- TRUE
direction <- -direction
cons.steps.same.dir <- 0
step[step==1] <- 0
}
direction[n1==0] <- 0
n1[direction==+1] <- n1+1
# Return
c(n1,n1)
}
|
#################################################################################################
###This code will generate grower reports by:
#1.bring in the strip data for a site
#2.run paired t-test,
#3.create plots
#4.Accesses lab results and generates reports
## Approach using polygon to pull out raw yield data
###############################################################################################
### load in the libraries
#install.packages("PairedData")
#install.packages("RGraphics")
#install.packages("gridExtra")
#install.packages("rdrop2")
library(dplyr)
library(tidyverse)
library(ggplot2)
library(readxl)
library(PairedData)
library(cowplot)
library(grid)
library(RGraphics)
library(gridExtra)
library(rdrop2)
###############################################################################################
##1a. Details about the site what it looks like in the database
database_name_of_path <-
file.path(
"W:",
"value_soil_testing_prj",
"data_base")
Organisation_db = "Landmark"
Contact_Farmer_db = "Matt Nihill 7"
Paddock_tested_db = "Jenharwil 2"
Zone_db = "James 2"
data_file = "James_Yld_Seg_ID_zone.csv"
Fert_legend_name <- "N Rates"
##1b. set path for getting my spatial data and location of saving outputs
name_of_path <-
file.path(
"W:",
"value_soil_testing_prj",
"Yield_data",
Organisation_db,
"Matt_Nihill",
"Jenharwil_2",
"James 2")
graph_path <-
file.path(name_of_path)
seg_ID <- read_csv(paste0(name_of_path, "/", data_file))
names(seg_ID)
##1c. make name consistant
seg_ID <-
rename(seg_ID,
"Rates" = "Rate", #new name = old name
"Zone" = "Zone",
"Yld" = "Yld_Mass_D"
)
##1c. Set up data so its generic growers rate, rate1, rate2, rate3, zone1, zone2
#Define the rates
unique(seg_ID$Rates)
Grower_rate = 80
rate1 = 0
rate2 = 150
#rate3 = 110
list_rates <- data.frame( rate_name = c("Grower_rate" , "rate1", "rate2"),
Rates = c(Grower_rate,rate1, rate2 ) )
list_rates
#Define the zones
unique(seg_ID$Zone)
zone1 <- "Low"
zone2 <- "High"
############################################################################################################################
### Clean the data: drop records that cannot be analysed.
# Rows with DistOnLine == 0 fall outside the strip proper, and rows with
# Yld == 0 have no harvest data (the farmer-practice strip was incomplete),
# so both are removed in a single filter call.
seg_ID <- seg_ID %>%
  filter(DistOnLine != 0,
         Yld != 0)
#############################################################################################################################
##2. t test per segment in the strip Via Andrea method####
# Pairwise subsets: each comparison keeps only the two rates being tested
# (check against Harm's list of planned comparisons for the site).
seg_ID_rate1vsGR <- seg_ID %>% filter(Rates %in% c(rate1, Grower_rate))
seg_ID_rate2vsGR <- seg_ID %>% filter(Rates %in% c(rate2, Grower_rate))
#seg_ID_rate3vsGR <- seg_ID %>% filter(Rates %in% c(rate3, Grower_rate))
# All segment IDs present in the comparison data; drives the per-segment loops below.
# NOTE(review): `list` shadows base::list() - kept because the loops below use it.
list <- unique(seg_ID_rate1vsGR$SegmentID)
############################################################################################################
##2a. Welch t-test of rate1 vs grower rate, run separately for every segment.
# FIX: results are collected in a preallocated list and bound once at the end,
# instead of growing a data frame with rbind() inside the loop (quadratic cost);
# the misspelled `p_vlaue` local is also gone.
results <- vector("list", length(list))
for (k in seq_along(list)) {
  i <- list[k]
  segment_data <- subset(seg_ID_rate1vsGR, SegmentID == i)
  # Two numeric vectors: yields under the grower rate (x) and under rate1 (y).
  data_x <- subset(segment_data, Rates == Grower_rate, select = Yld, drop = TRUE)
  data_y <- subset(segment_data, Rates == rate1, select = Yld, drop = TRUE)
  res_method1 <- t.test(data_x, data_y, var.equal = FALSE)  # Welch: unequal variances
  result <- data.frame(SegmentID = i, P_value = res_method1$p.value)
  results[[k]] <- result
}
Output_rate1vsGR <- bind_rows(results)
#convert the P value into "significant" / "not significant" at alpha = 0.05
Output_rate1vsGR <- mutate(Output_rate1vsGR,
                           Significant = case_when(
                             P_value < 0.05 ~ "significant",
                             TRUE ~ "not significant"
                           ))
# Summarise the raw comparison data to one mean value per segment/zone/rate,
# then attach the per-segment t-test results and tag which comparison this is.
head(Output_rate1vsGR)
seg_ID_rate1vsGR_summary <- seg_ID_rate1vsGR %>%
  group_by(SegmentID, Zone, Rates) %>%
  summarise_all(mean) %>%
  ungroup() %>%
  left_join(Output_rate1vsGR) %>%   # joins on SegmentID
  mutate(comparison = "rate1vsGR")
seg_ID_rate1vsGR_summary
#####################################################################################################
##2b. Welch t-test of rate2 vs grower rate, per segment (same method as 2a).
# Note: the vector order is swapped relative to 2a (x = rate2, y = grower rate);
# that only flips the sign of the (unused) estimate, not the p-value.
# FIX: preallocated list + one bind_rows() instead of rbind() growth in the loop.
results <- vector("list", length(list))
for (k in seq_along(list)) {
  i <- list[k]
  segment_data <- subset(seg_ID_rate2vsGR, SegmentID == i)
  data_x <- subset(segment_data, Rates == rate2, select = Yld, drop = TRUE)
  data_y <- subset(segment_data, Rates == Grower_rate, select = Yld, drop = TRUE)
  res_method1 <- t.test(data_x, data_y, var.equal = FALSE)  # Welch: unequal variances
  result <- data.frame(SegmentID = i, P_value = res_method1$p.value)
  results[[k]] <- result
}
Output_rate2vsGR <- bind_rows(results)
#convert the P value into "significant" / "not significant" at alpha = 0.05
Output_rate2vsGR <- mutate(Output_rate2vsGR,
                           Significant = case_when(
                             P_value < 0.05 ~ "significant",
                             TRUE ~ "not significant"
                           ))
# Summarise the comparison data per segment/zone/rate and attach the test results.
seg_ID_rate2vsGR_summary <- seg_ID_rate2vsGR %>%
  group_by(SegmentID, Zone, Rates) %>%
  summarise_all(mean) %>%
  ungroup() %>%
  left_join(Output_rate2vsGR) %>%   # joins on SegmentID
  mutate(comparison = "rate2vsGR")
#####################################################################################################
##2c. Welch t-test of rate3 vs grower rate, per segment.
# BUG FIX: this site has no rate3 strip - `rate3` and `seg_ID_rate3vsGR` are
# commented out in sections 1 and 2 above - so running this section unguarded
# stopped the whole script with "object 'seg_ID_rate3vsGR' not found".
# The section now runs only when both objects exist (i.e. at three-rate sites).
if (exists("rate3") && exists("seg_ID_rate3vsGR")) {
  Output_rate3vsGR <- bind_rows(lapply(list, function(i) {
    segment_data <- subset(seg_ID_rate3vsGR, SegmentID == i)
    data_x <- subset(segment_data, Rates == rate3, select = Yld, drop = TRUE)
    data_y <- subset(segment_data, Rates == Grower_rate, select = Yld, drop = TRUE)
    res <- t.test(data_x, data_y, var.equal = FALSE)  # Welch: unequal variances
    data.frame(SegmentID = i, P_value = res$p.value)
  }))
  #convert the P value into "significant" / "not significant" at alpha = 0.05
  Output_rate3vsGR <- mutate(Output_rate3vsGR,
                             Significant = case_when(
                               P_value < 0.05 ~ "significant",
                               TRUE ~ "not significant"
                             ))
  # Summarise the comparison data per segment/zone/rate and attach the test results.
  seg_ID_rate3vsGR_summary <- seg_ID_rate3vsGR %>%
    group_by(SegmentID, Zone, Rates) %>%
    summarise_all(mean) %>%
    ungroup() %>%
    left_join(Output_rate3vsGR) %>%
    mutate(comparison = "rate3vsGR")
}
###############################################################################################################
##2d. Stack the per-comparison summaries into one table (add test 3 when a third rate exists)
head(seg_ID_rate1vsGR_summary)
head(seg_ID_rate2vsGR_summary)
#head(seg_ID_rate3vsGR_summary)
seg_ID_t_test_summary <- rbind(seg_ID_rate1vsGR_summary, seg_ID_rate2vsGR_summary)
#seg_ID_t_test_summary <- rbind(seg_ID_rate1vsGR_summary, seg_ID_rate2vsGR_summary, seg_ID_rate3vsGR_summary)
### free the intermediate objects now that seg_ID_t_test_summary holds everything we need
rm(list = c("Output_rate1vsGR",
"Output_rate2vsGR",
"seg_ID_rate1vsGR_summary",
"seg_ID_rate2vsGR_summary",
"res_method1",
"result",
"seg_ID_rate1vsGR",
"seg_ID_rate2vsGR"
))
##############################################################################################################
##3a. plot results of t.test ########################################################################
seg_ID_t_test_summary$P_Rate_as_factor <- as.factor(seg_ID_t_test_summary$Rates)
##3aa. Graph parameters: the segment-ID range covered by each zone, used to
## draw the shaded bands behind the yield traces.
#Zone1
zone1_min <- seg_ID_t_test_summary %>% filter(Zone == zone1) %>% pull(SegmentID) %>% min()
zone1_max <- seg_ID_t_test_summary %>% filter(Zone == zone1) %>% pull(SegmentID) %>% max()
#Zone2
zone2_min <- seg_ID_t_test_summary %>% filter(Zone == zone2) %>% pull(SegmentID) %>% min()
zone2_max <- seg_ID_t_test_summary %>% filter(Zone == zone2) %>% pull(SegmentID) %>% max()
##3b. Yield along the strip, one line per N rate, with shaded zone bands and labels.
segments <- ggplot(seg_ID_t_test_summary,
                   aes(SegmentID, Yld, group = P_Rate_as_factor)) +
  geom_line(size = 1, alpha = 0.4, aes(color = P_Rate_as_factor)) +
  scale_color_manual(values = c('darkgrey', 'green', 'blue', 'red'),
                     name = Fert_legend_name) +
  theme_bw() +
  ylim(0.0, 6) +
  labs(x = "Distance along the strip",
       y = "Yield t/ha",
       title = "",
       subtitle = "",
       caption = "") +
  annotate("rect", xmin = zone1_min, xmax = zone1_max, ymin = 0, ymax = 6,  # zone 1 band
           alpha = .2) +
  annotate("text", x = 85, y = 1, label = zone1) +
  annotate("rect", xmin = zone2_min, xmax = zone2_max, ymin = 0, ymax = 6,  # zone 2 band
           alpha = .2) +
  annotate("text", x = 57, y = 1, label = zone2)
#+
#  annotate("text", x = 40, y = 1, label = "Missing data")
##3c. Save the per-segment graph and the summary table behind it
segments #this is the graph
ggsave(path = graph_path, filename = "t-test_segments.png", device = "png",
       width = 20, height = 10, units = "cm")
write.csv(seg_ID_t_test_summary, paste0(graph_path, "/t_test_segments.csv"))
###################################################################################################################################
##4a. Paired t test for zone strip Zone 1 ####
## Average the yield values within each line segment so that each rate has
## exactly one observation per segment, then pair the observations BY SEGMENT.
zone_1 <- filter(seg_ID, Zone == zone1)
zone_av_1 <- group_by(zone_1, SegmentID, Rates) %>%
  summarise_all(mean)
# Keep a segment only when BOTH rates of a comparison are present in it;
# otherwise the paired t-test vectors would have unequal lengths.
keep_complete_pairs <- function(df) {
  complete <- df$SegmentID[duplicated(df$SegmentID)]
  filter(df, SegmentID %in% complete)
}
zone_av_1_rate1vsGR <- keep_complete_pairs(filter(zone_av_1, Rates %in% c(rate1, Grower_rate)))
zone_av_1_rate2vsGR <- keep_complete_pairs(filter(zone_av_1, Rates %in% c(rate2, Grower_rate)))
#zone_av_1_rate3vsGR <- keep_complete_pairs(filter(zone_av_1, Rates %in% c(rate3, Grower_rate)))
# Paired t-test, pairing explicitly on SegmentID.
# FIX: the previous `t.test(Yld ~ Rates, paired = TRUE)` relied on row order
# for the pairing and the formula+paired interface is deprecated/defunct in
# recent R (>= 4.4).  Sorting both rate vectors by SegmentID reproduces the
# original p-values (the data were already sorted) while making the pairing
# explicit and order-safe.  Lower rate is taken first, as the formula method did.
paired_t_by_segment <- function(df) {
  rates <- sort(unique(df$Rates))
  lo <- arrange(filter(df, Rates == rates[1]), SegmentID)
  hi <- arrange(filter(df, Rates == rates[2]), SegmentID)
  stopifnot(identical(lo$SegmentID, hi$SegmentID))
  t.test(lo$Yld, hi$Yld, paired = TRUE)
}
zone_av_1_rate1vsGR_res <- paired_t_by_segment(zone_av_1_rate1vsGR)
zone_av_1_rate2vsGR_res <- paired_t_by_segment(zone_av_1_rate2vsGR)
#zone_av_1_rate3vsGR_res <- paired_t_by_segment(zone_av_1_rate3vsGR)
##### t-test result tables for zone 1
# Turn one htest result into a one-row summary: p-value, mean paired difference,
# the generic rate name, the |difference| rounded for display, and a
# significance flag at alpha = 0.05.  A shared helper replaces the copy-pasted
# blocks that previously existed for test 1 / test 2 / test 3.
summarise_t_res <- function(res, label) {
  data.frame(P_value = as.double(res$p.value),
             Mean_diff = (res$estimate)) %>%
    mutate(
      rate_name = label,
      rounded = abs(round(Mean_diff, 2)),
      Significant = case_when(P_value < 0.05 ~ "significant",
                              TRUE ~ "not significant"))
}
#####test 1 results
zone_av_1_rate1vsGR_res_sig <- summarise_t_res(zone_av_1_rate1vsGR_res, "rate1")
zone_av_1_rate1vsGR_res_sig
####test 2 results
zone_av_1_rate2vsGR_res_sig <- summarise_t_res(zone_av_1_rate2vsGR_res, "rate2")
####test 3 results (enable when the site has a third rate)
# zone_av_1_rate3vsGR_res_sig <- summarise_t_res(zone_av_1_rate3vsGR_res, "rate3")
zone_av_1_rate1vsGR_res_sig
zone_av_1_rate2vsGR_res_sig
#zone_av_1_rate3vsGR_res_sig
# positive_negative_rate1_GRS <-
# Mean yield per rate in zone 1, used to work out the direction of each difference.
mean_zone_av_1 <- group_by(zone_av_1, Rates) %>%
summarise(mean(Yld))
mean_zone_av_1
# Direction of the difference ("plus"/"minus").  NOTE(review): this subtracts two
# 1-row data frames element-wise, so ifelse() returns a matrix; the [1,2] index
# below picks the mean-yield column.  Fragile but works while mean_zone_av_1 has
# exactly the two columns Rates and mean(Yld) - confirm before adding columns.
positive_neg_value_GR_rate1_zone1 <- ifelse(filter(mean_zone_av_1, Rates == Grower_rate)
- filter(mean_zone_av_1, Rates == rate1)>0, "plus", "minus")
positive_neg_value_GR_rate1_zone1 <- positive_neg_value_GR_rate1_zone1[1,2]
positive_neg_value_rate2_GR_zone1 <- ifelse(filter(mean_zone_av_1, Rates == rate2)
- filter(mean_zone_av_1, Rates == Grower_rate)>0, "plus", "minus")
positive_neg_value_rate2_GR_zone1 <- positive_neg_value_rate2_GR_zone1[1,2]
# positive_neg_value_rate3_GR_zone1 <- ifelse(filter(mean_zone_av_1, Rates == rate3)
# - filter(mean_zone_av_1, Rates == Grower_rate)>0, "plus", "minus")
# positive_neg_value_rate3_GR_zone1 <- positive_neg_value_rate3_GR_zone1[1,2]
# Annotation text summarising both comparisons for the zone-1 plot.
p_vlaue_text_zone_1 <- paste0("Yield at N ", Grower_rate, " is N ", rate1, " " ,positive_neg_value_GR_rate1_zone1, " ",
zone_av_1_rate1vsGR_res_sig$rounded, " and is ",
zone_av_1_rate1vsGR_res_sig$Significant, "\n",
"Yield at N ", rate2, " is N ", Grower_rate, " " ,positive_neg_value_rate2_GR_zone1, " ",
zone_av_1_rate2vsGR_res_sig$rounded, " and is ",
zone_av_1_rate2vsGR_res_sig$Significant, collapse = "\n")
# "Yield at P ", rate3, " is P ", Grower_rate , " " ,positive_neg_value_rate3_GR_zone1, " ",
# zone_av_1_rate3vsGR_res_sig$rounded, " and is ",
# zone_av_1_rate3vsGR_res_sig$Significant, collapse = "\n")
print(p_vlaue_text_zone_1)
library(grid)
# Wrap the annotation text in a grob so annotation_custom() can place it on the plot.
Pvalue_on_graph <- grobTree(textGrob(p_vlaue_text_zone_1, x=0.1, y=0.10, hjust=0,
gp=gpar(col="black", fontsize=6, fontface="italic")))
# Plot the results: boxplot of segment-mean yield per rate, with a dashed line at the mean.
zone_av_1
zone_av_1$Rate_as_factor <- as.factor(zone_av_1$Rates)
# NOTE(review): this overwrites zone_1 (previously the filtered data frame) with a ggplot.
zone_1 <- ggplot( zone_av_1, aes(Rate_as_factor, Yld))+
geom_boxplot(alpha=0.1)+
geom_point(colour = "blue", alpha = 0.1)+
# NOTE(review): fun.y and ..y.. are deprecated in newer ggplot2 (use fun= and
# after_stat(y)); left as-is to stay compatible with the installed version.
stat_summary(fun.y = mean, geom = "errorbar", aes(ymax = ..y.., ymin = ..y..),
width = .75, linetype = "dashed")+
theme_bw()+
ylim(2.5,6)+
theme(axis.text=element_text(size=8),
axis.title=element_text(size=10,))+
labs(x = Fert_legend_name,
y= "Yield t/ha",
title = zone1)+
annotation_custom(Pvalue_on_graph)
zone_1
##save the graphs of the zone strip work (ggsave saves the last plot displayed)
ggsave(path= graph_path, filename = "t-test_zone_zone1_strip.png", device = "png" ,
width = 20, height = 10, units = "cm")
#make a table of the mean yield for zone 1 with the t-test results attached
zone_av_1
mean_zone_av_1 <- group_by(zone_av_1, Rates) %>%
summarise(mean(Yld))
mean_zone_av_1 <- left_join(mean_zone_av_1,list_rates)
mean_zone_av_1and_res_sig <- rbind(zone_av_1_rate1vsGR_res_sig, zone_av_1_rate2vsGR_res_sig)
# joins on rate_name (added by the list_rates join above)
mean_zone_av_1 <- left_join(mean_zone_av_1,mean_zone_av_1and_res_sig)
# Tag the table with the site identifiers so rows remain traceable after export.
mean_zone_av_1 <- mutate(mean_zone_av_1,
Zone = zone1,
Organisation =Organisation_db,
Contact_Farmer = Contact_Farmer_db,
Paddock_tested = Paddock_tested_db)
names(mean_zone_av_1)[2] <- "Yld"
write.csv(zone_av_1, paste0(graph_path,"/t_testzone_zone1_av.csv"))
###########################################################################################################################################
##4b. Paired t test for zone strip Zone 2 ####
## Same procedure as 4a: average yields per segment, keep pair-complete
## segments, and pair the t-test observations explicitly BY SEGMENT.
zone_2 <- filter(seg_ID, Zone == zone2)
zone_2
zone_av_2 <- group_by(zone_2, SegmentID, Rates) %>%
  summarise_all(mean)
# Keep a segment only when BOTH rates of a comparison are present in it.
keep_complete_pairs <- function(df) {
  complete <- df$SegmentID[duplicated(df$SegmentID)]
  filter(df, SegmentID %in% complete)
}
zone_av_2_rate1vsGR <- keep_complete_pairs(filter(zone_av_2, Rates %in% c(rate1, Grower_rate)))
zone_av_2_rate2vsGR <- keep_complete_pairs(filter(zone_av_2, Rates %in% c(rate2, Grower_rate)))
#zone_av_2_rate3vsGR <- keep_complete_pairs(filter(zone_av_2, Rates %in% c(rate3, Grower_rate)))
# FIX: replaces the deprecated/order-dependent `t.test(Yld ~ Rates, paired = TRUE)`
# with explicit SegmentID-aligned pairing (see section 4a for details).
paired_t_by_segment <- function(df) {
  rates <- sort(unique(df$Rates))
  lo <- arrange(filter(df, Rates == rates[1]), SegmentID)
  hi <- arrange(filter(df, Rates == rates[2]), SegmentID)
  stopifnot(identical(lo$SegmentID, hi$SegmentID))
  t.test(lo$Yld, hi$Yld, paired = TRUE)
}
zone_av_2_rate1vsGR_res <- paired_t_by_segment(zone_av_2_rate1vsGR)
zone_av_2_rate2vsGR_res <- paired_t_by_segment(zone_av_2_rate2vsGR)
#zone_av_2_rate3vsGR_res <- paired_t_by_segment(zone_av_2_rate3vsGR)
##### t-test result tables for zone 2
# One-row summary per test: p-value, mean paired difference, generic rate name,
# |difference| rounded for display, and a significance flag at alpha = 0.05.
# Shared helper replaces the copy-pasted blocks for test 1 / test 2 / test 3.
summarise_t_res <- function(res, label) {
  data.frame(P_value = as.double(res$p.value),
             Mean_diff = (res$estimate)) %>%
    mutate(
      rate_name = label,
      rounded = abs(round(Mean_diff, 2)),
      Significant = case_when(P_value < 0.05 ~ "significant",
                              TRUE ~ "not significant"))
}
#####test 1 results
zone_av_2_rate1vsGR_res_sig <- summarise_t_res(zone_av_2_rate1vsGR_res, "rate1")
zone_av_2_rate1vsGR_res_sig
####test 2 results
zone_av_2_rate2vsGR_res_sig <- summarise_t_res(zone_av_2_rate2vsGR_res, "rate2")
####test 3 results (enable when the site has a third rate)
# zone_av_2_rate3vsGR_res_sig <- summarise_t_res(zone_av_2_rate3vsGR_res, "rate3")
zone_av_2_rate1vsGR_res_sig
zone_av_2_rate2vsGR_res_sig
#zone_av_2_rate3vsGR_res_sig
# positive_negative_rate1_GRS <-
# Mean yield per rate in zone 2, used to work out the direction of each difference.
mean_zone_av_2 <- group_by(zone_av_2, Rates) %>%
summarise(mean(Yld))
mean_zone_av_2
# Direction of the difference ("plus"/"minus").  NOTE(review): subtracting two
# 1-row data frames makes ifelse() return a matrix; [1,2] picks the mean-yield
# column.  Works only while mean_zone_av_2 has exactly Rates and mean(Yld).
positive_neg_value_GR_rate1_zone2 <- ifelse(filter(mean_zone_av_2, Rates == Grower_rate)
- filter(mean_zone_av_2, Rates == rate1)>0, "plus", "minus")
positive_neg_value_GR_rate1_zone2 <- positive_neg_value_GR_rate1_zone2[1,2]
positive_neg_value_rate2_GR_zone2 <- ifelse(filter(mean_zone_av_2, Rates == rate2)
- filter(mean_zone_av_2, Rates == Grower_rate)>0, "plus", "minus")
positive_neg_value_rate2_GR_zone2 <- positive_neg_value_rate2_GR_zone2[1,2]
# positive_neg_value_rate3_GR_zone2 <- ifelse(filter(mean_zone_av_2, Rates == rate3)
# - filter(mean_zone_av_2, Rates == Grower_rate)>0, "plus", "minus")
# positive_neg_value_rate3_GR_zone2 <- positive_neg_value_rate3_GR_zone2[1,2]
# Annotation text summarising both comparisons for the zone-2 plot.
p_vlaue_text_zone_2 <- paste0("Yield at N ", Grower_rate, " is N ", rate1, " " ,positive_neg_value_GR_rate1_zone2, " ",
zone_av_2_rate1vsGR_res_sig$rounded, " and is ",
zone_av_2_rate1vsGR_res_sig$Significant, "\n",
"Yield at N ", rate2, " is N ", Grower_rate, " " ,positive_neg_value_rate2_GR_zone2, " ",
zone_av_2_rate2vsGR_res_sig$rounded, " and is ",
zone_av_2_rate2vsGR_res_sig$Significant, collapse = "\n")
# "Yield at P ", rate3, " is P ", Grower_rate , " " ,positive_neg_value_rate3_GR_zone2, " ",
# zone_av_2_rate3vsGR_res_sig$rounded, " and is ",
# zone_av_2_rate3vsGR_res_sig$Significant, collapse = "\n")
print(p_vlaue_text_zone_2)
library(grid)
# Wrap the annotation text in a grob so annotation_custom() can place it on the plot.
Pvalue_on_graph <- grobTree(textGrob(p_vlaue_text_zone_2, x=0.1, y=0.10, hjust=0,
gp=gpar(col="black", fontsize=6, fontface="italic")))
# Plot the results: boxplot of segment-mean yield per rate, with a dashed line at the mean.
zone_av_2
zone_av_2$Rate_as_factor <- as.factor(zone_av_2$Rates)
# NOTE(review): this overwrites zone_2 (previously the filtered data frame) with a ggplot.
zone_2 <- ggplot( zone_av_2, aes(Rate_as_factor, Yld))+
geom_boxplot(alpha=0.1)+
geom_point(colour = "blue", alpha = 0.1)+
# NOTE(review): fun.y and ..y.. are deprecated in newer ggplot2 (use fun= and
# after_stat(y)); left as-is to stay compatible with the installed version.
stat_summary(fun.y = mean, geom = "errorbar", aes(ymax = ..y.., ymin = ..y..),
width = .75, linetype = "dashed")+
theme_bw()+
ylim(2.5,6)+
theme(axis.text=element_text(size=8),
axis.title=element_text(size=10,))+
labs(x = Fert_legend_name,
y= "Yield t/ha",
title = zone2)+
annotation_custom(Pvalue_on_graph)
zone_2
##save the graphs of the zone strip work (ggsave saves the last plot displayed)
ggsave(path= graph_path, filename = "t-test_zone_zone2_strip.png", device = "png" ,
width = 20, height = 10, units = "cm")
#make a table of the mean yield for zone 2 with the t-test results attached
zone_av_2
mean_zone_av_2 <- group_by(zone_av_2, Rates) %>%
summarise(mean(Yld))
mean_zone_av_2 <- left_join(mean_zone_av_2,list_rates)
mean_zone_av_2and_res_sig <- rbind(zone_av_2_rate1vsGR_res_sig, zone_av_2_rate2vsGR_res_sig)
# joins on rate_name (added by the list_rates join above)
mean_zone_av_2 <- left_join(mean_zone_av_2,mean_zone_av_2and_res_sig)
# Tag the table with the site identifiers so rows remain traceable after export.
mean_zone_av_2 <- mutate(mean_zone_av_2,
Zone = zone2,
Organisation =Organisation_db,
Contact_Farmer = Contact_Farmer_db,
Paddock_tested = Paddock_tested_db)
names(mean_zone_av_2)[2] <- "Yld"
write.csv(zone_av_2, paste0(graph_path,"/t_testzone_zone2_av.csv"))
#####################################################################################################################################
### should get this from Harm's database
# Bring in the most current soil/plant test database from Dropbox.
# SECURITY NOTE(review): the Dropbox auth token is written unencrypted to
# "droptoken.rds" in the working directory - anyone with this file can access
# the Dropbox account.  Keep it out of version control / shared drives.
token<-drop_auth()
saveRDS(token, "droptoken.rds")
token<-readRDS("droptoken.rds")
drop_acc(dtoken=token)
#https://www.dropbox.com/home/GRDC_Soil_Plant_Testing_Database
# NOTE(review): the database filename is date-stamped; update it when a newer export is released.
drop_download(path = "GRDC_Soil_Plant_Testing_Database/NP_database_28022020.xlsx",
local_path = database_name_of_path,
dtoken = token)
#bring in the excel sheet as an R object (columns A:O of the 2019 sheet)
database_name_of_path
harm_database <- read_excel(paste0(
database_name_of_path,"/", "NP_database_28022020.xlsx"),
sheet = "2019 full data", range = cell_cols("A:O"))
str(harm_database)
# Select and rename the columns needed for the report ("new_name" = `old name`).
harm_database<-
dplyr::select(harm_database,
"Paddock_code" = `Paddock code`,
Contact, Farmer,
"Paddock_tested" = `Paddock tested`,
Zone ,
Colwell,
DGT,
PBI ,
`Total N`,
`Colwell rec rate`,
`DGT rec rate`)
# Drop placeholder rows.  NOTE(review): this removes rows where Paddock_code is
# the literal string "NA" AND (via NA propagation in !=) rows with a real NA.
harm_database <-filter(harm_database, Paddock_code != "NA")
## Pull out the info for the paddock/zone being reported on.
str(harm_database)
site <- filter(harm_database,
Paddock_tested == Zone_db) %>%
dplyr::select(5, 6: 11)   # Zone plus the six soil-test result columns
Zone_db
site
#make a table of the mean yield for both zones
mean_zone_av_1
mean_zone_av_2
mean_zone_av_1_2 <- as.data.frame( rbind(mean_zone_av_1, mean_zone_av_2))
write.csv(mean_zone_av_1_2, paste0(graph_path,"/mean_zone_av_1_2.csv"))
# Compact display table: one row per rate, one column per zone.
mean_zone_av_1_2_display <- dplyr::select(mean_zone_av_1_2,
Rates,
Yld,
Zone)
mean_zone_av_1_2_display
# NOTE(review): spread() is superseded by pivot_wider() in tidyr; kept for compatibility.
mean_zone_av_1_2_display <- spread(mean_zone_av_1_2_display, Zone, Yld)
mean_zone_av_1_2_display <- round(mean_zone_av_1_2_display,2)
# Render both tables as grobs so they can be arranged alongside the plots.
TSpecial <- ttheme_minimal(base_size = 8)
table1 <- tableGrob(site , rows = NULL, theme=TSpecial )
table2 <- tableGrob(mean_zone_av_1_2_display, rows = NULL, theme=TSpecial)
#get the name of the paddock for the report title / output filename
paddock <- Zone_db
library(DT)
test <- textGrob(paddock)
####################################################################################################################################
## Arrange the outputs onto one page: zone boxplots on top, site table middle,
## strip plot and yield table below (layout_matrix indexes the grobs by position).
segments
zone_1
zone_2
paddock
collection <- grid.arrange(zone_2, zone_1, table1, segments, table2, nrow = 5,
layout_matrix = cbind(c(1,1,5,4,4), c(2,2,3,4,4)))
collection
# `collection` is passed positionally as ggsave()'s plot argument.
ggsave(path= graph_path, filename = paste0(paddock, "_collection.png"), device = "png",
width = 21, height = 15, units = "cm", collection)
##########################################################################################################################################
# ---- file boundary: Landmark/Matt_Nihill/Jenharwil_2/James_tidy.R (repo JackieOuzman/strip_graphs, 26,988 bytes) ends here; a second copy of the script follows ----
#################################################################################################
###This code will generate grower reports by:
#1.bring in the strip data for a site
#2.run paired t-test,
#3.create plots
#4.Accesses lab results and generates reports
## Approach using polygon to pull out raw yield data
###############################################################################################
### load in the libraries
#install.packages("PairedData")
#install.packages("RGraphics")
#install.packages("gridExtra")
#install.packages("rdrop2")
library(dplyr)
library(tidyverse)
library(ggplot2)
library(readxl)
library(PairedData)
library(cowplot)
library(grid)
library(RGraphics)
library(gridExtra)
library(rdrop2)
###############################################################################################
##1a. Details about the site: how it is identified in the soil-testing database.
# NOTE(review): these constants are site-specific; edit them for each new paddock/zone run.
database_name_of_path <-
file.path(
"W:",
"value_soil_testing_prj",
"data_base")
# Identifiers used to look this site up in the database.
Organisation_db = "Landmark"
Contact_Farmer_db = "Matt Nihill 7"
Paddock_tested_db = "Jenharwil 2"
Zone_db = "James 2"
# Input yield CSV: one row per yield point, with segment ID, rate and zone columns.
data_file = "James_Yld_Seg_ID_zone.csv"
# Legend title used on all plots (this trial varies N rates).
Fert_legend_name <- "N Rates"
##1b. set path for getting my spatial data and location of saving outputs
name_of_path <-
file.path(
"W:",
"value_soil_testing_prj",
"Yield_data",
Organisation_db,
"Matt_Nihill",
"Jenharwil_2",
"James 2")
graph_path <-
file.path(name_of_path)
# read_csv() comes from readr (attached via tidyverse).
seg_ID <- read_csv(paste0(name_of_path, "/", data_file))
names(seg_ID)
##1c. make names consistent across input files
seg_ID <-
rename(seg_ID,
"Rates" = "Rate", #new name = old name
"Zone" = "Zone", # no-op: kept so the rename list documents every required column
"Yld" = "Yld_Mass_D"
)
##1d. Set up data so it is generic: growers rate, rate1, rate2, rate3, zone1, zone2
#Define the rates (kg N/ha) applied in the strips at this site
unique(seg_ID$Rates)
Grower_rate = 80
rate1 = 0
rate2 = 150
#rate3 = 110
# Lookup table mapping the generic rate names to the actual rates applied here.
list_rates <- data.frame( rate_name = c("Grower_rate" , "rate1", "rate2"),
Rates = c(Grower_rate,rate1, rate2 ) )
list_rates
#Define the zones (labels as they appear in the Zone column of the CSV)
unique(seg_ID$Zone)
zone1 <- "Low"
zone2 <- "High"
############################################################################################################################
### clean the data removing zero values
## DistOnLine == 0 marks points outside the strip; exclude them from analysis
seg_ID <- filter(seg_ID,
DistOnLine != 0)
# The farmer-practice strip was incomplete, so rows with no yield are also removed.
seg_ID <- filter(seg_ID,
Yld != 0)
#############################################################################################################################
##2. t test per segment in the strip Via Andrea method####
# Pairwise subsets: each comparison keeps only the two rates being tested.
seg_ID_rate1vsGR <- filter(seg_ID, Rates == rate1 | Rates== Grower_rate )
seg_ID_rate2vsGR <- filter(seg_ID, Rates == rate2 | Rates== Grower_rate )
#seg_ID_rate3vsGR <- filter(seg_ID, P_Rates == rate3 | P_Rates== Grower_rate )
# All segment IDs in the comparison data, used by the loops below.
# NOTE(review): `list` shadows base::list().
list <- unique(seg_ID_rate1vsGR$SegmentID)
############################################################################################################
##2a. Welch t-test of rate1 vs grower rate, run separately for every segment.
# FIX: results are collected in a preallocated list and bound once at the end,
# instead of growing a data frame with rbind() inside the loop (quadratic cost);
# the misspelled `p_vlaue` local is also gone.
results <- vector("list", length(list))
for (k in seq_along(list)) {
  i <- list[k]
  segment_data <- subset(seg_ID_rate1vsGR, SegmentID == i)
  # Two numeric vectors: yields under the grower rate (x) and under rate1 (y).
  data_x <- subset(segment_data, Rates == Grower_rate, select = Yld, drop = TRUE)
  data_y <- subset(segment_data, Rates == rate1, select = Yld, drop = TRUE)
  res_method1 <- t.test(data_x, data_y, var.equal = FALSE)  # Welch: unequal variances
  result <- data.frame(SegmentID = i, P_value = res_method1$p.value)
  results[[k]] <- result
}
Output_rate1vsGR <- bind_rows(results)
#convert the P value into "significant" / "not significant" at alpha = 0.05
Output_rate1vsGR <- mutate(Output_rate1vsGR,
                           Significant = case_when(
                             P_value < 0.05 ~ "significant",
                             TRUE ~ "not significant"
                           ))
# Summarise the comparison data per segment/zone/rate and attach the test results.
head(Output_rate1vsGR)
seg_ID_rate1vsGR_summary <- seg_ID_rate1vsGR %>%
  group_by(SegmentID, Zone, Rates) %>%
  summarise_all(mean) %>%
  ungroup() %>%
  left_join(Output_rate1vsGR) %>%   # joins on SegmentID
  mutate(comparison = "rate1vsGR")
seg_ID_rate1vsGR_summary
#####################################################################################################
##2b. Welch t-test of rate2 vs grower rate, per segment (same method as 2a).
# Note: the vector order is swapped relative to 2a (x = rate2, y = grower rate);
# that only flips the sign of the (unused) estimate, not the p-value.
# FIX: preallocated list + one bind_rows() instead of rbind() growth in the loop.
results <- vector("list", length(list))
for (k in seq_along(list)) {
  i <- list[k]
  segment_data <- subset(seg_ID_rate2vsGR, SegmentID == i)
  data_x <- subset(segment_data, Rates == rate2, select = Yld, drop = TRUE)
  data_y <- subset(segment_data, Rates == Grower_rate, select = Yld, drop = TRUE)
  res_method1 <- t.test(data_x, data_y, var.equal = FALSE)  # Welch: unequal variances
  result <- data.frame(SegmentID = i, P_value = res_method1$p.value)
  results[[k]] <- result
}
Output_rate2vsGR <- bind_rows(results)
#convert the P value into "significant" / "not significant" at alpha = 0.05
Output_rate2vsGR <- mutate(Output_rate2vsGR,
                           Significant = case_when(
                             P_value < 0.05 ~ "significant",
                             TRUE ~ "not significant"
                           ))
# Summarise the comparison data per segment/zone/rate and attach the test results.
seg_ID_rate2vsGR_summary <- seg_ID_rate2vsGR %>%
  group_by(SegmentID, Zone, Rates) %>%
  summarise_all(mean) %>%
  ungroup() %>%
  left_join(Output_rate2vsGR) %>%   # joins on SegmentID
  mutate(comparison = "rate2vsGR")
#####################################################################################################
##2c. Welch t-test of rate3 vs grower rate, per segment.
# BUG FIX: this site has no rate3 strip - `rate3` and `seg_ID_rate3vsGR` are
# commented out in sections 1 and 2 above - so running this section unguarded
# stopped the whole script with "object 'seg_ID_rate3vsGR' not found".
# The section now runs only when both objects exist (i.e. at three-rate sites).
if (exists("rate3") && exists("seg_ID_rate3vsGR")) {
  Output_rate3vsGR <- bind_rows(lapply(list, function(i) {
    segment_data <- subset(seg_ID_rate3vsGR, SegmentID == i)
    data_x <- subset(segment_data, Rates == rate3, select = Yld, drop = TRUE)
    data_y <- subset(segment_data, Rates == Grower_rate, select = Yld, drop = TRUE)
    res <- t.test(data_x, data_y, var.equal = FALSE)  # Welch: unequal variances
    data.frame(SegmentID = i, P_value = res$p.value)
  }))
  #convert the P value into "significant" / "not significant" at alpha = 0.05
  Output_rate3vsGR <- mutate(Output_rate3vsGR,
                             Significant = case_when(
                               P_value < 0.05 ~ "significant",
                               TRUE ~ "not significant"
                             ))
  # Summarise the comparison data per segment/zone/rate and attach the test results.
  seg_ID_rate3vsGR_summary <- seg_ID_rate3vsGR %>%
    group_by(SegmentID, Zone, Rates) %>%
    summarise_all(mean) %>%
    ungroup() %>%
    left_join(Output_rate3vsGR) %>%
    mutate(comparison = "rate3vsGR")
}
###############################################################################################################
##2d. Stack the per-comparison summaries into one table (add test 3 when a third rate exists)
head(seg_ID_rate1vsGR_summary)
head(seg_ID_rate2vsGR_summary)
#head(seg_ID_rate3vsGR_summary)
seg_ID_t_test_summary <- rbind(seg_ID_rate1vsGR_summary, seg_ID_rate2vsGR_summary)
#seg_ID_t_test_summary <- rbind(seg_ID_rate1vsGR_summary, seg_ID_rate2vsGR_summary, seg_ID_rate3vsGR_summary)
### free the intermediate objects now that seg_ID_t_test_summary holds everything we need
rm(list = c("Output_rate1vsGR",
"Output_rate2vsGR",
"seg_ID_rate1vsGR_summary",
"seg_ID_rate2vsGR_summary",
"res_method1",
"result",
"seg_ID_rate1vsGR",
"seg_ID_rate2vsGR"
))
##############################################################################################################
##3a. plot results of t.test ########################################################################
seg_ID_t_test_summary$P_Rate_as_factor <- as.factor(seg_ID_t_test_summary$Rates)
##3aa. Graph parameters: the segment-ID range covered by each zone, used to
## draw the shaded bands behind the yield traces.
#Zone1
zone1_min <- seg_ID_t_test_summary %>% filter(Zone == zone1) %>% pull(SegmentID) %>% min()
zone1_max <- seg_ID_t_test_summary %>% filter(Zone == zone1) %>% pull(SegmentID) %>% max()
#Zone2
zone2_min <- seg_ID_t_test_summary %>% filter(Zone == zone2) %>% pull(SegmentID) %>% min()
zone2_max <- seg_ID_t_test_summary %>% filter(Zone == zone2) %>% pull(SegmentID) %>% max()
##3b. Yield along the strip, one line per N rate, with shaded zone bands and labels.
segments <- ggplot(seg_ID_t_test_summary,
                   aes(SegmentID, Yld, group = P_Rate_as_factor)) +
  geom_line(size = 1, alpha = 0.4, aes(color = P_Rate_as_factor)) +
  scale_color_manual(values = c('darkgrey', 'green', 'blue', 'red'),
                     name = Fert_legend_name) +
  theme_bw() +
  ylim(0.0, 6) +
  labs(x = "Distance along the strip",
       y = "Yield t/ha",
       title = "",
       subtitle = "",
       caption = "") +
  annotate("rect", xmin = zone1_min, xmax = zone1_max, ymin = 0, ymax = 6,  # zone 1 band
           alpha = .2) +
  annotate("text", x = 85, y = 1, label = zone1) +
  annotate("rect", xmin = zone2_min, xmax = zone2_max, ymin = 0, ymax = 6,  # zone 2 band
           alpha = .2) +
  annotate("text", x = 57, y = 1, label = zone2)
#+
#  annotate("text", x = 40, y = 1, label = "Missing data")
##3c. Save the per-segment graph and the summary table behind it
segments #this is the graph
ggsave(path = graph_path, filename = "t-test_segments.png", device = "png",
       width = 20, height = 10, units = "cm")
write.csv(seg_ID_t_test_summary, paste0(graph_path, "/t_test_segments.csv"))
###################################################################################################################################
##4a. Paired t test for zone strip Zone 1 ####
## Average the yield values in each line segment so each rate contributes the
## same number of points, then pair segments that have both rates present.
# filter out data so we just have zone 1
zone_1 <- filter(seg_ID, Zone == zone1 )
# One mean row per SegmentID x Rate (summarise_all averages every column).
zone_av_1 <- group_by(zone_1,SegmentID, Rates ) %>%
summarise_all(mean)
#subset the zone 1 data into the two rate-vs-grower-rate comparisons
zone_av_1_rate1vsGR <- filter(zone_av_1, Rates == rate1 | Rates== Grower_rate )
zone_av_1_rate2vsGR <- filter(zone_av_1, Rates == rate2 | Rates== Grower_rate )
#zone_av_1_rate3vsGR <- filter(zone_av_1, Rates == rate3 | Rates== Grower_rate )
# Keep only segments observed under BOTH rates, so the t-test pairs line up.
list_SegmentID_values <- zone_av_1_rate1vsGR$SegmentID[duplicated(zone_av_1_rate1vsGR$SegmentID)] #SegmentIDs appearing twice (i.e. for both rates)
zone_av_1_rate1vsGR <- zone_av_1_rate1vsGR %>% filter(SegmentID %in% list_SegmentID_values)
list_SegmentID_values <- zone_av_1_rate2vsGR$SegmentID[duplicated(zone_av_1_rate2vsGR$SegmentID)] #SegmentIDs appearing twice (i.e. for both rates)
zone_av_1_rate2vsGR <- zone_av_1_rate2vsGR %>% filter(SegmentID %in% list_SegmentID_values)
#run the paired t test
# NOTE(review): the formula interface with paired = TRUE relies on row order to
# pair observations (and is deprecated in R >= 4.4) — rows here are ordered by
# SegmentID from the group_by above, which is what makes the pairing valid.
zone_av_1_rate1vsGR_res <- t.test(Yld ~ Rates, data = zone_av_1_rate1vsGR, paired = TRUE)
zone_av_1_rate2vsGR_res <- t.test(Yld ~ Rates, data = zone_av_1_rate2vsGR, paired = TRUE)
#zone_av_1_rate3vsGR_res <- t.test(Yld ~ Rates, data = zone_av_1_rate3vsGR, paired = TRUE)
#####test 1 results
# Collect p-value and mean paired difference into a one-row data frame,
# plus a rounded magnitude and a significance label for the caption below.
zone_av_1_rate1vsGR_res_sig <-
data.frame(P_value = as.double(zone_av_1_rate1vsGR_res$p.value),
Mean_diff = (zone_av_1_rate1vsGR_res$estimate)) %>%
mutate(
rate_name = "rate1",
rounded = abs(round(Mean_diff, 2)),
Significant = case_when(P_value < 0.05 ~ "significant",
TRUE ~ "not significant"))
zone_av_1_rate1vsGR_res_sig
####test 2 results
# Same summary for the rate2-vs-grower-rate comparison.
zone_av_1_rate2vsGR_res_sig <-
data.frame(P_value = as.double(zone_av_1_rate2vsGR_res$p.value),
Mean_diff = (zone_av_1_rate2vsGR_res$estimate)) %>%
mutate(
rate_name = "rate2",
rounded = abs(round(Mean_diff, 2)),
Significant = case_when(P_value < 0.05 ~ "significant",
TRUE ~ "not significant"))
####test 3 results (rate3 comparison disabled for this site)
# zone_av_1_rate3vsGR_res
# #Report values from the t.test
# zone_av_1_rate3vsGR_res_sig <-
#   data.frame(P_value = as.double(zone_av_1_rate3vsGR_res$p.value),
#              Mean_diff = (zone_av_1_rate3vsGR_res$estimate)) %>%
#   mutate(
#     rate_name = "rate3",
#     rounded = abs(round(Mean_diff, 2)),
#     Significant = case_when(P_value < 0.05 ~ "significant",
#                             TRUE ~ "not significant"))
zone_av_1_rate1vsGR_res_sig
zone_av_1_rate2vsGR_res_sig
#zone_av_1_rate3vsGR_res_sig
# Direction of each difference ("plus"/"minus") for the caption text.
mean_zone_av_1 <- group_by(zone_av_1, Rates) %>%
summarise(mean(Yld))
mean_zone_av_1
# NOTE(review): ifelse() here operates on the difference of two one-row data
# frames, so the result is frame-shaped; the [1,2] index below pulls the
# verdict for the mean-yield column. Fragile but works — confirm if refactoring.
positive_neg_value_GR_rate1_zone1 <- ifelse(filter(mean_zone_av_1, Rates == Grower_rate)
- filter(mean_zone_av_1, Rates == rate1)>0, "plus", "minus")
positive_neg_value_GR_rate1_zone1 <- positive_neg_value_GR_rate1_zone1[1,2]
positive_neg_value_rate2_GR_zone1 <- ifelse(filter(mean_zone_av_1, Rates == rate2)
- filter(mean_zone_av_1, Rates == Grower_rate)>0, "plus", "minus")
positive_neg_value_rate2_GR_zone1 <- positive_neg_value_rate2_GR_zone1[1,2]
# positive_neg_value_rate3_GR_zone1 <- ifelse(filter(mean_zone_av_1, Rates == rate3)
#                                             - filter(mean_zone_av_1, Rates == Grower_rate)>0, "plus", "minus")
# positive_neg_value_rate3_GR_zone1 <- positive_neg_value_rate3_GR_zone1[1,2]
# Assemble a two-line caption summarising both paired t-tests for zone 1:
# direction (plus/minus), rounded magnitude and significance of each comparison.
# Fix: variable name was misspelled ("p_vlaue"); it is only used within this
# step (print + textGrob below), so the rename is self-contained.
p_value_text_zone_1 <- paste0("Yield at N ", Grower_rate, " is N ", rate1, " " ,positive_neg_value_GR_rate1_zone1, " ",
                              zone_av_1_rate1vsGR_res_sig$rounded, " and is ",
                              zone_av_1_rate1vsGR_res_sig$Significant, "\n",
                              "Yield at N ", rate2, " is N ", Grower_rate, " " ,positive_neg_value_rate2_GR_zone1, " ",
                              zone_av_1_rate2vsGR_res_sig$rounded, " and is ",
                              zone_av_1_rate2vsGR_res_sig$Significant, collapse = "\n")
# "Yield at P ", rate3, " is P ", Grower_rate , " " ,positive_neg_value_rate3_GR_zone1, " ",
# zone_av_1_rate3vsGR_res_sig$rounded, " and is ",
# zone_av_1_rate3vsGR_res_sig$Significant, collapse = "\n")
print(p_value_text_zone_1)
library(grid)
# Text grob anchored in the lower-left corner; overlaid on the zone 1 boxplot.
Pvalue_on_graph <- grobTree(textGrob(p_value_text_zone_1, x=0.1, y=0.10, hjust=0,
                                     gp=gpar(col="black", fontsize=6, fontface="italic")))
# Plot the results: boxplot of yield per rate for zone 1, with the t-test
# caption overlaid and a dashed bar marking the mean of each rate.
zone_av_1
zone_av_1$Rate_as_factor <- as.factor(zone_av_1$Rates)
# NOTE(review): `fun.y` and `..y..` are deprecated in ggplot2 >= 3.3
# (use `fun =` and `after_stat(y)`); left as-is for the version in use.
zone_1 <- ggplot( zone_av_1, aes(Rate_as_factor, Yld))+
geom_boxplot(alpha=0.1)+
geom_point(colour = "blue", alpha = 0.1)+
stat_summary(fun.y = mean, geom = "errorbar", aes(ymax = ..y.., ymin = ..y..),
width = .75, linetype = "dashed")+
theme_bw()+
ylim(2.5,6)+
theme(axis.text=element_text(size=8),
axis.title=element_text(size=10,))+
labs(x = Fert_legend_name,
y= "Yield t/ha",
title = zone1)+
annotation_custom(Pvalue_on_graph)
zone_1
##save the graphs of the zone strip work (ggsave saves the last plot displayed)
ggsave(path= graph_path, filename = "t-test_zone_zone1_strip.png", device = "png" ,
width = 20, height = 10, units = "cm")
#make a table of the mean yield for zones with t test results
zone_av_1
mean_zone_av_1 <- group_by(zone_av_1, Rates) %>%
summarise(mean(Yld))
# Attach rate metadata and the two t-test summaries, then label the zone/site.
mean_zone_av_1 <- left_join(mean_zone_av_1,list_rates)
mean_zone_av_1and_res_sig <- rbind(zone_av_1_rate1vsGR_res_sig, zone_av_1_rate2vsGR_res_sig)
mean_zone_av_1 <- left_join(mean_zone_av_1,mean_zone_av_1and_res_sig)
mean_zone_av_1 <- mutate(mean_zone_av_1,
Zone = zone1,
Organisation =Organisation_db,
Contact_Farmer = Contact_Farmer_db,
Paddock_tested = Paddock_tested_db)
# Rename the "mean(Yld)" column produced by summarise() to plain "Yld".
names(mean_zone_av_1)[2] <- "Yld"
# NOTE(review): this writes the per-segment averages (zone_av_1), not the
# mean table built above — confirm that is the intended output.
write.csv(zone_av_1, paste0(graph_path,"/t_testzone_zone1_av.csv"))
###########################################################################################################################################
##4b. Paired t test for zone strip Zone 2 ####
## Same procedure as 4a, applied to the second zone: average yields per
## segment, pair segments present under both rates, then paired t-tests.
# filter out data so we just have zone 2
zone_2 <- filter(seg_ID, Zone == zone2 )
zone_2
zone_av_2 <- group_by(zone_2,SegmentID, Rates ) %>%
summarise_all(mean)
#subset the zone 2 data into the two rate-vs-grower-rate comparisons
zone_av_2_rate1vsGR <- filter(zone_av_2, Rates == rate1 | Rates== Grower_rate )
zone_av_2_rate2vsGR <- filter(zone_av_2, Rates == rate2 | Rates== Grower_rate )
#zone_av_2_rate3vsGR <- filter(zone_av_2, Rates == rate3 | Rates== Grower_rate )
# Keep only segments observed under BOTH rates so t-test pairs line up.
list_SegmentID_values <- zone_av_2_rate1vsGR$SegmentID[duplicated(zone_av_2_rate1vsGR$SegmentID)] #SegmentIDs appearing twice (i.e. for both rates)
zone_av_2_rate1vsGR <- zone_av_2_rate1vsGR %>% filter(SegmentID %in% list_SegmentID_values)
list_SegmentID_values <- zone_av_2_rate2vsGR$SegmentID[duplicated(zone_av_2_rate2vsGR$SegmentID)] #SegmentIDs appearing twice (i.e. for both rates)
zone_av_2_rate2vsGR <- zone_av_2_rate2vsGR %>% filter(SegmentID %in% list_SegmentID_values)
#run the paired t test (pairing relies on SegmentID row order; see 4a note)
zone_av_2_rate1vsGR_res <- t.test(Yld ~ Rates, data = zone_av_2_rate1vsGR, paired = TRUE)
zone_av_2_rate2vsGR_res <- t.test(Yld ~ Rates, data = zone_av_2_rate2vsGR, paired = TRUE)
#zone_av_2_rate3vsGR_res <- t.test(Yld ~ Rates, data = zone_av_2_rate3vsGR, paired = TRUE)
#####test 1 results
# One-row summary of the rate1-vs-grower-rate test (p-value, mean difference,
# rounded magnitude, significance label) — mirrors the zone 1 section.
zone_av_2_rate1vsGR_res_sig <-
data.frame(P_value = as.double(zone_av_2_rate1vsGR_res$p.value),
Mean_diff = (zone_av_2_rate1vsGR_res$estimate)) %>%
mutate(
rate_name = "rate1",
rounded = abs(round(Mean_diff, 2)),
Significant = case_when(P_value < 0.05 ~ "significant",
TRUE ~ "not significant"))
zone_av_2_rate1vsGR_res_sig
####test 2 results
# Same summary for the rate2-vs-grower-rate comparison.
zone_av_2_rate2vsGR_res_sig <-
data.frame(P_value = as.double(zone_av_2_rate2vsGR_res$p.value),
Mean_diff = (zone_av_2_rate2vsGR_res$estimate)) %>%
mutate(
rate_name = "rate2",
rounded = abs(round(Mean_diff, 2)),
Significant = case_when(P_value < 0.05 ~ "significant",
TRUE ~ "not significant"))
####test 3 results (rate3 comparison disabled for this site)
# zone_av_2_rate3vsGR_res
# #Report values from the t.test
# zone_av_2_rate3vsGR_res_sig <-
#   data.frame(P_value = as.double(zone_av_2_rate3vsGR_res$p.value),
#              Mean_diff = (zone_av_2_rate3vsGR_res$estimate)) %>%
#   mutate(
#     rate_name = "rate3",
#     rounded = abs(round(Mean_diff, 2)),
#     Significant = case_when(P_value < 0.05 ~ "significant",
#                             TRUE ~ "not significant"))
zone_av_2_rate1vsGR_res_sig
zone_av_2_rate2vsGR_res_sig
#zone_av_2_rate3vsGR_res_sig
# Direction of each difference ("plus"/"minus") for the caption text.
mean_zone_av_2 <- group_by(zone_av_2, Rates) %>%
summarise(mean(Yld))
mean_zone_av_2
# NOTE(review): frame-shaped ifelse() + [1,2] indexing, as in the zone 1
# section — fragile but works; confirm if refactoring.
positive_neg_value_GR_rate1_zone2 <- ifelse(filter(mean_zone_av_2, Rates == Grower_rate)
- filter(mean_zone_av_2, Rates == rate1)>0, "plus", "minus")
positive_neg_value_GR_rate1_zone2 <- positive_neg_value_GR_rate1_zone2[1,2]
positive_neg_value_rate2_GR_zone2 <- ifelse(filter(mean_zone_av_2, Rates == rate2)
- filter(mean_zone_av_2, Rates == Grower_rate)>0, "plus", "minus")
positive_neg_value_rate2_GR_zone2 <- positive_neg_value_rate2_GR_zone2[1,2]
# positive_neg_value_rate3_GR_zone2 <- ifelse(filter(mean_zone_av_2, Rates == rate3)
#                                             - filter(mean_zone_av_2, Rates == Grower_rate)>0, "plus", "minus")
# positive_neg_value_rate3_GR_zone2 <- positive_neg_value_rate3_GR_zone2[1,2]
# Assemble a two-line caption summarising both paired t-tests for zone 2:
# direction (plus/minus), rounded magnitude and significance of each comparison.
# Fix: variable name was misspelled ("p_vlaue"); it is only used within this
# step (print + textGrob below), so the rename is self-contained.
p_value_text_zone_2 <- paste0("Yield at N ", Grower_rate, " is N ", rate1, " " ,positive_neg_value_GR_rate1_zone2, " ",
                              zone_av_2_rate1vsGR_res_sig$rounded, " and is ",
                              zone_av_2_rate1vsGR_res_sig$Significant, "\n",
                              "Yield at N ", rate2, " is N ", Grower_rate, " " ,positive_neg_value_rate2_GR_zone2, " ",
                              zone_av_2_rate2vsGR_res_sig$rounded, " and is ",
                              zone_av_2_rate2vsGR_res_sig$Significant, collapse = "\n")
# "Yield at P ", rate3, " is P ", Grower_rate , " " ,positive_neg_value_rate3_GR_zone2, " ",
# zone_av_2_rate3vsGR_res_sig$rounded, " and is ",
# zone_av_2_rate3vsGR_res_sig$Significant, collapse = "\n")
print(p_value_text_zone_2)
library(grid)
# Text grob anchored in the lower-left corner; overlaid on the zone 2 boxplot.
Pvalue_on_graph <- grobTree(textGrob(p_value_text_zone_2, x=0.1, y=0.10, hjust=0,
                                     gp=gpar(col="black", fontsize=6, fontface="italic")))
# Plot the results: boxplot of yield per rate for zone 2 with the t-test
# caption overlaid (same layout as the zone 1 plot).
zone_av_2
zone_av_2$Rate_as_factor <- as.factor(zone_av_2$Rates)
# NOTE(review): `fun.y` / `..y..` are deprecated in ggplot2 >= 3.3; see 4a.
zone_2 <- ggplot( zone_av_2, aes(Rate_as_factor, Yld))+
geom_boxplot(alpha=0.1)+
geom_point(colour = "blue", alpha = 0.1)+
stat_summary(fun.y = mean, geom = "errorbar", aes(ymax = ..y.., ymin = ..y..),
width = .75, linetype = "dashed")+
theme_bw()+
ylim(2.5,6)+
theme(axis.text=element_text(size=8),
axis.title=element_text(size=10,))+
labs(x = Fert_legend_name,
y= "Yield t/ha",
title = zone2)+
annotation_custom(Pvalue_on_graph)
zone_2
##save the graphs of the zone strip work (ggsave saves the last plot displayed)
ggsave(path= graph_path, filename = "t-test_zone_zone2_strip.png", device = "png" ,
width = 20, height = 10, units = "cm")
#make a table of the mean yield for zones with t test results
zone_av_2
mean_zone_av_2 <- group_by(zone_av_2, Rates) %>%
summarise(mean(Yld))
# Attach rate metadata and the two t-test summaries, then label the zone/site.
mean_zone_av_2 <- left_join(mean_zone_av_2,list_rates)
mean_zone_av_2and_res_sig <- rbind(zone_av_2_rate1vsGR_res_sig, zone_av_2_rate2vsGR_res_sig)
mean_zone_av_2 <- left_join(mean_zone_av_2,mean_zone_av_2and_res_sig)
mean_zone_av_2 <- mutate(mean_zone_av_2,
Zone = zone2,
Organisation =Organisation_db,
Contact_Farmer = Contact_Farmer_db,
Paddock_tested = Paddock_tested_db)
# Rename the "mean(Yld)" column produced by summarise() to plain "Yld".
names(mean_zone_av_2)[2] <- "Yld"
# NOTE(review): writes per-segment averages (zone_av_2), not the mean table —
# confirm this is the intended output (matches zone 1 behaviour).
write.csv(zone_av_2, paste0(graph_path,"/t_testzone_zone2_av.csv"))
#####################################################################################################################################
### should get this from harms database
# Bring in the most current soil/plant-testing database from Dropbox.
# SECURITY NOTE(review): the OAuth token is saved to "droptoken.rds" in the
# working directory — anyone with that file can access the Dropbox account;
# keep it out of version control.
token<-drop_auth()
saveRDS(token, "droptoken.rds")
token<-readRDS("droptoken.rds")
drop_acc(dtoken=token)
#https://www.dropbox.com/home/GRDC_Soil_Plant_Testing_Database
# Download the dated database snapshot to the local path configured earlier.
drop_download(path = "GRDC_Soil_Plant_Testing_Database/NP_database_28022020.xlsx",
local_path = database_name_of_path,
dtoken = token)
#bring in the excel sheet as an R object (columns A:O of the 2019 sheet only)
database_name_of_path
harm_database <- read_excel(paste0(
database_name_of_path,"/", "NP_database_28022020.xlsx"),
sheet = "2019 full data", range = cell_cols("A:O"))
str(harm_database)
# Select and rename the columns of interest (spreadsheet headers contain
# spaces, hence the backticks / quoted renames).
harm_database<-
dplyr::select(harm_database,
"Paddock_code" = `Paddock code`,
Contact, Farmer,
"Paddock_tested" = `Paddock tested`,
Zone ,
Colwell,
DGT,
PBI ,
`Total N`,
`Colwell rec rate`,
`DGT rec rate`)
# Remove rows with a literal "NA" paddock code — the spreadsheet stores the
# string "NA", not an R missing value, so != "NA" is the correct test here.
harm_database <-filter(harm_database, Paddock_code != "NA")
## Pull out the info for the paddock I am testing
str(harm_database)
# NOTE(review): this matches Paddock_tested against Zone_db — presumably
# Zone_db holds the paddock identifier for this run; confirm naming.
site <- filter(harm_database,
Paddock_tested == Zone_db) %>%
dplyr::select(5, 6: 11)
Zone_db
site
#make a table of the mean yield for zones, stacked across both zones
mean_zone_av_1
mean_zone_av_2
mean_zone_av_1_2 <- as.data.frame( rbind(mean_zone_av_1, mean_zone_av_2))
write.csv(mean_zone_av_1_2, paste0(graph_path,"/mean_zone_av_1_2.csv"))
# Display version: just rate, yield and zone, reshaped to one column per zone.
mean_zone_av_1_2_display <- dplyr::select(mean_zone_av_1_2,
Rates,
Yld,
Zone)
mean_zone_av_1_2_display
mean_zone_av_1_2_display <- spread(mean_zone_av_1_2_display, Zone, Yld)
mean_zone_av_1_2_display <- round(mean_zone_av_1_2_display,2)
# Table grobs (gridExtra) for the combined output page assembled below.
TSpecial <- ttheme_minimal(base_size = 8)
table1 <- tableGrob(site , rows = NULL, theme=TSpecial )
table2 <- tableGrob(mean_zone_av_1_2_display, rows = NULL, theme=TSpecial)
#get the name of the paddock (used in the output filename)
paddock <- Zone_db
library(DT)
test <- textGrob(paddock)
####################################################################################################################################
## Arrange the outputs onto one page: zone boxplots on top, site table and
## zone-mean table in the middle, segments plot spanning the bottom rows.
segments
zone_1
zone_2
paddock
collection <- grid.arrange(zone_2, zone_1, table1, segments, table2, nrow = 5,
layout_matrix = cbind(c(1,1,5,4,4), c(2,2,3,4,4)))
collection
# Save the assembled page; the grob is passed explicitly as the plot argument.
ggsave(path= graph_path, filename = paste0(paddock, "_collection.png"), device = "png",
width = 21, height = 15, units = "cm", collection)
##########################################################################################################################################
|
#' Sum the multiples of 3 or 5 below 1000 (Project Euler problem 1).
#'
#' Positions 1..999 of `x` are overwritten with the index when it is a
#' multiple of 3 or 5, and with 0 otherwise (extending `x` if it is shorter
#' than 999 elements); the function then returns the sum of every non-zero
#' entry remaining in `x`, so any non-zero tail beyond position 999 is kept.
#'
#' @param x A numeric vector (may be empty; positions 1..999 are overwritten).
#' @return The sum of the non-zero entries of the modified vector (233168
#'   when `x` has no non-zero tail beyond position 999).
multiple <- function(x) {
  # Vectorised replacement for the original scalar for-loop.
  idx <- seq_len(999)
  x[idx] <- ifelse(idx %% 3 == 0 | idx %% 5 == 0, idx, 0)
  # Sum every non-zero entry that remains (including any tail beyond 999).
  sum(x[x != 0])
}
} | /r.R | no_license | Onikepe/R-Programming- | R | false | false | 154 | r | multiple <- function(x) {
for (i in 1:999) {
if (i %% 3 == 0 || i%% 5 == 0) {
x[i] = i
}
else {
x[i] = 0
}
}
y <- x[x != 0]
sum(y)
} |
#' Write a chromEvol-style control file.
#'
#' Validates the run configuration and writes one "_key value" line per
#' setting to `control.path`.
#'
#' @param mainType One of "All_Models", "Run_Fix_Param" or "Optimize_Model".
#' @param dataFile Path to the chromosome-count data file.
#' @param treeFile Path to the tree file.
#' @param outDir Output directory for the run.
#' @param maxChrNum,minChrNum Optional numeric chromosome-number bounds.
#' @param branchMul Optional branch-length multiplier.
#' @param simulationsNum Optional number of simulations.
#' @param logFile Optional log-file name (written as "_logFile"; the original
#'   code wrote it under the "_branchMul" key — copy-paste bug, fixed here).
#' @param optimizePointsNum,optimizeIterNum Optimisation settings; must be
#'   supplied together or not at all.
#' @param pars Named list of model parameters; required for "Run_Fix_Param"
#'   and "Optimize_Model". Each entry is written as "_name value".
#' @param control.path Path the control file is written to (required).
#' @return Invisibly, the result of write() (called for its side effect).
make.control <- function(mainType = NULL,
                         dataFile = NULL,
                         treeFile = NULL,
                         outDir = NULL,
                         maxChrNum = NULL,
                         minChrNum = NULL,
                         branchMul = NULL,
                         simulationsNum = NULL,
                         logFile = NULL,
                         optimizePointsNum = NULL,
                         optimizeIterNum = NULL,
                         pars = list(),
                         control.path = NULL){
  ## ---- validation (fail fast, before assembling any output) ----
  if (is.null(mainType)) {
    stop("specify the model")
  }
  if (!is.element(el = mainType,
                  set = c("All_Models",
                          "Run_Fix_Param",
                          "Optimize_Model"))) {
    stop("main type should be either 'All_Models', 'Run_Fix_Param' or 'Optimize_Model'")
  }
  if (is.null(dataFile)) {
    stop("provide the path to the chromosome counts")
  }
  if (is.null(treeFile)) {
    stop("provide the path to the tree file")
  }
  if (is.null(outDir)) {
    # Fixed error message: previously repeated the tree-file wording.
    stop("provide the path to the output directory")
  }
  # Parameters are mandatory for both fixed-parameter runs and optimisation
  # (this consolidates three duplicated checks from the original).
  if (mainType %in% c("Optimize_Model", "Run_Fix_Param") && length(pars) == 0) {
    stop("model parameters are not given")
  }
  # optimizePointsNum / optimizeIterNum must be supplied together.
  if (xor(is.null(optimizePointsNum), is.null(optimizeIterNum))) {
    stop("optimizePointsNum and optimizeIterNum should be given together")
  }
  if (is.null(control.path)) {
    stop("path to save the control file is not given")
  }
  ## ---- assemble the "_key value" lines ----
  control <- c(paste("_mainType", mainType, sep = " "),
               paste("_dataFile", dataFile, sep = " "),
               paste("_treeFile", treeFile, sep = " "),
               paste("_outDir", outDir, sep = " "))
  if (!is.null(maxChrNum)) {
    if (!is.numeric(maxChrNum)) {
      stop("Chromosome number should be numeric")
    }
    control <- c(control, paste("_maxChrNum", maxChrNum))
  }
  if (!is.null(minChrNum)) {
    if (!is.numeric(minChrNum)) {
      stop("Chromosome number should be numeric")
    }
    control <- c(control, paste("_minChrNum", minChrNum))
  }
  if (!is.null(branchMul)) {
    control <- c(control, paste("_branchMul", branchMul, sep = " "))
  }
  if (!is.null(simulationsNum)) {
    control <- c(control, paste("_simulationsNum", simulationsNum, sep = " "))
  }
  if (!is.null(logFile)) {
    # Bug fix: the original wrote this entry under the "_branchMul" key.
    control <- c(control, paste("_logFile", logFile, sep = " "))
  }
  if (!is.null(optimizePointsNum) && !is.null(optimizeIterNum)) {
    control <- c(control,
                 paste("_optimizePointsNum", optimizePointsNum, sep = " "),
                 paste("_optimizeIterNum", optimizeIterNum, sep = " "))
  }
  if (mainType %in% c("Optimize_Model", "Run_Fix_Param")) {
    # One "_name value" line per model parameter, in list order.
    pars.vector <- vapply(seq_along(pars), function(i) {
      paste("_", names(pars)[i], " ", pars[[i]], sep = "")
    }, character(1))
    control <- c(control, pars.vector)
  }
  write(control, file = control.path)
}
| /analysis/rscripts/12.make.control.R | no_license | Tsylvester8/Polyneoptera | R | false | false | 3,415 | r | make.control <- function(mainType = NULL,
dataFile = NULL,
treeFile = NULL,
outDir = NULL,
maxChrNum = NULL,
minChrNum = NULL,
branchMul = NULL,
simulationsNum = NULL,
logFile = NULL,
optimizePointsNum = NULL,
optimizeIterNum = NULL,
pars = list(),
control.path = NULL){
control <- c()
if(is.null(mainType)){
stop("specify the model")
}
if(!is.element(el = mainType,
set = c("All_Models",
"Run_Fix_Param",
"Optimize_Model"))){
stop("main type should be either 'All_Models', 'Run_Fix_Param' or 'Optimize_Model'")
}
if(is.null(dataFile)){
stop("provide the path to the chromosome counts")
}
if(is.null(treeFile)){
stop("provide the path to the tree file")
}
if(is.null(outDir)){
stop("provide the path to tree file")
}
if(mainType == "Optimize_Model"){
if(length(pars) == 0){
stop("model parameters are not given")
}
}
if(mainType == "Run_Fix_Param"){
if(length(pars) == 0){
stop("model parameters are not given")
}
}
control <- c(paste("_mainType", mainType, sep = " "),
paste("_dataFile", dataFile, sep = " "),
paste("_treeFile", treeFile, sep = " "),
paste("_outDir", outDir, sep = " "))
if(!is.null(maxChrNum)){
if(!is.numeric(maxChrNum)){
stop("Chromosome number should be numeric")
}else{
control <- c(control,
paste("_maxChrNum", maxChrNum))
}
}
if(!is.null(minChrNum)){
if(!is.numeric(minChrNum)){
stop("Chromosome number should be numeric")
}else{
control <- c(control,
paste("_minChrNum", minChrNum))
}
}
if(!is.null(branchMul)){
control <- c(control,
paste("_branchMul", branchMul, sep = " "))
}
if(!is.null(simulationsNum)){
control <- c(control,
paste("_simulationsNum", simulationsNum, sep = " "))
}
if(!is.null(logFile)){
control <- c(control,
paste("_branchMul", logFile, sep = " "))
}
if(!is.null(optimizePointsNum)){
if(is.null(optimizeIterNum)){
stop("optimizePointsNum and optimizeIterNum should be given together")
}
}
if(!is.null(optimizeIterNum)){
if(is.null(optimizePointsNum)){
stop("optimizePointsNum and optimizeIterNum should be given together")
}
}
if(!is.null(optimizePointsNum) && !is.null(optimizeIterNum)){
control <- c(control,
paste("_optimizePointsNum", optimizePointsNum, sep = " "),
paste("_optimizeIterNum", optimizeIterNum, sep = " "))
}
if(mainType %in% c("Optimize_Model","Run_Fix_Param")){
if(length(pars) == 0){
stop("model parameters are not given")
}
if(length(pars) != 0){
pars.vector <- c()
for (i in 1:(length(pars))){
pars.vector[(i)] <- paste("_", names(pars)[i], " ", pars[i][[1]], sep = "")
}
control <- c(control, pars.vector)
}
}
if(is.null(control.path)){
stop("path to save the control file is not given")
}
write(control, file= control.path)
}
|
# ************************************************************ #
# Get stratified representative sample of the entire population of land parcels
# ************************************************************ #
# outputs:
# representative samples of each matched scale from the unmatched data
# based on https://gist.github.com/gianlucamalato/24f5d560cd27ded356f70843e22b79db#file-stratified-sampling-r
# blog: https://towardsdatascience.com/stratified-sampling-and-how-to-perform-it-in-r-8b753efde1ef
# libraries needed
library(dplyr)
library(readr)
library(foreign)
library(sqldf)
# install.packages("SamplingStrata")
# library(SamplingStrata)
# set wd's
#wd_main <- "/data/MAS-group-share/04_personal/Andrea/P1_analysis/"
wd_main <- "/gpfs1/data/idiv_meyer/01_projects/Andrea/P1/"
# 1. tables with matches ----
# read in csv tables that have only the gid's that were matched:
setwd(paste0(wd_main, "outputs/MatchedDatasets_onlymatches"))
l <- list.files()
names <- gsub(".csv","", l)
# set job: one SLURM array task per matched dataset / scale
i=as.integer(Sys.getenv('SLURM_ARRAY_TASK_ID'))
print(names[i])
matched <- read_csv(l[i], col_types = cols_only(gid = col_double(),
mun = col_character(),
biome = col_character()))
n_matched <- nrow(matched)
# 2. get tables with all unmatched gid's + variables needed to stratify ----
setwd(paste0(wd_main, "inputs/00_data/for_matching/forMatchAnalysisCEM"))
unmatched_d <- as.data.frame(read_csv(l[i], col_types = "?????__?_??_"))
head(unmatched_d)
# 3. conduct stratification ----
#i=1
# vars to consider in stratification (everything except the identifiers)
dimensions <- setdiff(names(unmatched_d), c("gid", "mun","biome"))
# establish n sample: same size as the matched set
n_sample <- n_matched # NOTE: would have to correspond exactly
# create empty table (to be filled in)
generated <- head(as.data.frame(unmatched_d), 0)
# Draw one record at a time: slice the population along each dimension
# according to its (univariate) histogram, then pick one record from the
# resulting cell; repeat until n_sample unique records are collected.
while (nrow(generated) < n_sample) {
# For debug purposes
cat(nrow(generated),"\n")
flush.console()
tmp = unmatched_d
# Calculate the histogram for each dimension
# and select one value at a time, slicing the
# original dataset according to its histogram
for (j in 1:length(dimensions)) { # for each variable
colname = dimensions[j]
if (class(unmatched_d[[colname]]) %in% c("numeric") && # if it's numeric
sum(unmatched_d[[colname]] == as.integer(unmatched_d[[colname]]), na.rm = TRUE) == 0 # and no element is integer-valued (i.e. truly continuous)
) {
# Numerical variable. Histogram with Rice's Rule
# If there are NA's, stratify on those
na_count = sum(is.na(tmp[[colname]]))
not_na_count = length(tmp[[colname]]) - na_count
# Decide whether this draw lands in the NA stratum (prob. proportional
# to the NA share of the current slice).
s = sample(c(0,1),prob = c(not_na_count,na_count),1)
if (s == 0) {
# Histogram stratification based on breaks calculated on the
# population (not the current slice, so bins are stable across draws)
n_breaks = floor(2*sum(!is.na(unmatched_d[[colname]]))**((1/3)))
bar_size = (max(unmatched_d[[colname]],na.rm = TRUE)-min(unmatched_d[[colname]],na.rm = TRUE))/n_breaks
breaks = sapply(0:n_breaks,function(i) {min(unmatched_d[[colname]],na.rm = TRUE) + i*bar_size})
h = hist(tmp[[colname]],breaks=breaks,plot = F)
# Select one bar of the histogram according to the density
bar_id = sample(1:length(h$mids),prob = h$counts,1)
bar_start = h$breaks[bar_id]
bar_end = h$breaks[bar_id + 1]
tmp = tmp[tmp[[colname]] >= bar_start & tmp[[colname]] < bar_end & !is.na(tmp[[colname]]),]
} else {
# NA stratum: keep only rows missing on this variable
tmp = tmp[is.na(tmp[[colname]]),]
}
} else {
# Categorical variable
# Histogram for the selected dimension
aggr = as.data.frame(table(tmp[,colname],useNA="ifany"))
names(aggr) = c("dim","count")
# Generate a value according to the histogram
generated_value = sample(aggr$dim,prob=aggr$count,1)
# Slice the actual multivariate histogram in order to
# take only records with the selected value on the
# selected dimension
if (!is.na(generated_value)) {
tmp = tmp[tmp[[colname]] == generated_value & !is.na(tmp[[colname]]),]
}
else {
tmp = tmp[is.na(tmp[[colname]]),]
}
}
}
# Once the procedure finishes, we get a bulk of records
# with the same values of each dimension. Let's take
# one of these records uniformly
random_index = sample(1:nrow(tmp),1)
new_record = tmp[random_index,]
# Let's remove duplicates (sqldf EXCEPT drops records already generated)
inserted_record = sqldf("select * from new_record except select * from generated")
# Insert in the "generated" data frame and repeat until desired sample size is reached
generated = rbind(generated,inserted_record)
}
# Write the representative sample for this scale, named after the input file.
setwd(paste0(wd_main, "outputs/representativeSamplesPop"))
write.csv(generated, paste0(names[i], ".csv"), row.names = F)
| /06_stratifiedRepresentativeSample.R | permissive | pacheco-andrea/tenure-defor-br | R | false | false | 4,907 | r | # ************************************************************ #
# Get stratified representative sample of the entire population of land parcels
# ************************************************************ #
# outputs:
# representative samples of each matched scale from the unmatched data
# based on https://gist.github.com/gianlucamalato/24f5d560cd27ded356f70843e22b79db#file-stratified-sampling-r
# blog: https://towardsdatascience.com/stratified-sampling-and-how-to-perform-it-in-r-8b753efde1ef
# libraries needed
library(dplyr)
library(readr)
library(foreign)
library(sqldf)
# install.packages("SamplingStrata")
# library(SamplingStrata)
# set wd's
#wd_main <- "/data/MAS-group-share/04_personal/Andrea/P1_analysis/"
wd_main <- "/gpfs1/data/idiv_meyer/01_projects/Andrea/P1/"
# 1. tables with matches ----
# read in csv tables that have only the gid's that were matched:
setwd(paste0(wd_main, "outputs/MatchedDatasets_onlymatches"))
l <- list.files()
names <- gsub(".csv","", l)
# set job
i=as.integer(Sys.getenv('SLURM_ARRAY_TASK_ID'))
print(names[i])
matched <- read_csv(l[i], col_types = cols_only(gid = col_double(),
mun = col_character(),
biome = col_character()))
n_matched <- nrow(matched)
# 2. get tables with all unmatched gid's + variables needed to stratify ----
setwd(paste0(wd_main, "inputs/00_data/for_matching/forMatchAnalysisCEM"))
unmatched_d <- as.data.frame(read_csv(l[i], col_types = "?????__?_??_"))
head(unmatched_d)
# 3. conduct stratification ----
#i=1
# vars to consider in stratification
dimensions <- setdiff(names(unmatched_d), c("gid", "mun","biome"))
# establish n sample
n_sample <- n_matched # NOTE: would have to correspond exactly
# create empty table (to be filled in)
generated <- head(as.data.frame(unmatched_d), 0)
while (nrow(generated) < n_sample) {
# For debug purposes
cat(nrow(generated),"\n")
flush.console()
tmp = unmatched_d
# Calculate the histogram for each dimension
# and select one value at a time, slicing the
# original dataset according to its histogram
for (j in 1:length(dimensions)) { # for each variable
colname = dimensions[j]
if (class(unmatched_d[[colname]]) %in% c("numeric") && # if it's numeric
sum(unmatched_d[[colname]] == as.integer(unmatched_d[[colname]]), na.rm = TRUE) == 0 # and if it sums to 0
) {
# Numerical variable. Histogram with Rice's Rule
# If there are NA's, stratify on those
na_count = sum(is.na(tmp[[colname]]))
not_na_count = length(tmp[[colname]]) - na_count
s = sample(c(0,1),prob = c(not_na_count,na_count),1)
if (s == 0) {
# Histogram stratification based on breaks calculated on the
# population
n_breaks = floor(2*sum(!is.na(unmatched_d[[colname]]))**((1/3)))
bar_size = (max(unmatched_d[[colname]],na.rm = TRUE)-min(unmatched_d[[colname]],na.rm = TRUE))/n_breaks
breaks = sapply(0:n_breaks,function(i) {min(unmatched_d[[colname]],na.rm = TRUE) + i*bar_size})
h = hist(tmp[[colname]],breaks=breaks,plot = F)
# Select one bar of the histogram according to the density
bar_id = sample(1:length(h$mids),prob = h$counts,1)
bar_start = h$breaks[bar_id]
bar_end = h$breaks[bar_id + 1]
tmp = tmp[tmp[[colname]] >= bar_start & tmp[[colname]] < bar_end & !is.na(tmp[[colname]]),]
} else {
# NA
tmp = tmp[is.na(tmp[[colname]]),]
}
} else {
# Categorical variable
# Histogram for the selected dimension
aggr = as.data.frame(table(tmp[,colname],useNA="ifany"))
names(aggr) = c("dim","count")
# Generate a value according to the histogram
generated_value = sample(aggr$dim,prob=aggr$count,1)
# Slice the actual multivariate histogram in order to
# take only records with the selected value on the
# selected dimension
if (!is.na(generated_value)) {
tmp = tmp[tmp[[colname]] == generated_value & !is.na(tmp[[colname]]),]
}
else {
tmp = tmp[is.na(tmp[[colname]]),]
}
}
}
# Once the procedure finishes, we get a bulk of records
# with the same values of each dimension. Let's take
# one of these records uniformly
random_index = sample(1:nrow(tmp),1)
new_record = tmp[random_index,]
# Let's remove duplicates
inserted_record = sqldf("select * from new_record except select * from generated")
# Insert in the "generated" data frame and repeat until desired sample size is reached
generated = rbind(generated,inserted_record)
}
setwd(paste0(wd_main, "outputs/representativeSamplesPop"))
write.csv(generated, paste0(names[i], ".csv"), row.names = F)
|
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -1.24269601614303e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
# Fuzz harness: invoke the compiled bootstrap routine with the generated
# argument list above (auto-generated valgrind test input).
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615768329-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -1.24269601614303e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
## Stocks the fleets operate on (immature and mature wolffish components).
stock.names <- c('wolfimm','wolfmat')
## Collect catches by fleet from the MFDB database, aggregated by age/length.
# Longline (incl. handline) landings
lln.landings <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
gear=c('HLN','LLN'),
sampling_type = 'LND',
species = defaults$species),
defaults))
# Bottom/Nephrops trawl landings
bmt.landings <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
gear=c('BMT','NPT'),
sampling_type = 'LND',
species = defaults$species),
defaults))
# Gillnet landings
gil.landings <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
gear='GIL',
sampling_type = 'LND',
species = defaults$species),
defaults))
# Foreign landings (all gears)
foreign.landings <-
mfdb_sample_count(mdb, c('age', 'length'),
c(list(
sampling_type = 'FLND',
species = defaults$species),
defaults))
# Historical landings restricted to 1960-1982 (overrides the default years).
tmp <- defaults
tmp$year <- 1960:1982
old.landings <-
mfdb_sample_count(mdb, c('age', 'length'),
c(list(
sampling_type = 'OLND',
species = defaults$species),
tmp))
## Make Gadget fleet objects, all with an exponential-L50 suitability curve.
lln.fleet <-
Rgadget:::make.gadget.fleet(name='lln',suitability='exponentiall50',
fleet.data=lln.landings[[1]],
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,lln.fleet)
bmt.fleet <-
Rgadget:::make.gadget.fleet(name='bmt',suitability='exponentiall50',
fleet.data=bmt.landings[[1]],
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,bmt.fleet)
gil.fleet <-
Rgadget:::make.gadget.fleet(name='gil',suitability='exponentiall50',
fleet.data=gil.landings[[1]],
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,gil.fleet)
## Nominal survey fleet: unit catch (number = 1) in step 1 of every year.
igfs.landings <- data.frame(year=defaults$year,step=1,number=1,area=1)
igfs.fleet <-
Rgadget:::make.gadget.fleet(name='igfs',suitability='exponentiall50',
fleet.data=igfs.landings,
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,igfs.fleet)
#aut.landings <- data.frame(year=defaults$year,step=4,number=1,area=1)
#aut.fleet <-
#  Rgadget:::make.gadget.fleet(name='aut',suitability='exponentiall50',
#                              fleet.data=aut.landings,
#                              stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,aut.fleet)
## Historical (pre-1983) fleet
old.fleet <-
Rgadget:::make.gadget.fleet(name='oldfleet',suitability='exponentiall50',
fleet.data=old.landings[[1]],
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,old.fleet)
## Foreign fleet
foreign.fleet <-
Rgadget:::make.gadget.fleet(name='foreign',suitability='exponentiall50',
fleet.data=foreign.landings[[1]],
stocknames=stock.names)
## Foreign and historical vessels are treated as longliners: share the
## longline fleet's suitability parameters.
foreign.fleet@suitability <- lln.fleet@suitability
old.fleet@suitability <- lln.fleet@suitability
#Rgadget:::gadget_dir_write(gd,foreign.fleet)
| /09-wolf/00-data/setup-fleets.R | no_license | bthe/gadget-models | R | false | false | 3,359 | r | ## stocks
stock.names <- c('wolfimm','wolfmat')
## Collect catches by fleet:
lln.landings <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
gear=c('HLN','LLN'),
sampling_type = 'LND',
species = defaults$species),
defaults))
bmt.landings <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
gear=c('BMT','NPT'),
sampling_type = 'LND',
species = defaults$species),
defaults))
gil.landings <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
gear='GIL',
sampling_type = 'LND',
species = defaults$species),
defaults))
foreign.landings <-
mfdb_sample_count(mdb, c('age', 'length'),
c(list(
sampling_type = 'FLND',
species = defaults$species),
defaults))
tmp <- defaults
tmp$year <- 1960:1982
old.landings <-
mfdb_sample_count(mdb, c('age', 'length'),
c(list(
sampling_type = 'OLND',
species = defaults$species),
tmp))
## make fleets
lln.fleet <-
Rgadget:::make.gadget.fleet(name='lln',suitability='exponentiall50',
fleet.data=lln.landings[[1]],
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,lln.fleet)
bmt.fleet <-
Rgadget:::make.gadget.fleet(name='bmt',suitability='exponentiall50',
fleet.data=bmt.landings[[1]],
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,bmt.fleet)
gil.fleet <-
Rgadget:::make.gadget.fleet(name='gil',suitability='exponentiall50',
fleet.data=gil.landings[[1]],
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,gil.fleet)
## nominal survey fleet catches
igfs.landings <- data.frame(year=defaults$year,step=1,number=1,area=1)
igfs.fleet <-
Rgadget:::make.gadget.fleet(name='igfs',suitability='exponentiall50',
fleet.data=igfs.landings,
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,igfs.fleet)
#aut.landings <- data.frame(year=defaults$year,step=4,number=1,area=1)
#aut.fleet <-
# Rgadget:::make.gadget.fleet(name='aut',suitability='exponentiall50',
# fleet.data=aut.landings,
# stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,aut.fleet)
## old fleet
old.fleet <-
Rgadget:::make.gadget.fleet(name='oldfleet',suitability='exponentiall50',
fleet.data=old.landings[[1]],
stocknames=stock.names)
#Rgadget:::gadget_dir_write(gd,old.fleet)
## foreign fleet
foreign.fleet <-
Rgadget:::make.gadget.fleet(name='foreign',suitability='exponentiall50',
fleet.data=foreign.landings[[1]],
stocknames=stock.names)
## foreign fishing vessels are longliners
foreign.fleet@suitability <- lln.fleet@suitability
old.fleet@suitability <- lln.fleet@suitability
#Rgadget:::gadget_dir_write(gd,foreign.fleet)
|
#' Rebuild a spatial polygon's attribute table after a split/merge step
#'
#' Prepends the attribute data of \code{poly_before} to that of \code{poly},
#' resets every existing \code{is_*} indicator column to \code{FALSE},
#' renumbers the \code{id} column, drops an optional \code{dummy} column,
#' prefixes the last three attribute columns with \code{name_of_unit}, and
#' flags all rows as belonging to \code{name_of_unit}.
#'
#' @param poly Spatial*DataFrame whose \code{@data} slot is rebuilt.
#' @param poly_before Spatial*DataFrame whose attribute columns are prepended;
#'   assumed to have the same number of rows as \code{poly}.
#' @param name_of_unit Character scalar used as the column-name prefix and in
#'   the \code{is_<name_of_unit>} flag column.
#' @return \code{poly} with its \code{@data} slot rebuilt as described.
#' @export
#' @importFrom magrittr %>%
reshape_poly <- function(poly, poly_before, name_of_unit) {
  # Prepend the pre-transformation attributes to the current ones.
  poly@data <- cbind(poly_before@data, poly@data,
                     row.names = NULL)
  # Reset every existing indicator column.
  # NOTE(review): the pattern is unanchored, so any column whose name merely
  # contains "is_" (not just those starting with it) is also reset -- confirm
  # this is intended before anchoring with "^is_".
  poly@data[, grepl("is_", colnames(poly@data))] <- FALSE
  # seq_len() is safe for empty objects; 1:length(poly) would yield c(1, 0)
  # when poly has zero features.
  poly$id <- seq_len(length(poly))
  if ("dummy" %in% colnames(poly@data)) {
    poly@data <- poly@data %>%
      dplyr::select(-dummy)
  }
  # Prefix the last three attribute columns with the unit name.
  last_three <- (ncol(poly) - 2):ncol(poly)
  colnames(poly@data)[last_three] <-
    paste0(name_of_unit, "_", colnames(poly@data)[last_three])
  # Mark all rows as belonging to this unit.
  poly@data[, paste0("is_", name_of_unit)] <- TRUE
  poly
}
| /R/reshape_poly.R | no_license | senickel/geosampling | R | false | false | 733 | r | #' reshape_poly
#'
#' @param poly
#' @param poly_before
#' @param name_of_unit
#' @keywords
#' @keywords
#' @export
#' @examples
#' @importFrom magrittr %>%
#'
#'
reshape_poly<-function(poly,poly_before,name_of_unit) {
poly@data<-cbind(poly_before@data,poly@data,
row.names = NULL)
poly@data[,grepl("is_",colnames(poly@data))]<-FALSE
poly$id<-1:length(poly)
if ("dummy"%in%colnames(poly@data))
poly@data<-poly@data %>%
dplyr::select(-dummy)
colnames(poly@data)[(ncol(poly)-2):
ncol(poly)]<-
paste0(name_of_unit,"_",
colnames(poly@data)[(ncol(poly)-2):
ncol(poly)])
poly@data[,paste0("is_",name_of_unit)]<-TRUE
poly
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeTag.R
\name{makeTag}
\alias{makeTag}
\title{Create a tag from DMS and CV filenames and a user tag.}
\usage{
makeTag(CVTable, msTable, userTag)
}
\arguments{
\item{CVTable}{(string) name of CV file.}
\item{msTable}{(string) name of ms file.}
\item{userTag}{(string) string to append to results filenames.}
}
\description{
Create a tag from DMS and CV filenames and a user tag.
}
| /man/makeTag.Rd | no_license | ppernot/msAnaLib | R | false | true | 462 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeTag.R
\name{makeTag}
\alias{makeTag}
\title{Create a tag from DMS and CV filenames and a user tag.}
\usage{
makeTag(CVTable, msTable, userTag)
}
\arguments{
\item{CVTable}{(string) name of CV file.}
\item{msTable}{(string) name of ms file.}
\item{userTag}{(string) string to append to results filenames.}
}
\description{
Create a tag from DMS and CV filenames and a user tag.
}
|
# Requirements to run on Google Cloud:
#   16 GB of RAM
#   256 GB of local disk space
#   4 vCPU
# LightGBM with min_data_in_leaf = 4000, dropping the drifting feature
# "mpasivos_margen".
# clear the workspace
rm( list=ls() ) #remove all objects
gc() #garbage collection
require("data.table")
require("lightgbm")
setwd("~/buckets/b1/")
# load the dataset used for training (period 202011)
dataset <- fread("./datasetsOri/paquete_premium_202011.csv")
# recode the class as binary: integer values in {0, 1}, with 1 = "BAJA+2"
dataset[ , clase01 := ifelse( clase_ternaria=="BAJA+2", 1L, 0L) ]
# features excluded because of data drift
campos_malos <- c("mpasivos_margen")
# feature columns to use: everything except the targets and the dropped field
campos_buenos <- setdiff( colnames(dataset), c("clase_ternaria","clase01", campos_malos) )
# put the data in the format LightGBM requires (dense numeric matrix + label)
dtrain <- lgb.Dataset( data= data.matrix( dataset[ , campos_buenos, with=FALSE]),
label= dataset$clase01 )
# fit the model with default parameters, except min_data_in_leaf
modelo <- lgb.train( data= dtrain,
param= list( objective= "binary", min_data_in_leaf= 4000 )
)
# load the unlabeled scoring data, period 202101
dapply <- fread("./datasetsOri/paquete_premium_202101.csv")
# apply the model to the new data
prediccion <- predict( modelo,
data.matrix( dapply[, campos_buenos, with=FALSE ]) )
# build the Kaggle submission: flag clients whose predicted probability
# exceeds the 0.025 threshold
entrega <- as.data.table( list( "numero_de_cliente"= dapply[ , numero_de_cliente],
"Predicted"= prediccion > 0.025) ) # build the output table
# write the Kaggle submission file
fwrite( entrega,
file= "./kaggle/612_lgb_drift_P.csv",
sep= "," )
| /clasesGustavo/TareasHogar/Tarea20210917/612_lgb_drift_P.r | no_license | gerbeldo/labo2021 | R | false | false | 1,716 | r | #Necesita para correr en Google Cloud
#16 GB de memoria RAM
#256 GB de espacio en el disco local
#4 vCPU
# LightGBM con min_data_in_leaf= 4000 y quitando "mpasivos_margen"
#limpio la memoria
rm( list=ls() ) #remove all objects
gc() #garbage collection
require("data.table")
require("lightgbm")
setwd("~/buckets/b1/")
#cargo el dataset donde voy a entrenar
dataset <- fread("./datasetsOri/paquete_premium_202011.csv")
#paso la clase a binaria que tome valores {0,1} enteros
dataset[ , clase01 := ifelse( clase_ternaria=="BAJA+2", 1L, 0L) ]
campos_malos <- c("mpasivos_margen")
#los campos que se van a utilizar
campos_buenos <- setdiff( colnames(dataset), c("clase_ternaria","clase01", campos_malos) )
#dejo los datos en el formato que necesita LightGBM
dtrain <- lgb.Dataset( data= data.matrix( dataset[ , campos_buenos, with=FALSE]),
label= dataset$clase01 )
#genero el modelo con los parametros por default
modelo <- lgb.train( data= dtrain,
param= list( objective= "binary", min_data_in_leaf= 4000 )
)
#aplico el modelo a los datos sin clase, 202101
dapply <- fread("./datasetsOri/paquete_premium_202101.csv")
#aplico el modelo a los datos nuevos
prediccion <- predict( modelo,
data.matrix( dapply[, campos_buenos, with=FALSE ]) )
#Genero la entrega para Kaggle
entrega <- as.data.table( list( "numero_de_cliente"= dapply[ , numero_de_cliente],
"Predicted"= prediccion > 0.025) ) #genero la salida
#genero el archivo para Kaggle
fwrite( entrega,
file= "./kaggle/612_lgb_drift_P.csv",
sep= "," )
|
# Tests for time-period feature flags. Each test stubs Sys.time() via
# mockery so "now" is deterministic, then checks is_enabled() against the
# configured [from, to] window.
test_that("Time period feature flags are enabled within specified boundaries", {
  feature_flag <- create_time_period_feature_flag(
    from = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC"),
    to = ISOdatetime(2020, 1, 1, 13, 0, 0, tz = "UTC")
  )
  # Noon falls strictly inside [10:00, 13:00].
  sys_time_stub <- function() ISOdatetime(2020, 1, 1, 12, 0, 0, tz = "UTC")
  mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
  expect_true(is_enabled(feature_flag))
})
test_that("Time period feature flags are disabled when not in specified boundaries", {
  feature_flag <- create_time_period_feature_flag(
    from = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC"),
    to = ISOdatetime(2020, 1, 1, 13, 0, 0, tz = "UTC")
  )
  # 15:00 is after the `to` bound, so the flag must be off.
  sys_time_stub <- function() ISOdatetime(2020, 1, 1, 15, 0, 0, tz = "UTC")
  mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
  expect_false(is_enabled(feature_flag))
})
test_that("Time period feature flags bounded by `from` are enabled after that boundary", {
  feature_flag <- create_time_period_feature_flag(
    from = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC")
  )
  # With only `from` set, the flag stays enabled indefinitely (year 2920).
  sys_time_stub <- function() ISOdatetime(2920, 1, 1, 15, 0, 0, tz = "UTC")
  mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
  expect_true(is_enabled(feature_flag))
})
test_that("Time period feature flags bounded by `from` are disabled before that boundary", {
  feature_flag <- create_time_period_feature_flag(
    from = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC")
  )
  # One second before `from`: still disabled.
  sys_time_stub <- function() ISOdatetime(2020, 1, 1, 9, 59, 59, tz = "UTC")
  mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
  expect_false(is_enabled(feature_flag))
})
test_that("Time period feature flags bounded by `to` are enabled until that boundary", {
  feature_flag <- create_time_period_feature_flag(
    to = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC")
  )
  # With only `to` set, any time before it (here 1990) is enabled.
  sys_time_stub <- function() ISOdatetime(1990, 1, 1, 9, 0, 0, tz = "UTC")
  mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
  expect_true(is_enabled(feature_flag))
})
test_that("Time period feature flags bounded by `to` are disabled after that boundary", {
  feature_flag <- create_time_period_feature_flag(
    to = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC")
  )
  # Just under an hour past `to`: disabled.
  sys_time_stub <- function() ISOdatetime(2020, 1, 1, 10, 59, 59, tz = "UTC")
  mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
  expect_false(is_enabled(feature_flag))
})
| /tests/testthat/test-feature_flag-time_period.R | permissive | szymanskir/featureflag | R | false | false | 2,523 | r | test_that("Time period feature flags are enabled within specified boundaries", {
feature_flag <- create_time_period_feature_flag(
from = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC"),
to = ISOdatetime(2020, 1, 1, 13, 0, 0, tz = "UTC")
)
sys_time_stub <- function() ISOdatetime(2020, 1, 1, 12, 0, 0, tz = "UTC")
mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
expect_true(is_enabled(feature_flag))
})
test_that("Time period feature flags are disabled when not in specified boundaries", {
feature_flag <- create_time_period_feature_flag(
from = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC"),
to = ISOdatetime(2020, 1, 1, 13, 0, 0, tz = "UTC")
)
sys_time_stub <- function() ISOdatetime(2020, 1, 1, 15, 0, 0, tz = "UTC")
mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
expect_false(is_enabled(feature_flag))
})
test_that("Time period feature flags bounded from are enabled from specified boundry", {
feature_flag <- create_time_period_feature_flag(
from = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC")
)
sys_time_stub <- function() ISOdatetime(2920, 1, 1, 15, 0, 0, tz = "UTC")
mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
expect_true(is_enabled(feature_flag))
})
test_that("Time period feature flags bounded from are disabled to specified boundry", {
feature_flag <- create_time_period_feature_flag(
from = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC")
)
sys_time_stub <- function() ISOdatetime(2020, 1, 1, 9, 59, 59, tz = "UTC")
mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
expect_false(is_enabled(feature_flag))
})
test_that("Time period feature flags bounded `to` are enabled until specified boundry", {
feature_flag <- create_time_period_feature_flag(
to = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC")
)
sys_time_stub <- function() ISOdatetime(1990, 1, 1, 9, 0, 0, tz = "UTC")
mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
expect_true(is_enabled(feature_flag))
})
test_that("Time period feature flags bounded to are disabled from specified boundry", {
feature_flag <- create_time_period_feature_flag(
to = ISOdatetime(2020, 1, 1, 10, 0, 0, tz = "UTC")
)
sys_time_stub <- function() ISOdatetime(2020, 1, 1, 10, 59, 59, tz = "UTC")
mockery::stub(is_enabled.time_period_feature_flag, "Sys.time", sys_time_stub)
expect_false(is_enabled(feature_flag))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{beta30}
\alias{beta30}
\title{Beta 30}
\format{A list of 30 rows in 1 variable
\describe{
\item{x}{observations from beta distribution}
}}
\usage{
beta30
}
\description{
A data set containing 30 observations generated from a beta(theta, 1)
distribution where theta = 4.
}
\details{
see page 375 of the book
}
\references{
Hogg, R. McKean, J. Craig, A. (2018) Introduction to
Mathematical Statistics, 8th Ed. Boston: Pearson.
}
\keyword{datasets}
| /man/beta30.Rd | no_license | joemckean/mathstat | R | false | true | 547 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{beta30}
\alias{beta30}
\title{Beta 30}
\format{A list of 30 rows in 1 variable
\describe{
\item{x}{observations from beta distribution}
}}
\usage{
beta30
}
\description{
A data set containing 30 observations generated from a beta(theta, 1)
distribution where theta = 4.
}
\details{
see page 375 of the book
}
\references{
Hogg, R. McKean, J. Craig, A. (2018) Introduction to
Mathematical Statistics, 8th Ed. Boston: Pearson.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{trip_match_v3}
\alias{trip_match_v3}
\title{trip matching algorithm R version 3.}
\usage{
trip_match_v3(M1, M2, dist_cut = 0.001, heading_cut = 0.01,
match_n_cut = 5)
}
\arguments{
\item{M1}{.}
\item{M2}{.}
\item{dist_cut}{.}
\item{heading_cut}{.}
\item{match_n_cut}{.}
}
\value{
logical vector indicating matched index.
}
\description{
trip matching algorithm R version 3.
}
\examples{
}
| /man/trip_match_v3.Rd | no_license | rapanzuena/PathMatch | R | false | true | 500 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{trip_match_v3}
\alias{trip_match_v3}
\title{trip matching algorithm R version 3.}
\usage{
trip_match_v3(M1, M2, dist_cut = 0.001, heading_cut = 0.01,
match_n_cut = 5)
}
\arguments{
\item{M1}{.}
\item{M2}{.}
\item{dist_cut}{.}
\item{heading_cut}{.}
\item{match_n_cut}{.}
}
\value{
logical vector indicating matched index.
}
\description{
trip matching algorithm R version 3.
}
\examples{
}
|
# Cleaning pipeline for scraped boatinternational.com yacht listings:
# pulls JSON rows from MySQL, extracts the columns of interest, parses
# price/year/length into numeric form, and saves the result for the
# Shiny app.
source("/srv/shiny-server/helper/mysql/con_mysql.R")
source("/srv/shiny-server/helper/mysql/json_df2.R")
#############################
######## constant #########
#############################
# MySQL table holding the scraped listings, and the JSON field names kept.
DATA_TABLE <- "boatinternational_com"
BUILDER <- "Builder"
NAME <- "Name"
PRICE <- "price"
YEAR <- "Year.of.Build"
LOA <- "Length.Overall"
# Unicode code point for the euro sign, used in the price regexes below.
EURO <- "\u20AC"
#############################
#############################
########## init ###########
#############################
# connect to the mysql server, and read the table: DATA_TABLE
rawdata <- con_mysql(DATA_TABLE)
# convert the json to dataframe, the json files are in rawdata$result
data <- json_df(rawdata$result)
# extract selected columns to make a new data frame
yachts <- data.frame("builder" = data[BUILDER], "name" = data[NAME], "text_price" = data[PRICE],"text_year" = data[YEAR],"text_loa" = data[LOA])
colnames(yachts) <- c("builder", "name", "text_price", "text_year", "text_loa")
#############################
##### clean "price" #######
#############################
# remove "*" and ",", and convert the factor type price to numeric.
# Order matters: text_price keeps the currency symbol (used below to
# derive the currency), while price is stripped down to digits only.
yachts$text_price <- gsub("\\*", "", yachts$text_price)
yachts$price <- gsub(",","", yachts$text_price)
yachts$price <- gsub("\\$", "", yachts$price)
yachts$price <- gsub(EURO, "", yachts$price)
yachts$price <- as.numeric(as.character(yachts$price))
# add "currency" to yachts, inferred from the symbol left in text_price
yachts$currency <- NA
yachts$currency[grepl("\\$",yachts$text_price)] <- "USD"
yachts$currency[grepl(EURO,yachts$text_price)] <- "EUR"
yachts$currency <- as.factor(yachts$currency)
#############################
###### clean "year" ########
#############################
# non-numeric year strings become NA via coercion
yachts$year <- as.numeric(as.character(yachts$text_year))
#############################
####### clean "loa" ########
#############################
# length overall comes as e.g. "30.5m (100ft)"; keep the metres figure
yachts$loa <- gsub("m \\(.*\\)", "", yachts$text_loa)
yachts$loa <- as.numeric(as.character(yachts$loa))
# final, delete rows whose price or currency could not be parsed
price_is_not_na <- !is.na(yachts$price)
yachts <- yachts[price_is_not_na,]
currency_is_not_na <- !is.na(yachts$currency)
yachts <- yachts[currency_is_not_na,]
#############################
########## test ###########
#############################
#head(yachts)
#str(yachts)
#yachts$price
#yachts$currency
#yachts$year
#yachts$loa
#yachts$text_loa
# persist the cleaned table for the Shiny app to load
save(yachts, file="/srv/shiny-server/boatinternational/data_boatinternational.Rdata")
| /shiny-server/boatinternational/cleaning.R | no_license | kent119/R-shiny-yachting-market-research | R | false | false | 2,388 | r | source("/srv/shiny-server/helper/mysql/con_mysql.R")
source("/srv/shiny-server/helper/mysql/json_df2.R")
#############################
######## constant #########
#############################
DATA_TABLE <- "boatinternational_com"
BUILDER <- "Builder"
NAME <- "Name"
PRICE <- "price"
YEAR <- "Year.of.Build"
LOA <- "Length.Overall"
EURO <- "\u20AC"
#############################
#############################
########## init ###########
#############################
# connect to the mysql server, and read the table: DATA_TABLE
rawdata <- con_mysql(DATA_TABLE)
# convert the json to dataframe, the json files are in rawdata$result
data <- json_df(rawdata$result)
# extract selected columns to make a new data frame
yachts <- data.frame("builder" = data[BUILDER], "name" = data[NAME], "text_price" = data[PRICE],"text_year" = data[YEAR],"text_loa" = data[LOA])
colnames(yachts) <- c("builder", "name", "text_price", "text_year", "text_loa")
#############################
##### clean "price" #######
#############################
# remove "*" and ",", and convert the factor type price to numeric
yachts$text_price <- gsub("\\*", "", yachts$text_price)
yachts$price <- gsub(",","", yachts$text_price)
yachts$price <- gsub("\\$", "", yachts$price)
yachts$price <- gsub(EURO, "", yachts$price)
yachts$price <- as.numeric(as.character(yachts$price))
# add "currency" to yachts
yachts$currency <- NA
yachts$currency[grepl("\\$",yachts$text_price)] <- "USD"
yachts$currency[grepl(EURO,yachts$text_price)] <- "EUR"
yachts$currency <- as.factor(yachts$currency)
#############################
###### clean "year" ########
#############################
yachts$year <- as.numeric(as.character(yachts$text_year))
#############################
####### clean "loa" ########
#############################
yachts$loa <- gsub("m \\(.*\\)", "", yachts$text_loa)
yachts$loa <- as.numeric(as.character(yachts$loa))
# final, delete NA in the price
price_is_not_na <- !is.na(yachts$price)
yachts <- yachts[price_is_not_na,]
currency_is_not_na <- !is.na(yachts$currency)
yachts <- yachts[currency_is_not_na,]
#############################
########## test ###########
#############################
#head(yachts)
#str(yachts)
#yachts$price
#yachts$currency
#yachts$year
#yachts$loa
#yachts$text_loa
save(yachts, file="/srv/shiny-server/boatinternational/data_boatinternational.Rdata")
|
# server.R
library(shiny)
library(ggplot2)
library(gridExtra)
library(ggthemes)
library(dplyr)
library(scales)
# runApp('C:/Users/Paul/Documents/R/Applications/Mortgage')
# term=30; principal=236000; down=0.2; interest=0.0425; irr=0.05
#' Build a month-by-month mortgage amortization table.
#'
#' @param term Loan term in years; the table has term * 12 rows.
#' @param principal Purchase price before the down payment.
#' @param down Down payment as a fraction of principal (e.g. 0.2).
#' @param interest Annual nominal interest rate (e.g. 0.0425). Must be > 0:
#'   at 0 the annuity formula below divides by zero, as in the original.
#' @param irr Annual discount rate used to present-value the interest paid.
#' @return A data.frame with columns Month, Payment, Remaining.Principal,
#'   Interest.Paid, PV.Interest.Paid and PV.Cumulative.Interest.Paid; all
#'   dollar amounts are rounded to whole dollars.
simulate_data <- function(term, principal, down, interest, irr) {
  n_months <- term * 12
  monthly_rate <- interest / 12
  beg_principal <- round(principal - down * principal, 0)

  mortgage <- data.frame(Month = seq_len(n_months))

  # Standard annuity payment: P = L * c(1+c)^n / ((1+c)^n - 1).
  growth <- (1 + monthly_rate) ^ n_months
  mortgage$Payment <- round(beg_principal * monthly_rate * growth / (growth - 1), 0)

  # Remaining balance after p payments: B = L[(1+c)^n - (1+c)^p] / [(1+c)^n - 1].
  # Row 1 is the full loan; rows 2..n hold the balance after 1..(n-1) payments.
  balance_after <- beg_principal *
    (growth - (1 + monthly_rate) ^ seq_len(n_months - 1)) / (growth - 1)
  mortgage$Remaining.Principal <- c(beg_principal, round(balance_after))

  # Interest accrued each month on the beginning-of-month balance.
  mortgage$Interest.Paid <- round(mortgage$Remaining.Principal * monthly_rate, 0)

  # Discount each month's interest back to today at the monthly opportunity cost.
  monthly_irr <- irr / 12
  mortgage$PV.Interest.Paid <-
    round(mortgage$Interest.Paid / (1 + monthly_irr) ^ seq_len(n_months))

  mortgage$PV.Cumulative.Interest.Paid <-
    round(cumsum(mortgage$PV.Interest.Paid), 0)
  mortgage
}
# Shiny server: wires the slider inputs to simulate_data() and renders the
# amortization plot, the raw table, and the summary dollar figures.
shinyServer(
function(input, output) {
# Recompute the full amortization table whenever any assumption slider moves.
mortgage_data = reactive({
mortgage_manifestation = simulate_data(term=30, principal=input$principal,
down=input$down, interest=input$interest,
irr=input$opportunity_cost)
return(mortgage_manifestation)
})
# Initial loan balance = price minus down payment.
# NOTE(review): "intial" is a typo for "initial"; internal name only,
# but rename both definition and use together if fixed.
intial_loan_value = reactive ({
input$principal - (input$principal * input$down)
})
# Remaining-balance bars (blue) overlaid with the NPV of cumulative
# interest paid (red line); horizontal line marks the initial loan value.
output$plot1 <- renderPlot({
g = ggplot(data=mortgage_data(), aes(x=Month, y=Remaining.Principal))
g + geom_bar(stat='identity', size=0.1, alpha=0.2, fill='blue') +
geom_point(data=mortgage_data(), aes(x=Month, y=PV.Cumulative.Interest.Paid), size=1, alpha=0.5) +
geom_line(data=mortgage_data(), aes(x=Month, y=PV.Cumulative.Interest.Paid), colour='red', size=2, alpha=0.7) +
scale_y_continuous(labels=dollar) +
ggtitle(expression(atop('Remaining Loan Balance (BLUE) v. NPV of Accumulated Interest Payments',
atop(italic('Manipulate Assumptions Using Sliders'))))) +
geom_hline(yintercept=as.numeric(intial_loan_value())) +
labs(y='Dollars')
})
# Full amortization schedule as an interactive table.
output$mortgage_data = renderDataTable({
mortgage_data()
})
# Total NPV of interest = last row of the cumulative column, formatted as $.
output$npv_interest = reactive({
net = scales::dollar(tail(mortgage_data()$PV.Cumulative.Interest.Paid, 1))
net
})
# Total cost of the loan = NPV of all interest + amount borrowed.
total_cost_of_loan = reactive({
o = scales::dollar(tail(mortgage_data()$PV.Cumulative.Interest.Paid, 1) + (input$principal - (input$principal * input$down)) )
o
})
output$total_cost_of_loan = reactive({
total_cost_of_loan()
})
# Monthly payment (constant across rows), formatted as $.
output$payment = reactive({
scales::dollar(tail(mortgage_data()$Payment, 1))
})
})
| /Personal/Data-Vis-Apps/Mortgage-Simulator/server.R | no_license | paulmattheww/Original-Projects | R | false | false | 3,588 | r | # server.R
library(shiny)
library(ggplot2)
library(gridExtra)
library(ggthemes)
library(dplyr)
library(scales)
# runApp('C:/Users/Paul/Documents/R/Applications/Mortgage')
# term=30; principal=236000; down=0.2; interest=0.0425; irr=0.05
simulate_data = function(term, principal, down, interest, irr) {
n_months = term * 12
Month = seq(1, n_months, by=1)
monthly_interest_rate = interest / 12
beg_principal = round(principal - (down * principal), 0)
mortgage = data.frame(Month)
payment = beg_principal * ( monthly_interest_rate * (1 + monthly_interest_rate) ^ n_months) /
((1 + monthly_interest_rate) ^ n_months - 1)
payment = round(payment, 0)
mortgage$Payment = payment
mortgage$Remaining.Principal = 0
# B = balance B = L[(1 + c)^n - (1 + c)^p]/[(1 + c)^n - 1]
index = seq(2, n_months)
amt_left = NULL
for (i in 1:length(index)) {
amt_left[i] = beg_principal * ((1 + monthly_interest_rate) ^ n_months - (1 + monthly_interest_rate) ^ i) /
((1 + monthly_interest_rate) ^ n_months - 1)
amt_left
}
mortgage[c(2:n_months),'Remaining.Principal'] = round(amt_left)
mortgage[1,'Remaining.Principal'] = beg_principal
amt_left = mortgage$Remaining.Principal
mortgage$Interest.Paid = round(amt_left * monthly_interest_rate, 0)
index = seq(1, n_months)
raw_int = mortgage$Interest.Paid
monthly_irr = irr / 12
pv_int = NULL
for (i in 1:length(index)) {
pv_int[i] = raw_int[i] / (1 + monthly_irr) ^ index[i]
pv_int
}
mortgage$PV.Interest.Paid = round(pv_int)
mortgage = data.frame(mortgage %>% mutate(PV.Cumulative.Interest.Paid = cumsum(PV.Interest.Paid)))
mortgage$PV.Cumulative.Interest.Paid = round(mortgage$PV.Cumulative.Interest.Paid, 0)
mortgage
}
shinyServer(
function(input, output) {
mortgage_data = reactive({
mortgage_manifestation = simulate_data(term=30, principal=input$principal,
down=input$down, interest=input$interest,
irr=input$opportunity_cost)
return(mortgage_manifestation)
})
intial_loan_value = reactive ({
input$principal - (input$principal * input$down)
})
output$plot1 <- renderPlot({
g = ggplot(data=mortgage_data(), aes(x=Month, y=Remaining.Principal))
g + geom_bar(stat='identity', size=0.1, alpha=0.2, fill='blue') +
geom_point(data=mortgage_data(), aes(x=Month, y=PV.Cumulative.Interest.Paid), size=1, alpha=0.5) +
geom_line(data=mortgage_data(), aes(x=Month, y=PV.Cumulative.Interest.Paid), colour='red', size=2, alpha=0.7) +
scale_y_continuous(labels=dollar) +
ggtitle(expression(atop('Remaining Loan Balance (BLUE) v. NPV of Accumulated Interest Payments',
atop(italic('Manipulate Assumptions Using Sliders'))))) +
geom_hline(yintercept=as.numeric(intial_loan_value())) +
labs(y='Dollars')
})
output$mortgage_data = renderDataTable({
mortgage_data()
})
output$npv_interest = reactive({
net = scales::dollar(tail(mortgage_data()$PV.Cumulative.Interest.Paid, 1))
net
})
total_cost_of_loan = reactive({
o = scales::dollar(tail(mortgage_data()$PV.Cumulative.Interest.Paid, 1) + (input$principal - (input$principal * input$down)) )
o
})
output$total_cost_of_loan = reactive({
total_cost_of_loan()
})
output$payment = reactive({
scales::dollar(tail(mortgage_data()$Payment, 1))
})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/count_each_column.R
\name{count_each_column}
\alias{count_each_column}
\title{Counting Each Column and Summarizing in a Matrix}
\usage{
count_each_column(x, answer = NULL, checks = TRUE)
}
\arguments{
\item{x}{a data frame or matrix with at least 1 row and
1 column. NOTE: all column should belong to the same
class (numeric, character).
However, if \code{checks = TRUE}, character and
factor variables can co-exist and logical values are also OK.
If a column has nothing but
NA, it should be remove; otherwise, an error will be raised.}
\item{answer}{the values whose frequencies you
want to know, e. g., "agree" and "disagree"
in your survey data. Default is NULL which means
all possible answers in the whole data will be used.}
\item{checks}{whether to check the validity of the
input data. Default is TRUE. Do not turn it off unless you
are sure that your data has no logical variables or factor
variables and each column has at least 1 non-missing value.}
}
\description{
This function counts the frequencies of each element of each
column of a data frame or matrix. The frequencies of
missing values and the 0 frequencies of non-existent
values are also included in the final result.
}
\examples{
# values that do not appear in
# the data can also be counted.
# a factor will be transformed into
# a character variable automatically.
x1=c("a", "b", "a", "b", NA)
x2=factor(x1)
x3=c("1", "3", "2", "1", "a")
dat=data.frame(x1, x2, x3, stringsAsFactors=FALSE)
res=count_each_column(dat, answer=c("c", "d", NA, "a"))
# logical value is OK.
x1=c(TRUE, TRUE, TRUE)
x2=c(TRUE, NA, NA)
dat=data.frame(x1, x2)
res=count_each_column(dat)
res=count_each_column(dat, c(TRUE, FALSE))
}
| /man/count_each_column.Rd | no_license | cran/plothelper | R | false | true | 1,777 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/count_each_column.R
\name{count_each_column}
\alias{count_each_column}
\title{Counting Each Column and Summarizing in a Matrix}
\usage{
count_each_column(x, answer = NULL, checks = TRUE)
}
\arguments{
\item{x}{a data frame or matrix with at least 1 row and
1 column. NOTE: all column should belong to the same
class (numeric, character).
However, if \code{checks = TRUE}, character and
factor variables can co-exist and logical values are also OK.
If a column has nothing but
NA, it should be removed; otherwise, an error will be raised.}
\item{answer}{the values whose frequencies you
want to know, e. g., "agree" and "disagree"
in your survey data. Default is NULL which means
all possible answers in the whole data will be used.}
\item{checks}{whether to check the validity of the
input data. Default is TRUE. Do not turn it off unless you
are sure that your data has no logical variables or factor
variables and each column has at least 1 non-missing value.}
}
\description{
This function counts the frequencies of each element of each
column of a data frame or matrix. The frequencies of
missing values and the 0 frequencies of non-existent
values are also included in the final result.
}
\examples{
# values that do not appear in
# the data can also be counted.
# a factor will be transformed into
# a character variable automatically.
x1=c("a", "b", "a", "b", NA)
x2=factor(x1)
x3=c("1", "3", "2", "1", "a")
dat=data.frame(x1, x2, x3, stringsAsFactors=FALSE)
res=count_each_column(dat, answer=c("c", "d", NA, "a"))
# logical value is OK.
x1=c(TRUE, TRUE, TRUE)
x2=c(TRUE, NA, NA)
dat=data.frame(x1, x2)
res=count_each_column(dat)
res=count_each_column(dat, c(TRUE, FALSE))
}
|
#' plotme2
#'
#' @title plotme2
#' @description Used for a 2-variable ggplot: a scatter plot of one data
#'   frame column against another, selected by column number.
#'
#' @param plotdata a data frame containing the columns to plot
#' @param x column number of the x coordinate
#' @param y column number of the y coordinate
#'
#' @return a ggplot object (scatter plot of column \code{x} vs column \code{y})
#'
#' @examples
#' plotme2(data, 1, 2)
#'
#' @importFrom ggplot2 ggplot aes geom_point ggtitle theme element_text xlab ylab
#'
#' @export plotme2
plotme2 <- function(plotdata, x, y) {
  ggplot(plotdata) +
    aes(x = plotdata[, x], y = plotdata[, y]) +
    geom_point(color = "#f15a34") +
    ggtitle("") +
    theme(plot.title = element_text(hjust = 0.5)) +
    # Axis labels: the column names with "." replaced by spaces.
    xlab(gsub(".", " ", colnames(plotdata[x]), fixed = TRUE)) +
    ylab(gsub(".", " ", colnames(plotdata[y]), fixed = TRUE))
}
| /R/plotme2.R | no_license | Lewismews/example_package | R | false | false | 637 | r | #' plotme2
#'
#' @title plotme2
#' @description Used for a 2-variable ggplot: a scatter plot of one data
#'   frame column against another, selected by column number.
#'
#' @param plotdata a data frame containing the columns to plot
#' @param x column number of the x coordinate
#' @param y column number of the y coordinate
#'
#' @return a ggplot object (scatter plot of column \code{x} vs column \code{y})
#'
#' @examples
#' plotme2(data, 1, 2)
#'
#' @importFrom ggplot2 ggplot aes geom_point ggtitle theme element_text xlab ylab
#'
#' @export plotme2
plotme2 <- function(plotdata, x, y) {
  ggplot(plotdata) +
    aes(x = plotdata[, x], y = plotdata[, y]) +
    geom_point(color = "#f15a34") +
    ggtitle("") +
    theme(plot.title = element_text(hjust = 0.5)) +
    # Axis labels: the column names with "." replaced by spaces.
    xlab(gsub(".", " ", colnames(plotdata[x]), fixed = TRUE)) +
    ylab(gsub(".", " ", colnames(plotdata[y]), fixed = TRUE))
}
|
\name{ReadDvnFile}
\alias{ReadDvnFile}
\title{Read a DVN File...}
\usage{ReadDvnFile(file, pkg)}
\description{Read a DVN File}
\details{...}
\value{...}
\arguments{\item{file}{a character-string specifying a file name}
\item{pkg}{a character-string specifying a package in which to find a file}}
| /man/ReadDvnFile.Rd | no_license | zeligdev/ZeligDVN | R | false | false | 296 | rd | \name{ReadDvnFile}
\alias{ReadDvnFile}
\title{Read a DVN File...}
\usage{ReadDvnFile(file, pkg)}
\description{Read a DVN File}
\details{...}
\value{...}
\arguments{\item{file}{a character-string specifying a file name}
\item{pkg}{a character-string specifying a package in which to find a file}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter-dm.R
\name{cdm_filter}
\alias{cdm_filter}
\alias{cdm_apply_filters}
\title{Filtering a \code{\link{dm}} object}
\usage{
cdm_filter(dm, table, ...)
cdm_apply_filters(dm)
}
\arguments{
\item{dm}{A \code{dm} object.}
\item{table}{A table in the \code{dm}}
\item{...}{Logical predicates defined in terms of the variables in \code{.data}, passed on to \code{\link[dplyr:filter]{dplyr::filter()}}.
Multiple conditions are combined with \code{&} or \code{,}. Only rows where the condition evaluates
to TRUE are kept.
The arguments in ... are automatically quoted and evaluated in the context of
the data frame. They support unquoting and splicing. See \code{vignette("programming", package = "dplyr")}
for an introduction to these concepts.}
}
\description{
Filtering one table of a \code{\link{dm}} object has an effect on all tables connected to this table
via one or more steps of foreign key relations. Firstly, one or more filter conditions for
one or more tables can be defined using \code{cdm_filter()}, with a syntax similar to \code{dplyr::filter()}.
These conditions will be stored in the \code{\link{dm}} and not immediately executed. With \code{cdm_apply_filters()}
all tables will be updated according to the filter conditions and the foreign key relations.
}
\details{
\code{cdm_filter()} allows you to set one or more filter conditions for one table
of a \code{\link{dm}} object. These conditions will be stored in the \code{\link{dm}} for when they are needed.
Once executed, the filtering will affect all tables connected to the filtered one by
foreign key constraints, leaving only the rows with the corresponding key values. The filtering
implicitly takes place, once a table is requested from the \code{\link{dm}} by using one of \code{tbl()}, \code{[[.dm()}, \code{$.dm()}.
With \code{cdm_apply_filters()} all set filter conditions are applied and their
combined cascading effect on each table of the \code{\link{dm}} is taken into account, producing a new
\code{dm} object.
This function is called by the \code{compute()} method for \code{dm} class objects.
}
\examples{
library(dplyr)
dm_nyc_filtered <-
cdm_nycflights13() \%>\%
cdm_filter(airports, name == "John F Kennedy Intl")
tbl(dm_nyc_filtered, "flights")
dm_nyc_filtered[["planes"]]
dm_nyc_filtered$airlines
cdm_nycflights13() \%>\%
cdm_filter(airports, name == "John F Kennedy Intl") \%>\%
cdm_apply_filters()
cdm_nycflights13() \%>\%
cdm_filter(flights, month == 3) \%>\%
cdm_apply_filters()
library(dplyr)
cdm_nycflights13() \%>\%
cdm_filter(planes, engine \%in\% c("Reciprocating", "4 Cycle")) \%>\%
compute()
}
| /man/cdm_filter.Rd | permissive | cutterkom/dm | R | false | true | 2,704 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter-dm.R
\name{cdm_filter}
\alias{cdm_filter}
\alias{cdm_apply_filters}
\title{Filtering a \code{\link{dm}} object}
\usage{
cdm_filter(dm, table, ...)
cdm_apply_filters(dm)
}
\arguments{
\item{dm}{A \code{dm} object.}
\item{table}{A table in the \code{dm}}
\item{...}{Logical predicates defined in terms of the variables in \code{.data}, passed on to \code{\link[dplyr:filter]{dplyr::filter()}}.
Multiple conditions are combined with \code{&} or \code{,}. Only rows where the condition evaluates
to TRUE are kept.
The arguments in ... are automatically quoted and evaluated in the context of
the data frame. They support unquoting and splicing. See \code{vignette("programming", package = "dplyr")}
for an introduction to these concepts.}
}
\description{
Filtering one table of a \code{\link{dm}} object has an effect on all tables connected to this table
via one or more steps of foreign key relations. Firstly, one or more filter conditions for
one or more tables can be defined using \code{cdm_filter()}, with a syntax similar to \code{dplyr::filter()}.
These conditions will be stored in the \code{\link{dm}} and not immediately executed. With \code{cdm_apply_filters()}
all tables will be updated according to the filter conditions and the foreign key relations.
}
\details{
\code{cdm_filter()} allows you to set one or more filter conditions for one table
of a \code{\link{dm}} object. These conditions will be stored in the \code{\link{dm}} for when they are needed.
Once executed, the filtering will affect all tables connected to the filtered one by
foreign key constraints, leaving only the rows with the corresponding key values. The filtering
implicitly takes place, once a table is requested from the \code{\link{dm}} by using one of \code{tbl()}, \code{[[.dm()}, \code{$.dm()}.
With \code{cdm_apply_filters()} all set filter conditions are applied and their
combined cascading effect on each table of the \code{\link{dm}} is taken into account, producing a new
\code{dm} object.
This function is called by the \code{compute()} method for \code{dm} class objects.
}
\examples{
library(dplyr)
dm_nyc_filtered <-
cdm_nycflights13() \%>\%
cdm_filter(airports, name == "John F Kennedy Intl")
tbl(dm_nyc_filtered, "flights")
dm_nyc_filtered[["planes"]]
dm_nyc_filtered$airlines
cdm_nycflights13() \%>\%
cdm_filter(airports, name == "John F Kennedy Intl") \%>\%
cdm_apply_filters()
cdm_nycflights13() \%>\%
cdm_filter(flights, month == 3) \%>\%
cdm_apply_filters()
library(dplyr)
cdm_nycflights13() \%>\%
cdm_filter(planes, engine \%in\% c("Reciprocating", "4 Cycle")) \%>\%
compute()
}
|
## First Lab
# Basic atomic types; print the storage type of each.
# NOTE(review): these names shadow base functions/objects
# (character(), numeric(), all(), ...) -- presumably intentional for the lab.
character <- "first string in R"
numeric <- 42.2
integer <- 42L
complex <- 42 + 42i
logical <- TRUE
all <- list(character, numeric, integer, complex, logical)
for (var in all) { print(typeof(var)) }
# Vector construction: a sequence, a literal vector, and a replicated value.
vector1 <- 5:75
vector2 <- c(3.14, 2.71, 0, 13)
vector3 <- rep(TRUE, 100)
# 4x3 structure built by row-binding lists (a matrix of list cells).
m <- rbind(list(0.5, 1.3, 3.5), list(3.9, 131, 2.8), list(0, 2.2, 4.6), list(2, 7, 5.1))
# Factor with numeric codes relabelled to age groups.
humans <- factor(c(0, 1, 1, 1, 0, 2, 2, 0, 2, 1), levels = c(0, 1, 2))
levels(humans) <- c("baby", "child", "adult")
# Locate missing values: position of the first NA, and the NA count.
na_find <- is.na(c( 1, 2, 3, 4, NA, 6, 7, NA, 9, NA, 11))
w_1 <- min(which(na_find == TRUE))
lw <- length(which(na_find == TRUE))
# Random integer marks in [0, 100) and a pass/fail (>= 60) data frame.
mark <- as.integer(runif(5, min=0L, max=100L))
frame <- data.frame(mark=mark, norm = mark >= 60)
names(frame) <- c("student mark", "enrolled")
## Second Lab
# Elements 20..100 of a random vector; show the first 10 of them.
v <- rnorm(100)
head(v[20:length(v)], 10)
# Mixed-type data frame; show its last 10 rows.
y <- data.frame(a = rnorm(100), b = 1:100, cc = sample(letters, 100, replace = TRUE))
tail(y, n=10)
# Mean ignoring NAs (equivalent to mean(z, na.rm = TRUE)).
z <- c(1, 2, 3, NA, 4, NA, 5, NA)
mean(z[!is.na(z)])
| /all.R | no_license | livankrekh/R_PythonCourseKNU | R | false | false | 985 | r | ## First Lab
# Basic atomic types; print the storage type of each.
# NOTE(review): these names shadow base functions/objects
# (character(), numeric(), all(), ...) -- presumably intentional for the lab.
character <- "first string in R"
numeric <- 42.2
integer <- 42L
complex <- 42 + 42i
logical <- TRUE
all <- list(character, numeric, integer, complex, logical)
for (var in all) { print(typeof(var)) }
# Vector construction: a sequence, a literal vector, and a replicated value.
vector1 <- 5:75
vector2 <- c(3.14, 2.71, 0, 13)
vector3 <- rep(TRUE, 100)
# 4x3 structure built by row-binding lists (a matrix of list cells).
m <- rbind(list(0.5, 1.3, 3.5), list(3.9, 131, 2.8), list(0, 2.2, 4.6), list(2, 7, 5.1))
# Factor with numeric codes relabelled to age groups.
humans <- factor(c(0, 1, 1, 1, 0, 2, 2, 0, 2, 1), levels = c(0, 1, 2))
levels(humans) <- c("baby", "child", "adult")
# Locate missing values: position of the first NA, and the NA count.
na_find <- is.na(c( 1, 2, 3, 4, NA, 6, 7, NA, 9, NA, 11))
w_1 <- min(which(na_find == TRUE))
lw <- length(which(na_find == TRUE))
# Random integer marks in [0, 100) and a pass/fail (>= 60) data frame.
mark <- as.integer(runif(5, min=0L, max=100L))
frame <- data.frame(mark=mark, norm = mark >= 60)
names(frame) <- c("student mark", "enrolled")
## Second Lab
# Elements 20..100 of a random vector; show the first 10 of them.
v <- rnorm(100)
head(v[20:length(v)], 10)
# Mixed-type data frame; show its last 10 rows.
y <- data.frame(a = rnorm(100), b = 1:100, cc = sample(letters, 100, replace = TRUE))
tail(y, n=10)
# Mean ignoring NAs (equivalent to mean(z, na.rm = TRUE)).
z <- c(1, 2, 3, NA, 4, NA, 5, NA)
mean(z[!is.na(z)])
## Part 1 ----
# Observed correlation between speed and stopping distance
# (the outer parentheses print the assigned value).
(observed_cor <- cor(cars$speed, cars$dist))
## Part 2 ----
# Permutation null distribution: shuffle speed to break any association,
# then recompute the correlation. `perm_cars` is copied fresh from the
# enclosing environment on each replicate, so permutations do not compound.
n_reps <- 10000
perm_cars <- cars
perm_stats <- replicate(n_reps, {
  perm_cars$speed <- sample(perm_cars$speed)
  cor(perm_cars$speed, perm_cars$dist)
})
## Part 3 ----
# Density of the null distribution, with the observed statistic in red.
library(ggplot2)
ggplot(mapping = aes(x = perm_stats)) +
  geom_density() +
  geom_vline(xintercept = observed_cor, col = "red") +
  theme_bw()
## Part 4 ----
# One-sided permutation p-value: proportion of permuted statistics at least
# as large as the observed one.
# NOTE(review): no set.seed(), so the value varies between runs; the
# (r + 1) / (n + 1) estimator is a common alternative that avoids p = 0.
(p_val <- mean(perm_stats >= observed_cor))
## Part 5 ----
# Normal-approximation Monte Carlo confidence interval for the p-value.
p_val + c(lower = -1, p_value = 0, upper = 1) * qnorm(0.975) * sqrt(p_val * (1 - p_val) / length(perm_stats))
## Part 6 ---
# We can reject the null hypothesis and conclude there is a significant positive
# correlation between speed and stopping distance. | /solutions/06/exam2_cars_sol.R | no_license | byu-stat-223/practice-finals | R | false | false | 723 | r | ## Part 1 ----
# Observed correlation between speed and stopping distance
# (the outer parentheses print the assigned value).
(observed_cor <- cor(cars$speed, cars$dist))
## Part 2 ----
# Permutation null distribution: shuffle speed to break any association,
# then recompute the correlation. `perm_cars` is copied fresh from the
# enclosing environment on each replicate, so permutations do not compound.
n_reps <- 10000
perm_cars <- cars
perm_stats <- replicate(n_reps, {
  perm_cars$speed <- sample(perm_cars$speed)
  cor(perm_cars$speed, perm_cars$dist)
})
## Part 3 ----
# Density of the null distribution, with the observed statistic in red.
library(ggplot2)
ggplot(mapping = aes(x = perm_stats)) +
  geom_density() +
  geom_vline(xintercept = observed_cor, col = "red") +
  theme_bw()
## Part 4 ----
# One-sided permutation p-value: proportion of permuted statistics at least
# as large as the observed one.
# NOTE(review): no set.seed(), so the value varies between runs; the
# (r + 1) / (n + 1) estimator is a common alternative that avoids p = 0.
(p_val <- mean(perm_stats >= observed_cor))
## Part 5 ----
# Normal-approximation Monte Carlo confidence interval for the p-value.
p_val + c(lower = -1, p_value = 0, upper = 1) * qnorm(0.975) * sqrt(p_val * (1 - p_val) / length(perm_stats))
## Part 6 ---
# We can reject the null hypothesis and conclude there is a significant positive
# correlation between speed and stopping distance.
# Server-side outputs for the dashboard. This file is source()d inside the
# server function, so local = TRUE keeps the definitions in that environment.
# NOTE(review): `password` and the reactives (movie.tbl.format(), strata.tbl(),
# pred_data(), movie_benchmark(), movie_benchmark_plot()) are defined in the
# sourced files below -- confirm before refactoring.
source("external/appSourceFiles/reactives.R", local = TRUE)
source("external/appSourceFiles/movie_benchmark_plot.R", local = TRUE)

# Primary outputs ---------------------------------------------------------

# Movie-level table (password gated; server-side ajax for paging/search).
output$ex1 <- DT::renderDataTable({
  if (input$password_input == password) {
    action <- dataTableAjax(session, movie.tbl.format())
    DT::datatable(movie.tbl.format(),
                  options = list(pageLength = 10, searching = TRUE,
                                 ajax = list(url = action)))
  } else NULL
})

# Strata-level table (password gated; search disabled).
output$ex2 <- DT::renderDataTable({
  if (input$password_input == password) {
    action <- dataTableAjax(session, strata.tbl())
    DT::datatable(strata.tbl(),
                  options = list(pageLength = 10, searching = FALSE,
                                 ajax = list(url = action)))
  } else NULL
})

# Hmisc summary of selected movie-level columns.
output$summary1 <- renderPrint({
  if (input$password_input == password) {
    dataset <- movie.tbl() %>%
      select(classBO, Country, D_Gen8)
    Hmisc::describe(dataset)
  } else NULL
})

# Hmisc summary of strata-level columns, including awareness/interest metrics.
output$summary2 <- renderPrint({
  if (input$password_input == password) {
    dataset <- strata.tbl() %>%
      select(classBO, Country, D_Gen8,
             matches("Aware|DI|FirstChoice"))
    Hmisc::describe(dataset)
  } else NULL
})

# Prediction table, newest release first, with date/dollar/percent formatting.
output$pred_output <- renderTable({
  if (input$password_input == password) {
    pred_out <- pred_data()
    pred_out <- pred_out %>% arrange(desc(ReleaseDate))
    pred_out$ReleaseDate <- date_format()(pred_out$ReleaseDate)  # date format
    pred_out$WeekendBO <- dollar_format()(pred_out$WeekendBO)    # dollar format
    # Convert the rating-class probability columns to percentage strings.
    for (j in which(names(pred_out) %in% c("C","B","A--","A-","A","AA","AAA"))) {
      pred_out[j] <- scales::percent(pred_out[, j])
    }
    pred_out
  } else NULL
})

# Benchmark plot; recomputed only when the action button is pressed.
output$output_benchmark_plot <- renderPlot({
  input$goButton
  isolate({
    if (input$password_input == password) {
      movie_benchmark_plot()
    } else NULL
  })
},
width = 800
)

# Benchmark comparison table, also gated on the action button.
output$output_benchmark_table <- renderTable({
  input$goButton
  isolate({
    if (!is.null(movie_benchmark()) && input$password_input == password) {
      movie_benchmark <- movie_benchmark()
      movie_benchmark %>%
        dplyr::select(EngTitle, ChiTitle, wk_before_release, DI, Aware, FirstChoice) %>%
        mutate(wk_before_release = as.integer(wk_before_release)) %>%
        mutate(DI = scales::percent(DI),
               Aware = scales::percent(Aware),
               FirstChoice = scales::percent(FirstChoice)
        )
    } else NULL
  })
})
| /external/app.R | no_license | leoluyi/disney_movie_app | R | false | false | 2,543 | r | # source reactive expressions
# Server-side outputs for the dashboard. This file is source()d inside the
# server function, so local = TRUE keeps the definitions in that environment.
# NOTE(review): `password` and the reactives (movie.tbl.format(), strata.tbl(),
# pred_data(), movie_benchmark(), movie_benchmark_plot()) are defined in the
# sourced files below -- confirm before refactoring.
source("external/appSourceFiles/reactives.R", local = TRUE)
source("external/appSourceFiles/movie_benchmark_plot.R", local = TRUE)

# Primary outputs ---------------------------------------------------------

# Movie-level table (password gated; server-side ajax for paging/search).
output$ex1 <- DT::renderDataTable({
  if (input$password_input == password) {
    action <- dataTableAjax(session, movie.tbl.format())
    DT::datatable(movie.tbl.format(),
                  options = list(pageLength = 10, searching = TRUE,
                                 ajax = list(url = action)))
  } else NULL
})

# Strata-level table (password gated; search disabled).
output$ex2 <- DT::renderDataTable({
  if (input$password_input == password) {
    action <- dataTableAjax(session, strata.tbl())
    DT::datatable(strata.tbl(),
                  options = list(pageLength = 10, searching = FALSE,
                                 ajax = list(url = action)))
  } else NULL
})

# Hmisc summary of selected movie-level columns.
output$summary1 <- renderPrint({
  if (input$password_input == password) {
    dataset <- movie.tbl() %>%
      select(classBO, Country, D_Gen8)
    Hmisc::describe(dataset)
  } else NULL
})

# Hmisc summary of strata-level columns, including awareness/interest metrics.
output$summary2 <- renderPrint({
  if (input$password_input == password) {
    dataset <- strata.tbl() %>%
      select(classBO, Country, D_Gen8,
             matches("Aware|DI|FirstChoice"))
    Hmisc::describe(dataset)
  } else NULL
})

# Prediction table, newest release first, with date/dollar/percent formatting.
output$pred_output <- renderTable({
  if (input$password_input == password) {
    pred_out <- pred_data()
    pred_out <- pred_out %>% arrange(desc(ReleaseDate))
    pred_out$ReleaseDate <- date_format()(pred_out$ReleaseDate)  # date format
    pred_out$WeekendBO <- dollar_format()(pred_out$WeekendBO)    # dollar format
    # Convert the rating-class probability columns to percentage strings.
    for (j in which(names(pred_out) %in% c("C","B","A--","A-","A","AA","AAA"))) {
      pred_out[j] <- scales::percent(pred_out[, j])
    }
    pred_out
  } else NULL
})

# Benchmark plot; recomputed only when the action button is pressed.
output$output_benchmark_plot <- renderPlot({
  input$goButton
  isolate({
    if (input$password_input == password) {
      movie_benchmark_plot()
    } else NULL
  })
},
width = 800
)

# Benchmark comparison table, also gated on the action button.
output$output_benchmark_table <- renderTable({
  input$goButton
  isolate({
    if (!is.null(movie_benchmark()) && input$password_input == password) {
      movie_benchmark <- movie_benchmark()
      movie_benchmark %>%
        dplyr::select(EngTitle, ChiTitle, wk_before_release, DI, Aware, FirstChoice) %>%
        mutate(wk_before_release = as.integer(wk_before_release)) %>%
        mutate(DI = scales::percent(DI),
               Aware = scales::percent(Aware),
               FirstChoice = scales::percent(FirstChoice)
        )
    } else NULL
  })
})
|
# Number of x-axis slots one future treatment round occupies, given the row's
# treatment interval (4 / 2 / 1 slots for annual / semiannual / quarterly).
# Returns 0 when the row's past-round count does not match `pastrounds` or
# there are no future rounds. `x` may be a named character vector (a row
# coming from apply()), hence the as.numeric() coercions.
getXSkip = function(x, pastrounds)
{
  interval = x[['treatment_interval']]
  data_pastrounds = as.numeric(x[['pastrounds']])
  data_futurerounds = as.numeric(x[['futurerounds']])

  skip = 0
  if((data_pastrounds == pastrounds) && (data_futurerounds > 0)){
    # Lookup instead of the repeated if/else-if chain; default stays 0.
    skip = switch(interval,
                  "Future annual treatment"     = 4,
                  "Future semiannual treatment" = 2,
                  "Future quarterly treatment"  = 1,
                  0)
  }
  skip
}
# Attach plot coordinates to `data`: x spaces rounds along the axis
# (4 slots per past round, getXSkip() slots per future round) and y scales
# the elimination probability to the x range.
calculate_coords = function(data, pastrounds)
{
  # NOTE(review): max_x is computed but never used below -- dead code?
  max_x = max(data$futurerounds) + max(data$pastrounds)
  data$x = (data$pastrounds * 4) + (data$futurerounds * apply(data, 1, getXSkip, pastrounds=pastrounds))
  data$y = (data$elimination_probability * max(data$x))
  data
} | /lib/calculate_coords.R | no_license | devbin/vidivisus | R | false | false | 762 | r | getXSkip = function(x, pastrounds)
{
  # `x` may be a named character vector (a row from apply()), hence the
  # as.numeric() coercions below.
  interval = x[['treatment_interval']]
  data_pastrounds = as.numeric(x[['pastrounds']])
  data_futurerounds = as.numeric(x[['futurerounds']])
  # Default: no future-round spacing.
  skip = 0
  # Only rows matching the requested past-round count, with at least one
  # future round, get a nonzero skip: 4 / 2 / 1 slots per future round for
  # annual / semiannual / quarterly treatment.
  if((data_pastrounds == pastrounds) && (data_futurerounds > 0)){
    if(interval == "Future annual treatment"){
      skip = 4
    }else if(interval == "Future semiannual treatment"){
      skip = 2
    }else if(interval == "Future quarterly treatment"){
      skip = 1
    }
  }
  skip
}
# Attach plot coordinates to `data`: x spaces rounds along the axis
# (4 slots per past round, getXSkip() slots per future round) and y scales
# the elimination probability to the x range.
calculate_coords = function(data, pastrounds)
{
  # (Removed an unused `max_x` computation -- it was never referenced.)
  data$x = (data$pastrounds * 4) + (data$futurerounds * apply(data, 1, getXSkip, pastrounds=pastrounds))
  data$y = (data$elimination_probability * max(data$x))
  data
}
#' Wrapper function for conditional independence tests.
#'
#' @description Tests the null hypothesis that Y and E are independent given X.
#'
#' @param Y An n-dimensional vector or a matrix or dataframe with n rows and p columns.
#' @param E An n-dimensional vector or a matrix or dataframe with n rows and p columns.
#' @param X An n-dimensional vector or a matrix or dataframe with n rows and p columns.
#' @param method The conditional indepdence test to use, can be one of
#' \code{"KCI"}, \code{"InvariantConditionalQuantilePrediction"}, \code{"InvariantEnvironmentPrediction"},
#' \code{"InvariantResidualDistributionTest"}, \code{"InvariantTargetPrediction"}, \code{"ResidualPredictionTest"}.
#' @param alpha Significance level. Defaults to 0.05.
#' @param parsMethod Named list to pass options to \code{method}.
#' @param verbose If \code{TRUE}, intermediate output is provided. Defaults to \code{FALSE}.
#'
#' @return A list with the p-value of the test (\code{pvalue}) and possibly additional
#' entries, depending on the output of the chosen conditional independence test in \code{method}.
#'
#' @references Please cite
#' C. Heinze-Deml, J. Peters and N. Meinshausen: "Invariant Causal Prediction for Nonlinear Models",
#' \href{https://arxiv.org/abs/1706.08576}{arXiv:1706.08576}
#' and the corresponding reference for the conditional independence test.
#'
#' @examples
#'
#' # Example 1
#' set.seed(1)
#' n <- 100
#' Z <- rnorm(n)
#' X <- 4 + 2 * Z + rnorm(n)
#' Y <- 3 * X^2 + Z + rnorm(n)
#' test1 <- CondIndTest(X,Y,Z, method = "KCI")
#' cat("These data come from a distribution, for which X and Y are NOT
#' cond. ind. given Z.")
#' cat(paste("The p-value of the test is: ", test1$pvalue))
#'
#' # Example 2
#' set.seed(1)
#' Z <- rnorm(n)
#' X <- 4 + 2 * Z + rnorm(n)
#' Y <- 3 + Z + rnorm(n)
#' test2 <- CondIndTest(X,Y,Z, method = "KCI")
#' cat("The data come from a distribution, for which X and Y are cond.
#' ind. given Z.")
#' cat(paste("The p-value of the test is: ", test2$pvalue))
#'
CondIndTest <- function(Y, E, X,
                        method = "KCI",
                        alpha = 0.05,
                        parsMethod = list(),
                        verbose = FALSE){

  # If Y and/or E is multivariate, run one test per (Y column) x (E column)
  # and Bonferroni-correct the overall p-value over the number of tests.
  dimY <- NCOL(Y)
  dimE <- NCOL(E)

  # These tests handle multivariate Y internally -> no correction over Y.
  if(method %in% c("KCI", "InvariantEnvironmentPrediction")) dimY <- 1

  # These tests handle multivariate E internally; all others are run once
  # per column of E.
  if(!(method %in% c("InvariantEnvironmentPrediction",
                     "InvariantResidualDistributionTest",
                     "InvariantConditionalQuantilePrediction"))) dimE <- 1

  nTests <- dimY*dimE
  results <- vector("list", nTests)
  pval_bonf <- 1
  k <- 1

  for(de in seq_len(dimE)){
    for(dy in seq_len(dimY)){

      # Arguments common to all tests; method-specific options come from
      # parsMethod.
      argsSet <- list(Y = if(dimY > 1) Y[, dy] else Y,
                      E = if(dimE > 1) E[, de] else E,
                      X = X,
                      alpha = alpha,
                      verbose = verbose)

      switch(method,
             "KCI" = {
               result <- do.call(KCI, c(argsSet, parsMethod))
             },
             "InvariantConditionalQuantilePrediction" = {
               result <- do.call(InvariantConditionalQuantilePrediction, c(argsSet, parsMethod))
             },
             "InvariantEnvironmentPrediction" = {
               result <- do.call(InvariantEnvironmentPrediction, c(argsSet, parsMethod))
             },
             "InvariantResidualDistributionTest" = {
               result <- do.call(InvariantResidualDistributionTest, c(argsSet, parsMethod))
             },
             "InvariantTargetPrediction" = {
               result <- do.call(InvariantTargetPrediction, c(argsSet, parsMethod))
             },
             "ResidualPredictionTest" = {
               result <- do.call(ResidualPredictionTest, c(argsSet, parsMethod))
             },
             {
               stop(paste("Method ", method," not implemented"))
             }
      )

      pval_bonf <- min(pval_bonf, result$pvalue)
      results[[k]] <- result
      # Rename the per-test p-value so that "pvalue" at the top level can
      # hold the Bonferroni-corrected overall p-value.
      names(results[[k]])[which(names(results[[k]]) == "pvalue")] <- "pvalue_individual"

      # Label each sub-result by the Y and/or E column it refers to.
      # (&& rather than &: both operands are scalars here.)
      if(dimY > 1 && dimE > 1)
        name <- paste("Y", dy, "E", de, sep = "")
      else if(dimE > 1)
        name <- paste("E", de, sep = "")
      else
        name <- paste("Y", dy, sep = "")
      names(results)[[k]] <- name

      k <- k + 1
    }
  }

  # Bonferroni-corrected overall p-value, capped at 1.
  results$pvalue <- min(1, pval_bonf*nTests)
  return(results)
}
| /CondIndTests/R/condIndTest.R | no_license | christinaheinze/nonlinearICP-and-CondIndTests | R | false | false | 4,752 | r | #' Wrapper function for conditional independence tests.
#'
#' @description Tests the null hypothesis that Y and E are independent given X.
#'
#' @param Y An n-dimensional vector or a matrix or dataframe with n rows and p columns.
#' @param E An n-dimensional vector or a matrix or dataframe with n rows and p columns.
#' @param X An n-dimensional vector or a matrix or dataframe with n rows and p columns.
#' @param method The conditional indepdence test to use, can be one of
#' \code{"KCI"}, \code{"InvariantConditionalQuantilePrediction"}, \code{"InvariantEnvironmentPrediction"},
#' \code{"InvariantResidualDistributionTest"}, \code{"InvariantTargetPrediction"}, \code{"ResidualPredictionTest"}.
#' @param alpha Significance level. Defaults to 0.05.
#' @param parsMethod Named list to pass options to \code{method}.
#' @param verbose If \code{TRUE}, intermediate output is provided. Defaults to \code{FALSE}.
#'
#' @return A list with the p-value of the test (\code{pvalue}) and possibly additional
#' entries, depending on the output of the chosen conditional independence test in \code{method}.
#'
#' @references Please cite
#' C. Heinze-Deml, J. Peters and N. Meinshausen: "Invariant Causal Prediction for Nonlinear Models",
#' \href{https://arxiv.org/abs/1706.08576}{arXiv:1706.08576}
#' and the corresponding reference for the conditional independence test.
#'
#' @examples
#'
#' # Example 1
#' set.seed(1)
#' n <- 100
#' Z <- rnorm(n)
#' X <- 4 + 2 * Z + rnorm(n)
#' Y <- 3 * X^2 + Z + rnorm(n)
#' test1 <- CondIndTest(X,Y,Z, method = "KCI")
#' cat("These data come from a distribution, for which X and Y are NOT
#' cond. ind. given Z.")
#' cat(paste("The p-value of the test is: ", test1$pvalue))
#'
#' # Example 2
#' set.seed(1)
#' Z <- rnorm(n)
#' X <- 4 + 2 * Z + rnorm(n)
#' Y <- 3 + Z + rnorm(n)
#' test2 <- CondIndTest(X,Y,Z, method = "KCI")
#' cat("The data come from a distribution, for which X and Y are cond.
#' ind. given Z.")
#' cat(paste("The p-value of the test is: ", test2$pvalue))
#'
CondIndTest <- function(Y, E, X,
                        method = "KCI",
                        alpha = 0.05,
                        parsMethod = list(),
                        verbose = FALSE){

  # If Y and/or E is multivariate, run one test per (Y column) x (E column)
  # and Bonferroni-correct the overall p-value over the number of tests.
  dimY <- NCOL(Y)
  dimE <- NCOL(E)

  # These tests handle multivariate Y internally -> no correction over Y.
  if(method %in% c("KCI", "InvariantEnvironmentPrediction")) dimY <- 1

  # These tests handle multivariate E internally; all others are run once
  # per column of E.
  if(!(method %in% c("InvariantEnvironmentPrediction",
                     "InvariantResidualDistributionTest",
                     "InvariantConditionalQuantilePrediction"))) dimE <- 1

  nTests <- dimY*dimE
  results <- vector("list", nTests)
  pval_bonf <- 1
  k <- 1

  for(de in seq_len(dimE)){
    for(dy in seq_len(dimY)){

      # Arguments common to all tests; method-specific options come from
      # parsMethod.
      argsSet <- list(Y = if(dimY > 1) Y[, dy] else Y,
                      E = if(dimE > 1) E[, de] else E,
                      X = X,
                      alpha = alpha,
                      verbose = verbose)

      switch(method,
             "KCI" = {
               result <- do.call(KCI, c(argsSet, parsMethod))
             },
             "InvariantConditionalQuantilePrediction" = {
               result <- do.call(InvariantConditionalQuantilePrediction, c(argsSet, parsMethod))
             },
             "InvariantEnvironmentPrediction" = {
               result <- do.call(InvariantEnvironmentPrediction, c(argsSet, parsMethod))
             },
             "InvariantResidualDistributionTest" = {
               result <- do.call(InvariantResidualDistributionTest, c(argsSet, parsMethod))
             },
             "InvariantTargetPrediction" = {
               result <- do.call(InvariantTargetPrediction, c(argsSet, parsMethod))
             },
             "ResidualPredictionTest" = {
               result <- do.call(ResidualPredictionTest, c(argsSet, parsMethod))
             },
             {
               stop(paste("Method ", method," not implemented"))
             }
      )

      pval_bonf <- min(pval_bonf, result$pvalue)
      results[[k]] <- result
      # Rename the per-test p-value so that "pvalue" at the top level can
      # hold the Bonferroni-corrected overall p-value.
      names(results[[k]])[which(names(results[[k]]) == "pvalue")] <- "pvalue_individual"

      # Label each sub-result by the Y and/or E column it refers to.
      # (&& rather than &: both operands are scalars here.)
      if(dimY > 1 && dimE > 1)
        name <- paste("Y", dy, "E", de, sep = "")
      else if(dimE > 1)
        name <- paste("E", de, sep = "")
      else
        name <- paste("Y", dy, sep = "")
      names(results)[[k]] <- name

      k <- k + 1
    }
  }

  # Bonferroni-corrected overall p-value, capped at 1.
  results$pvalue <- min(1, pval_bonf*nTests)
  return(results)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occMod.R
\name{occMod}
\alias{occMod}
\title{Run occupancy models in \code{rapidPop}.}
\usage{
occMod(
input_file = NULL,
input_file_type = ".txt",
nm1 = "day01",
nmF = "day12",
parameter = FALSE,
nm_parameter,
digits = 3,
shiny = FALSE
)
}
\arguments{
\item{input_file}{The path to and name of your input file. This package includes an
\code{example_input_file.txt} that might be helpful.}
\item{input_file_type}{The file type (or extension). Options are c(".txt", ".csv")}
\item{nm1}{The name of the first column containing occupancy observation data.
Your \code{input_file} must have some columns contatining information about
whether there were animals in your observations. These columns can be either
0s and 1s or the number of animals in the image. \code{nm1} is the name (or header)
of the first column containing these data.}
\item{nmF}{The name of the final column containing occupancy data.}
\item{parameter}{logical. Do you want to model the effect of a parameter on occupancy?
For example, do you have a column indicating if these observations came from
before or after control operations? If so, you could model the effect of this
parameter on occupancy.}
\item{nm_parameter}{The name of the column for your \code{parameter}. If you
are modeling a parameter (\code{parameter = TRUE}), this is the column name
for the parameter you want to use.}
\item{digits}{The number of digits to round to. This number is passed to the
function \code{round}}
}
\description{
Run occupancy models in \code{rapidPop}.
}
| /man/occMod.Rd | no_license | mikeyEcology/rapidPop | R | false | true | 1,620 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occMod.R
\name{occMod}
\alias{occMod}
\title{Run occupancy models in \code{rapidPop}.}
\usage{
occMod(
input_file = NULL,
input_file_type = ".txt",
nm1 = "day01",
nmF = "day12",
parameter = FALSE,
nm_parameter,
digits = 3,
shiny = FALSE
)
}
\arguments{
\item{input_file}{The path to and name of your input file. This package includes an
\code{example_input_file.txt} that might be helpful.}
\item{input_file_type}{The file type (or extension). Options are c(".txt", ".csv")}
\item{nm1}{The name of the first column containing occupancy observation data.
Your \code{input_file} must have some columns contatining information about
whether there were animals in your observations. These columns can be either
0s and 1s or the number of animals in the image. \code{nm1} is the name (or header)
of the first column containing these data.}
\item{nmF}{The name of the final column containing occupancy data.}
\item{parameter}{logical. Do you want to model the effect of a parameter on occupancy?
For example, do you have a column indicating if these observations came from
before or after control operations? If so, you could model the effect of this
parameter on occupancy.}
\item{nm_parameter}{The name of the column for your \code{parameter}. If you
are modeling a parameter (\code{parameter = TRUE}), this is the column name
for the parameter you want to use.}
\item{digits}{The number of digits to round to. This number is passed to the
function \code{round}}
}
\description{
Run occupancy models in \code{rapidPop}.
}
|
# Merge per-sample DNAcopy log-ratio files (column 4 of each) into one
# probe-by-sample table for GEO submission.
setwd('~/Projects/NKIProjects/LNCGH/DNAcopyData/')
files <- dir(pattern='txt')

# Probe IDs come from the first file.
# NOTE(review): assumes every file lists probes in the same order -- confirm.
tempData <- read.delim(files[1], stringsAsFactors=FALSE)
probes <- tempData$PROBE_ID

# One list slot per input file, named by the sample id (text before the
# first underscore in the file name).
dataList <- vector(mode='list', length=length(files))
names(dataList) <- vapply(strsplit(files, split='_'), function(x) x[1], character(1))

# Column 4 holds the value of interest in each file.
for (i in seq_along(files)) {
  dataList[[i]] <- read.delim(files[i], stringsAsFactors=FALSE)[, 4]
}

dataFr <- as.data.frame(do.call('cbind', dataList))
dataFr <- cbind(probes, dataFr)
write.table(x=dataFr, file='../GEOprobedata.txt', sep='\t',
            quote=FALSE, row.names=FALSE)
| /mariekeNL_produceGEOdata.R | no_license | ChrisKlijn/LNCGH | R | false | false | 592 | r | setwd('~/Projects/NKIProjects/LNCGH/DNAcopyData/')
# Merge per-sample DNAcopy log-ratio files (column 4 of each) into one
# probe-by-sample table for GEO submission.
files <- dir(pattern='txt')

# Probe IDs come from the first file.
# NOTE(review): assumes every file lists probes in the same order -- confirm.
tempData <- read.delim(files[1], stringsAsFactors=FALSE)
probes <- tempData$PROBE_ID

# One list slot per input file, named by the sample id (text before the
# first underscore in the file name).
dataList <- vector(mode='list', length=length(files))
names(dataList) <- vapply(strsplit(files, split='_'), function(x) x[1], character(1))

# Column 4 holds the value of interest in each file.
for (i in seq_along(files)) {
  dataList[[i]] <- read.delim(files[i], stringsAsFactors=FALSE)[, 4]
}

dataFr <- as.data.frame(do.call('cbind', dataList))
dataFr <- cbind(probes, dataFr)
write.table(x=dataFr, file='../GEOprobedata.txt', sep='\t',
            quote=FALSE, row.names=FALSE)
|
library(ggplot2)
# Demo: three ways of wiring data sources and aesthetic mappings into layers.
N <- 20
# df2 is a jittered copy of df1.
df1 <- data.frame(x=sort(rnorm(N)),y=sort(rnorm(N)))
df2 <- data.frame(x=df1$x+0.1*rnorm(N),y=df1$y+0.1*rnorm(N))
# p1: all layers share both the data source and the colour mapping
# (zh title: "all layers share data source and aesthetic mappings").
p1 <- ggplot(df1,aes(x,y,color=x+y))+
  geom_line(size=1)+
  geom_point(shape=16,size=5)+
  guides(color=guide_colorbar(title = "Point\nLine"))+
  labs(title = "所有图层共享数据源和视觉通道映射")
# p2: layers share only the data source; each layer maps and scales its own
# colour/fill (zh title: "all layers share only the data source").
p2 <- ggplot(df1,aes(x,y))+
  geom_line(aes(color=x+y),size=1)+
  geom_point(aes(fill=x+y),size=5,color="black",shape=21)+
  scale_fill_distiller(name="Point",palette="YlOrRd")+
  scale_color_distiller(name="Line",palette = "Blues")+
  labs(title="所有图层仅共享数据源")
# p3: every layer carries its own data source and mappings
# (zh title: "each layer uses an independent data source and mappings").
p3 <- ggplot()+
  geom_line(aes(x,y,color=x+y),df1,size=1)+
  geom_point(aes(x,y,fill=x+y),df2,color="black",shape=21,size=5)+
  scale_fill_distiller(name="Point",palette = "YlOrRd")+
  scale_color_distiller(name="Line",palette = "Blues")+
  labs(title="各图层对象均使用独立的数据源与视觉通道映射")
# Arrange the three plots side by side.
library(gridExtra)
grid.arrange(p1,p2,p3, ncol = 3, nrow = 1) | /可视化之美/1.R | no_license | MEP218713/R-study | R | false | false | 1,044 | r | library(ggplot2)
# Demo: three ways of wiring data sources and aesthetic mappings into layers.
N <- 20
# df2 is a jittered copy of df1.
df1 <- data.frame(x=sort(rnorm(N)),y=sort(rnorm(N)))
df2 <- data.frame(x=df1$x+0.1*rnorm(N),y=df1$y+0.1*rnorm(N))
# p1: all layers share both the data source and the colour mapping
# (zh title: "all layers share data source and aesthetic mappings").
p1 <- ggplot(df1,aes(x,y,color=x+y))+
  geom_line(size=1)+
  geom_point(shape=16,size=5)+
  guides(color=guide_colorbar(title = "Point\nLine"))+
  labs(title = "所有图层共享数据源和视觉通道映射")
# p2: layers share only the data source; each layer maps and scales its own
# colour/fill (zh title: "all layers share only the data source").
p2 <- ggplot(df1,aes(x,y))+
  geom_line(aes(color=x+y),size=1)+
  geom_point(aes(fill=x+y),size=5,color="black",shape=21)+
  scale_fill_distiller(name="Point",palette="YlOrRd")+
  scale_color_distiller(name="Line",palette = "Blues")+
  labs(title="所有图层仅共享数据源")
# p3: every layer carries its own data source and mappings
# (zh title: "each layer uses an independent data source and mappings").
p3 <- ggplot()+
  geom_line(aes(x,y,color=x+y),df1,size=1)+
  geom_point(aes(x,y,fill=x+y),df2,color="black",shape=21,size=5)+
  scale_fill_distiller(name="Point",palette = "YlOrRd")+
  scale_color_distiller(name="Line",palette = "Blues")+
  labs(title="各图层对象均使用独立的数据源与视觉通道映射")
# Arrange the three plots side by side.
library(gridExtra)
grid.arrange(p1,p2,p3, ncol = 3, nrow = 1)
# Extract co-mentioned stock codes from Yahoo news articles, build a
# co-occurrence network, and export centrality measures.
library(Hmisc)
library(quanteda)
library(tm)
library(NLP)
library(openNLP)
library(jiebaR)
library(reshape)
library(igraph)
library(statnet)
library(sna)

# Universe of listed electronics-industry stock codes, plus a few extras.
all_code <- read.csv("D:/論文/上市櫃電子工業代碼.csv")
all_code <- all_code$Code
all_code <- c(all_code,"3291","4962","3411","3474","3061","8172","6416","5466")

# (full.names spelled out -- the original relied on partial matching.)
dir.list <- list.files("D:/論文/yahoo_news", full.names = TRUE)
cc1 <- worker()  # jiebaR segmenter

# For each article: strip date-/number-like tokens, segment, and keep only
# the tokens that are known stock codes.
# NOTE(review): readLines(encoding = "ASCII") on Chinese news text looks
# suspect -- confirm the files' actual encoding.
codelist1 <- vector("list", length(dir.list))  # preallocate
for (i in seq_along(dir.list)) {
  file1 <- dir.list[i]
  s1 <- readLines(file1, encoding = "ASCII")
  s1 <- toString(s1)
  s1 <- gsub("[0-1][0][0-9]", replacement = "", s1)
  s1 <- gsub("[1-2][0-2][0-4][0-9]", replacement = "", s1)
  s1 <- gsub("[0-9][0-9][0][0]", replacement = "", s1)
  s1 <- gsub("[億萬元年]", replacement = "", s1)
  segment <- cc1[s1]
  temp <- which(segment %in% all_code)
  codelist1[[i]] <- segment[temp]
}

# Pad each article's code list to equal length and save as a matrix.
codelist1 <- t(sapply(codelist1, '[', seq(max(lengths(codelist1)))))
write.csv(codelist1, file = "D:/論文/SNA/companycode.csv")

input <- read.csv("D:/論文/SNA/companycode.csv")
#input <- sapply(input, as.character)
input[is.na(input)] <- " "
#input <- as.data.frame(df)
write.csv(input, file = "D:/論文/SNA/companycode_input.csv")

# Read the csv file (skipping the header) line-by-line as character strings.
lines <- scan(file="D:/論文/SNA/companycode_input.csv", what="character", sep="\n", skip=1)
lines <- gsub(","," ",lines)  # replace commas with spaces
lines <- gsub("[ ]+$","",gsub("[ ]+"," ",lines))  # remove trailing and multiple spaces
# Split each line into its vector of codes (adjacency-list style).
adjlist <- strsplit(lines," ")
# Column 1: the line's first code, repeated once per remaining code.
col1 <- unlist(lapply(adjlist,function(x) rep(x[1],length(x)-1)))
# Column 2: the remaining codes of each line, flattened.
col2 <- unlist(lapply(adjlist,"[",-1))
el1 <- cbind(col1,col2)  # edge list
#graph_from_edgelist(el, directed = TRUE)
g1 <- graph.data.frame(el1, directed = FALSE)
plot(g1)
plot(g1, vertex.label = NA, vertex.shape="sphere", vertex.size=3,edge.arrow.size=.2, edge.color="gray80",
     vertex.color="orange", vertex.frame.color="#ffffff",
     vertex.label.color="black")
#plot(g, layout=layout_with_fr(g))

# Centrality measures. sna/statnet (attached after igraph) mask degree(),
# betweenness() and closeness(); the argument lists below are igraph's,
# so call the igraph versions explicitly.
d <- igraph::degree(g1)
# outd <- degree(g1,cmode="outdegree")
# ind <- degree(g1,cmode="indegree")
vb <- igraph::betweenness(g1, directed = FALSE, weights = NULL)
eb <- edge_betweenness(g1, directed = FALSE, weights = NULL)
clo <- igraph::closeness(g1)
A1 <- as_adjacency_matrix(g1, type="both", names=TRUE, sparse=FALSE, attr=NULL)
d1 <- centr_degree(g1, mode = "all", loops = TRUE, normalized = FALSE)
vb1 <- centr_betw(g1, directed = FALSE, nobigint = TRUE, normalized = FALSE)
clo1 <- centr_clo(g1, mode = "all", normalized = FALSE)

write.csv(el1, file = "D:/論文/SNA/edgelist.csv")
write.csv(A1, file = "D:/論文/SNA/adjacency_matrix.csv")
write.csv(d, file = "D:/論文/SNA/alldegree_igraph.csv")
write.csv(vb, file = "D:/論文/SNA/vertexbetweenness_igraph.csv")
write.csv(eb,file = "D:/論文/SNA/edgebetweenness_igraph.csv")
write.csv(clo,file = "D:/論文/SNA/closeness_igraph.csv")
write.csv(d1,file = "D:/論文/SNA/alldegree_i.csv")
write.csv(vb1,file = "D:/論文/SNA/vertexbetweenness_i.csv")
write.csv(clo1,file = "D:/論文/SNA/closeness_i.csv")
| /SNA.R | no_license | bruce820614/Thesis | R | false | false | 3,390 | r | library(Hmisc)
library(quanteda)
library(tm)
library(NLP)
library(openNLP)
library(jiebaR)
library(reshape)
library(igraph)
library(statnet)
library(dils)
library(sna)
all_code <- read.csv("D:/論文/上市櫃電子工業代碼.csv")
all_code <- all_code$Code
all_code <- c(all_code,"3291","4962","3411","3474","3061","8172","6416","5466")
dir.list = list.files("D:/論文/yahoo_news" , full.name = TRUE)
cc1 = worker()
codelist1 <- list()
for(i in 1:length(dir.list)){
file1 = dir.list[i]
s1 =readLines(file1,encoding="ASCII")
s1 = toString(s1)
s1 = gsub("[0-1][0][0-9]",replacement = "",s1)
s1 = gsub("[1-2][0-2][0-4][0-9]",replacement = "",s1)
s1 = gsub("[0-9][0-9][0][0]",replacement = "",s1)
s1 = gsub("[億萬元年]",replacement = "",s1)
segment <- cc1[s1]
temp <- which(segment %in% all_code)
codelist1[[i]] <- segment[temp]
}
codelist1 <- t(sapply(codelist1, '[', seq(max(lengths(codelist1)))))
write.csv(codelist1,file = "D:/論文/SNA/companycode.csv")
input <- read.csv("D:/論文/SNA/companycode.csv")
#input <- sapply(input, as.character)
input[is.na(input)] <- " "
#input <- as.data.frame(df)
write.csv(input,file = "D:/論文/SNA/companycode_input.csv")
lines=scan(file="D:/論文/SNA/companycode_input.csv",what="character",sep="\n",skip=1) # read the csv file (skipping the header), line-by-line as character string.
lines=gsub(","," ",lines) # replace commas with spaces
lines=gsub("[ ]+$","",gsub("[ ]+"," ",lines)) # remove trailing and multiple spaces.
adjlist=strsplit(lines," ") # splits the character strings into list with different vector for each line
col1=unlist(lapply(adjlist,function(x) rep(x[1],length(x)-1))) # establish first column of edgelist by replicating the 1st element (=ID number) by the length of the line minus 1 (itself)
col2=unlist(lapply(adjlist,"[",-1)) # the second line I actually don't fully understand this command, but it takes the rest of the ID numbers in the character string and transposes it to list vertically
el1=cbind(col1,col2) # creates the edgelist by combining column 1 and 2.
#graph_from_edgelist(el, directed = TRUE)
g1=graph.data.frame(el1,directed=F)
plot(g1)
plot(g1, vertex.label = NA, vertex.shape="sphere", vertex.size=3,edge.arrow.size=.2, edge.color="gray80",
vertex.color="orange", vertex.frame.color="#ffffff",
vertex.label.color="black")
#plot(g, layout=layout_with_fr(g))
d <- degree(g1)
# outd <- degree(g1,cmode="outdegree")
# ind <- degree(g1,cmode="indegree")
vb <- betweenness(g1,directed = F,weights = NULL)
eb <- edge_betweenness(g1,directed = F,weights = NULL)
clo <- closeness(g1)
A1 <- as_adjacency_matrix(g1,type="both",names=TRUE,sparse=FALSE,attr=NULL)
d1 <- centr_degree(g1, mode = "all", loops = T, normalized = F)
vb1 <- centr_betw(g1, directed = F, nobigint = T, normalized = F)
clo1 <- centr_clo(g1, mode = "all", normalized = F)
write.csv(el1,file = "D:/論文/SNA/edgelist.csv")
write.csv(A1,file = "D:/論文/SNA/adjacency_matrix.csv")
write.csv(d,file = "D:/論文/SNA/alldegree_igraph.csv")
write.csv(vb,file = "D:/論文/SNA/vertexbetweenness_igraph.csv")
write.csv(eb,file = "D:/論文/SNA/edgebetweenness_igraph.csv")
write.csv(clo,file = "D:/論文/SNA/closeness_igraph.csv")
write.csv(d1,file = "D:/論文/SNA/alldegree_i.csv")
write.csv(vb1,file = "D:/論文/SNA/vertexbetweenness_i.csv")
write.csv(clo1,file = "D:/論文/SNA/closeness_i.csv")
|
library(shiny)
library(shinydashboard)
library(tidyverse)
library(lubridate)
library(incidence)
#library(ggiraph)
library(echarts4r)
library(earlyR)
library(epitrix)
library(shinyjs)
library(shinyWidgets)
library(projections)
library(EpiEstim)
# get data
# Source: Johns Hopkins CSSE COVID-19 repository -- global confirmed-case
# time series plus the UID lookup table (used here for populations).
jhu_url <- paste("https://raw.githubusercontent.com/CSSEGISandData/",
                 "COVID-19/master/csse_covid_19_data/", "csse_covid_19_time_series/",
                 "time_series_covid19_confirmed_global.csv", sep = "")
jhu_pop <- paste("https://raw.githubusercontent.com/CSSEGISandData/",
                 "COVID-19/master/csse_covid_19_data/",
                 "UID_ISO_FIPS_LookUp_Table.csv", sep = "")
# keep only the join key and the population column
country_pop <- read_csv(jhu_pop) %>%
  select(Combined_Key, Population)
# Reshape the wide JHU table (one column per date) into long format with
# one row per province/date, then derive daily incident cases.
confirmed_long_jhu <- read_csv(jhu_url) %>%
  rename(province = "Province/State",
         country_region = "Country/Region") %>%
  pivot_longer(-c(province, country_region, Lat, Long),
               names_to = "Date",
               values_to = "cumulative_cases") %>%
  # parse the "m/d/yy" header strings into Date objects
  # (no timezone shifting is performed here)
  mutate(Date = mdy(Date)) %>%
  arrange(country_region, province, Date) %>%
  group_by(country_region, province) %>%
  mutate(
    # daily new cases = first difference of the cumulative series,
    # with 0 for the first observed day of each province
    incident_cases = c(0, diff(cumulative_cases))
  ) %>%
  ungroup() %>%
  # countries with no province breakdown get a placeholder label
  replace_na(list(province='N/A')) %>%
  # drop the synthetic "Recovered" pseudo-provinces in the JHU data
  filter(!str_detect(province, "Recovered"))
# Aggregate provinces up to country level and attach population.
# NOTE(review): the join key assumes country names match Combined_Key
# exactly, which only holds for countries without province rows.
country_jhu <- confirmed_long_jhu %>%
  group_by(country_region, Date) %>%
  summarise(
    cumulative_cases = sum(cumulative_cases),
    incident_cases = sum(incident_cases)
  ) %>%
  left_join(country_pop, by=c('country_region' = 'Combined_Key')) %>%
  mutate(infection_pct = cumulative_cases / Population) %>%
  ungroup()
# quick sanity check on one country's most recent rows
country_jhu %>%
  filter(country_region == 'Canada') %>% tail()
# Custom wrapper around EpiEstim's plot method: render the incidence,
# serial-interval and R(t) panels separately and stack them in a single
# column, avoiding the ugly TableGrob messages printed by the default
# combined plot in the EpiEstim package.
plot_Ri <- function(estimate_R_obj) {
  incidence_panel <- plot(estimate_R_obj, "incid", add_imported_cases = TRUE)
  serial_interval_panel <- plot(estimate_R_obj, "SI")
  reproduction_panel <- plot(estimate_R_obj, "R")
  gridExtra::grid.arrange(incidence_panel, serial_interval_panel,
                          reproduction_panel, ncol = 1)
}
| /app/global.R | no_license | rickyking/covid19-R0-minitor | R | false | false | 2,217 | r | library(shiny)
library(shinydashboard)
library(tidyverse)
library(lubridate)
library(incidence)
#library(ggiraph)
library(echarts4r)
library(earlyR)
library(epitrix)
library(shinyjs)
library(shinyWidgets)
library(projections)
library(EpiEstim)
# get data
jhu_url <- paste("https://raw.githubusercontent.com/CSSEGISandData/",
"COVID-19/master/csse_covid_19_data/", "csse_covid_19_time_series/",
"time_series_covid19_confirmed_global.csv", sep = "")
jhu_pop <- paste("https://raw.githubusercontent.com/CSSEGISandData/",
"COVID-19/master/csse_covid_19_data/",
"UID_ISO_FIPS_LookUp_Table.csv", sep = "")
country_pop <- read_csv(jhu_pop) %>%
select(Combined_Key, Population)
confirmed_long_jhu <- read_csv(jhu_url) %>%
rename(province = "Province/State",
country_region = "Country/Region") %>%
pivot_longer(-c(province, country_region, Lat, Long),
names_to = "Date",
values_to = "cumulative_cases") %>%
# adjust JHU dates back one day to reflect US time, more or less
mutate(Date = mdy(Date)) %>%
arrange(country_region, province, Date) %>%
group_by(country_region, province) %>%
mutate(
incident_cases = c(0, diff(cumulative_cases))
) %>%
ungroup() %>%
replace_na(list(province='N/A')) %>%
filter(!str_detect(province, "Recovered"))
country_jhu <- confirmed_long_jhu %>%
group_by(country_region, Date) %>%
summarise(
cumulative_cases = sum(cumulative_cases),
incident_cases = sum(incident_cases)
) %>%
left_join(country_pop, by=c('country_region' = 'Combined_Key')) %>%
mutate(infection_pct = cumulative_cases / Population) %>%
ungroup()
country_jhu %>%
filter(country_region == 'Canada') %>% tail()
# custom results plotting function to avoid the ugly
# TableGrob messages returned by the plotting function in the
# EpiEstim package
plot_Ri <- function(estimate_R_obj) {
p_I <- plot(estimate_R_obj, "incid", add_imported_cases = TRUE) # plots the incidence
p_SI <- plot(estimate_R_obj, "SI") # plots the serial interval distribution
p_Ri <- plot(estimate_R_obj, "R")
return(gridExtra::grid.arrange(p_I, p_SI, p_Ri, ncol = 1))
}
|
#' Calculate jaccard statistics on two sets of intervals.
#'
#' The Jaccard statistic is the number of intersecting bases divided by
#' the number of bases in the union of the two interval sets.
#'
#' @param x tbl of intervals
#' @param y tbl of intervals
#' @param strand group intervals by strand. NOTE(review): this argument
#'   is currently ignored by the implementation -- confirm intent.
#'
#' @return \code{data_frame} with the following columns:
#' \itemize{
#'   \item{\code{len_i}}{ length of the intersection}
#'   \item{\code{len_u}}{ length of the union}
#'   \item{\code{jaccard}}{ jaccard statistic}
#'   \item{\code{n}}{ number of intersecting intervals between x and y}
#' }
#'
#' @seealso \url{http://bedtools.readthedocs.org/en/latest/content/tools/jaccard.html}
#'
#' @examples
#' x <- tibble::frame_data(
#' ~chrom, ~start, ~end,
#' "chr1", 10, 20,
#' "chr1", 30, 40
#' )
#'
#' y <- tibble::frame_data(
#' ~chrom, ~start, ~end,
#' "chr1", 15, 20
#' )
#'
#' bed_jaccard(x, y)
#'
#' @export
bed_jaccard <- function(x, y, strand = FALSE) {
  # total overlapping bases and the count of intersecting interval pairs
  res_intersect <- bed_intersect(x, y) %>%
    summarize(sum_overlap = sum(.overlap),
              n_int = n())
  # total bases covered by each input set
  res_x <- mutate(x, .size = end - start) %>%
    summarize(sum_x = sum(.size))
  res_y <- mutate(y, .size = end - start) %>%
    summarize(sum_y = sum(.size))
  n_i <- res_intersect$sum_overlap
  n <- res_intersect$n_int
  n_x <- res_x$sum_x
  n_y <- res_y$sum_y
  # n_u is the summed size of both sets; the true union size is
  # n_u - n_i, which is what the jaccard denominator uses below
  n_u <- n_x + n_y
  jaccard <- n_i / (n_u - n_i)
  res <- tibble::frame_data(
    ~len_i, ~len_u, ~jaccard, ~n,
    n_i, n_u, jaccard, n
  )
  res
}
| /R/bed_jaccard.r | no_license | dpastling/valr | R | false | false | 1,422 | r | #' Calculate jaccard statistics on two sets of intervals.
#'
#' @param x tbl of intervals
#' @param y tbl of intervals
#' @param strand group intervals by strand
#'
#' @return \code{data_frame} with the following columns:
#' \itemize{
#' \item{\code{len_i}}{ length of the intersection}
#' \item{\code{len_u}}{ length of the union}
#' \item{\code{jaccard}}{ jaccard statistic}
#' \item{\code{n_int}}{ number of intersecting intervals between x and y}
#' }
#'
#' @seealso \url{http://bedtools.readthedocs.org/en/latest/content/tools/jaccard.html}
#'
#' @examples
#' x <- tibble::frame_data(
#' ~chrom, ~start, ~end,
#' "chr1", 10, 20,
#' "chr1", 30, 40
#' )
#'
#' y <- tibble::frame_data(
#' ~chrom, ~start, ~end,
#' "chr1", 15, 20
#' )
#'
#' bed_jaccard(x, y)
#'
#' @export
bed_jaccard <- function(x, y, strand = FALSE) {
res_intersect <- bed_intersect(x, y) %>%
summarize(sum_overlap = sum(.overlap),
n_int = n())
res_x <- mutate(x, .size = end - start) %>%
summarize(sum_x = sum(.size))
res_y <- mutate(y, .size = end - start) %>%
summarize(sum_y = sum(.size))
n_i <- res_intersect$sum_overlap
n <- res_intersect$n_int
n_x <- res_x$sum_x
n_y <- res_y$sum_y
n_u <- n_x + n_y
jaccard <- n_i / (n_u - n_i)
res <- tibble::frame_data(
~len_i, ~len_u, ~jaccard, ~n,
n_i, n_u, jaccard, n
)
res
}
|
#' Convert a data frame to a matrix, preserving custom attributes.
#'
#' Drops the bookkeeping columns \code{intnr}, \code{paramnr} and
#' \code{varname}, coerces the remainder to a matrix with sequential
#' row names, and copies every non-structural attribute of the input
#' (anything other than dim/dimnames/class/names/row.names) onto the
#' result.
#'
#' @param x A data frame, or \code{NULL} (returned unchanged).
#' @return A matrix carrying the input's custom attributes, or \code{NULL}.
df2m <- function(x)
{
  if(!is.null(x)) {
    ## snapshot attributes before the columns are dropped below
    xattr <- attributes(x)
    nxa <- names(xattr)
    ## drop internal index columns before coercion
    x$intnr <- x$paramnr <- x$varname <- NULL
    cn <- colnames(x)
    x <- as.matrix(x)
    ## seq_len()/seq_along() are safe for zero rows / zero attributes,
    ## unlike 1L:nrow(x), which yields c(1, 0) for empty input
    rownames(x) <- seq_len(nrow(x))
    colnames(x) <- rep(cn, length.out = ncol(x))
    reserved <- c("dim", "dimnames", "class", "names", "row.names")
    for(k in seq_along(nxa))
      if(!(nxa[k] %in% reserved)) {
        attr(x, nxa[k]) <- xattr[[k]]
      }
  }
  return(x)
}
| /R/df2m.R | no_license | cran/R2BayesX | R | false | false | 439 | r | df2m <- function(x)
{
if(!is.null(x)) {
xattr <- attributes(x)
nxa <- names(xattr)
x$intnr <- x$paramnr <- x$varname <- NULL
cn <- colnames(x)
x <- as.matrix(x)
rownames(x) <- 1L:nrow(x)
colnames(x) <- rep(cn, length.out = ncol(x))
for(k in 1L:length(nxa))
if(all(nxa[k] != c("dim", "dimnames", "class", "names", "row.names"))) {
attr(x, nxa[k]) <- xattr[[k]]
}
}
return(x)
}
|
#' Count the unique non-missing values in a column of a data frame.
#'
#' @param frame A data frame.
#' @param id_col Name (string) of the column to count.
#' @return Integer count of distinct non-\code{NA} values in
#'   \code{frame[[id_col]]}.
#' @import dplyr
#' @export
count_unique <- function(frame, id_col) {
  # Plain base composition; avoids a hard dependency on the magrittr
  # pipe for a three-call chain.
  length(unique(na.omit(frame[[id_col]])))
}
| /wordsintransition/R/reporters.R | no_license | lupyanlab/words-in-transition | R | false | false | 180 | r |
#' Count the unique values in a tidy data frame.
#' @import dplyr
#' @export
count_unique <- function(frame, id_col) {
frame[[id_col]] %>% na.omit() %>% unique() %>% length()
}
|
if ("package:h2o" %in% search()) { detach("package:h2o", unload=TRUE) }
if ("h2o" %in% rownames(installed.packages())) { remove.packages("h2o") }
# Next, we download packages that H2O depends on.
if (! ("methods" %in% rownames(installed.packages()))) { install.packages("methods") }
if (! ("statmod" %in% rownames(installed.packages()))) { install.packages("statmod") }
if (! ("stats" %in% rownames(installed.packages()))) { install.packages("stats") }
if (! ("graphics" %in% rownames(installed.packages()))) { install.packages("graphics") }
if (! ("RCurl" %in% rownames(installed.packages()))) { install.packages("RCurl") }
if (! ("rjson" %in% rownames(installed.packages()))) { install.packages("rjson") }
if (! ("tools" %in% rownames(installed.packages()))) { install.packages("tools") }
if (! ("utils" %in% rownames(installed.packages()))) { install.packages("utils") }
# Now we download, install and initialize the H2O package for R.
install.packages("h2o", type="source", repos=(c("http://h2o-release.s3.amazonaws.com/h2o/rel-simons/7/R")))
library(h2o)
localH2O = h2o.init()
library(h2o)
setwd('~/scripts/titanic_ml_python_h2o/')
#create localh2o instance
localh2o <- h2o.init(ip = 'localhost',
port=54321,
max_mem_size='4g',
nthreads = -1)
# upload data into the local h2o instances
data_path <- "data.csv"
data.hex <- h2o.importFile(localh2o,path = data_path)
# specify the data and class label
data = c(3,5,6,7,8,9,11)
class = 2
data.hex[,2] = as.factor(data.hex[,2])
# set the parameters for data split
data.split<- h2o.splitFrame(data=data.hex,ratios = 0.6)
# Assign the data as training data and testing data
train = data.split[[1]]
test = data.split[[2]]
# build the gbm
data.gbm = h2o.gbm(training_frame=train,
x=data,
y=class,
ntrees=50,
max.depth=5)
# print the gbm information
data.gbm
# predict the result on testing data
pred_frame = h2o.predict(data.gbm,test)
# compute the performance on testing data
#perf = h2o.performance(pred_frame[,3],test[,2])
perf = h2o.performance(data.gbm, test)
perf
# plot auc curve
plot(perf, type = "roc", col = "blue", typ = "b")
# plot the score to get the cut-off
plot(perf, type = "cutoffs", col = "blue")
# build random forest
data.rf = h2o.randomForest(data=train,
x = data,
y = class,
importance = T,
ntree=200,
depth=10)
# predict the result on testing data
rf_frame = h2o.predict(data.rf, test)
# compute the performance on the testing data
perf_rf = h2o.performance(rf_frame[,3],test[,2])
# plot the auc curve
plot(perf_rf, type = "roc", col = "blue", typ = "b")
# plot the score to get the cut-off
plot(perf_rf, type = "cutoffs", col = "blue")
h2o.shutdown(localh2o) | /titanic_3.R | no_license | ritchie-xl/titanic_ml_python_h2o | R | false | false | 2,932 | r | if ("package:h2o" %in% search()) { detach("package:h2o", unload=TRUE) }
if ("h2o" %in% rownames(installed.packages())) { remove.packages("h2o") }
# Next, we download packages that H2O depends on.
if (! ("methods" %in% rownames(installed.packages()))) { install.packages("methods") }
if (! ("statmod" %in% rownames(installed.packages()))) { install.packages("statmod") }
if (! ("stats" %in% rownames(installed.packages()))) { install.packages("stats") }
if (! ("graphics" %in% rownames(installed.packages()))) { install.packages("graphics") }
if (! ("RCurl" %in% rownames(installed.packages()))) { install.packages("RCurl") }
if (! ("rjson" %in% rownames(installed.packages()))) { install.packages("rjson") }
if (! ("tools" %in% rownames(installed.packages()))) { install.packages("tools") }
if (! ("utils" %in% rownames(installed.packages()))) { install.packages("utils") }
# Now we download, install and initialize the H2O package for R.
install.packages("h2o", type="source", repos=(c("http://h2o-release.s3.amazonaws.com/h2o/rel-simons/7/R")))
library(h2o)
localH2O = h2o.init()
library(h2o)
setwd('~/scripts/titanic_ml_python_h2o/')
#create localh2o instance
localh2o <- h2o.init(ip = 'localhost',
port=54321,
max_mem_size='4g',
nthreads = -1)
# upload data into the local h2o instances
data_path <- "data.csv"
data.hex <- h2o.importFile(localh2o,path = data_path)
# specify the data and class label
data = c(3,5,6,7,8,9,11)
class = 2
data.hex[,2] = as.factor(data.hex[,2])
# set the parameters for data split
data.split<- h2o.splitFrame(data=data.hex,ratios = 0.6)
# Assign the data as training data and testing data
train = data.split[[1]]
test = data.split[[2]]
# build the gbm
data.gbm = h2o.gbm(training_frame=train,
x=data,
y=class,
ntrees=50,
max.depth=5)
# print the gbm information
data.gbm
# predict the result on testing data
pred_frame = h2o.predict(data.gbm,test)
# compute the performance on testing data
#perf = h2o.performance(pred_frame[,3],test[,2])
perf = h2o.performance(data.gbm, test)
perf
# plot auc curve
plot(perf, type = "roc", col = "blue", typ = "b")
# plot the score to get the cut-off
plot(perf, type = "cutoffs", col = "blue")
# build random forest
data.rf = h2o.randomForest(data=train,
x = data,
y = class,
importance = T,
ntree=200,
depth=10)
# predict the result on testing data
rf_frame = h2o.predict(data.rf, test)
# compute the performance on the testing data
perf_rf = h2o.performance(rf_frame[,3],test[,2])
# plot the auc curve
plot(perf_rf, type = "roc", col = "blue", typ = "b")
# plot the score to get the cut-off
plot(perf_rf, type = "cutoffs", col = "blue")
h2o.shutdown(localh2o) |
#PJTN feature developmnet
library(glookoGEO)
#Exploratory analysis for one months worth of data
start <- '2016-01-01 00:00:00'
end <- '2016-01-01 23:59:59'
klystron <- 'li22_31'
#PJTN
query <- paste(list("select * from test.pjtn where timestamp >= '", start, "' and timestamp <= '", end, "' and klystron = ", klystron), collapse="")
#query <- paste(list("select * from test.pjtn where timestamp >= '", start, "' and timestamp <= '", end, "'"), collapse="")
pjtn <- glookoGetQuery(query)
#exploratory analysis of pjtn
max(pjtn$timestamp, na.rm=TRUE) #6/30/2017
min(pjtn$timestamp, na.rm=TRUE) #12/20/2016
summary(pjtn$value) #max 10.6 , mean .1, median .1, 21% of values missing... are these contiguous?
nrow(pjtn[pjtn$value > 0.2,])/nrow(pjtn) #22.6% of rows above 0.2
unique(pjtn$status) #0, 17 --> all NA values
unique(pjtn$severity) #0, 3 --> all NA values
#remove NAs from the data set
pjtn <- pjtn[!is.na(pjtn$value),]
##lets start with li_23 only
pjtn <- pjtn[pjtn$klystron=='li22_31',]
jitter_list <- lapply(unique(pjtn$klystron), function(i) {
pjtn_klystron <- pjtn[pjtn$klystron==i,]
jitter_events <- pvEvent(pjtn_klystron, threshold=.15, event_duration = 5)
return(jitter_events)
})
jitter_events <- do.call(rbind, jitter_list)
write.csv(jitter_events, '~/Documents/GitHub/slac-capstone/vikram/jitter_events_5_minutes.csv')
#extract features to predict jitter events
jitter_feature_list <- lapply(unique(jitter_events$klystron), function(i) {
je_klystron <- jitter_events[jitter_events$klystron==i,]
pjtn_klystron <- pjtn[pjtn$klystron==i,]
features_klystron <- timeSeriesFeatureGeneration(pjtn_klystron, je_klystron)
return(features_klystron)
})
#Now we're going to want to something similar with the following:
#sigma, mod_thy_resv, mod_thy_htr, swrd, focus_i, room
#extract time into variables
library(lubridate)
hour_data <- hour(pjtn$timestamp)
day_data <- day(pjtn$timestamp)
month_data <- month(pjtn$timestamp)
year_data <- year(pjtn$timestamp)
minute_data <- minute(pjtn$timestamp)
pjtn <- cbind(pjtn, minute_data, hour_data, day_data, month_data, year_data)
#Do the same with the jitter data
hour_data <- hour(jitter_events$timestamp)
day_data <- day(jitter_events$timestamp)
month_data <- month(jitter_events$timestamp)
year_data <- year(jitter_events$timestamp)
minute_data <- minute(jitter_events$timestamp)
jitter_events <- cbind(jitter_events, minute_data, hour_data, day_data, month_data, year_data)
write.csv(jitter_events, '~/Documents/GitHub/slac-capstone/vikram/jitter_events_5_minutes_time_features.csv')
##More exploratory analysis
#91% of the days had a jitter event... that means yesterdays data probably isn't going to be particularly predictive
#... needs to be more granular, or the PV is always fucked up at this machine
#every hour has a jitter event... is there a distribution for this though
#the distribution seems to basically be even across the day too, ranging between.03-.06... jitter does seem to rise
#a slight bit in the middle of the night ... but is that even relevant given they're using this during the day
#what is going on w/ these jitter events...
## what % of records are jitter ... 1.84% .. so there is class imbalance here
## what % of records are above .2 ... .012% ... so that is basically nothing
#.. these jitter events don't last long, they're every day, and every hour... it seems something much more granular
#is going to need to be done...
### hmm... there are 41 events at
##hmmm... I'm kind of thinking we should use mini jitters to predict and extended jitter...
# and then potentially use other PVs to predict a mini jitter + the extended jitter
library(glookoGEO)
#Exploratory analysis for one months worth of data
start <- '2016-01-01 00:00:00'
end <- '2016-12-31 23:59:59'
klystron <- 'li24_61'
#PJTN
query <- paste(list("select * from test.pjtn where timestamp >= '", start, "' and timestamp <= '", end, "' and klystron = ", klystron), collapse="")
#query <- paste(list("select * from test.pjtn where timestamp >= '", start, "' and timestamp <= '", end, "'"), collapse="")
pjtn2 <- glookoGetQuery(query)
#15 events have last more than 83 minutes.. which is 7% of the 217, 5 minute events
"select count(*), date_trunc('dayofweek', timestamp) from test.pjtn where timestamp "
query <- paste(list("select count(*), date_trunc('month', timestamp) from test.pjtn where timestamp >= '", start,
"' and timestamp <= '", end, "' and klystron = '", klystron, "'", " and value >= 0.15 group by date_trunc('month', timestamp)"), collapse="") | /exploratory_analysis/pjtn_exploration.R | no_license | vikdad1/stanford_linear_accelerator | R | false | false | 4,563 | r | #PJTN feature developmnet
library(glookoGEO)
#Exploratory analysis for one months worth of data
start <- '2016-01-01 00:00:00'
end <- '2016-01-01 23:59:59'
klystron <- 'li22_31'
#PJTN
query <- paste(list("select * from test.pjtn where timestamp >= '", start, "' and timestamp <= '", end, "' and klystron = ", klystron), collapse="")
#query <- paste(list("select * from test.pjtn where timestamp >= '", start, "' and timestamp <= '", end, "'"), collapse="")
pjtn <- glookoGetQuery(query)
#exploratory analysis of pjtn
max(pjtn$timestamp, na.rm=TRUE) #6/30/2017
min(pjtn$timestamp, na.rm=TRUE) #12/20/2016
summary(pjtn$value) #max 10.6 , mean .1, median .1, 21% of values missing... are these contiguous?
nrow(pjtn[pjtn$value > 0.2,])/nrow(pjtn) #22.6% of rows above 0.2
unique(pjtn$status) #0, 17 --> all NA values
unique(pjtn$severity) #0, 3 --> all NA values
#remove NAs from the data set
pjtn <- pjtn[!is.na(pjtn$value),]
##lets start with li_23 only
pjtn <- pjtn[pjtn$klystron=='li22_31',]
jitter_list <- lapply(unique(pjtn$klystron), function(i) {
pjtn_klystron <- pjtn[pjtn$klystron==i,]
jitter_events <- pvEvent(pjtn_klystron, threshold=.15, event_duration = 5)
return(jitter_events)
})
jitter_events <- do.call(rbind, jitter_list)
write.csv(jitter_events, '~/Documents/GitHub/slac-capstone/vikram/jitter_events_5_minutes.csv')
#extract features to predict jitter events
jitter_feature_list <- lapply(unique(jitter_events$klystron), function(i) {
je_klystron <- jitter_events[jitter_events$klystron==i,]
pjtn_klystron <- pjtn[pjtn$klystron==i,]
features_klystron <- timeSeriesFeatureGeneration(pjtn_klystron, je_klystron)
return(features_klystron)
})
#Now we're going to want to something similar with the following:
#sigma, mod_thy_resv, mod_thy_htr, swrd, focus_i, room
#extract time into variables
library(lubridate)
hour_data <- hour(pjtn$timestamp)
day_data <- day(pjtn$timestamp)
month_data <- month(pjtn$timestamp)
year_data <- year(pjtn$timestamp)
minute_data <- minute(pjtn$timestamp)
pjtn <- cbind(pjtn, minute_data, hour_data, day_data, month_data, year_data)
#Do the same with the jitter data
hour_data <- hour(jitter_events$timestamp)
day_data <- day(jitter_events$timestamp)
month_data <- month(jitter_events$timestamp)
year_data <- year(jitter_events$timestamp)
minute_data <- minute(jitter_events$timestamp)
jitter_events <- cbind(jitter_events, minute_data, hour_data, day_data, month_data, year_data)
write.csv(jitter_events, '~/Documents/GitHub/slac-capstone/vikram/jitter_events_5_minutes_time_features.csv')
##More exploratory analysis
#91% of the days had a jitter event... that means yesterdays data probably isn't going to be particularly predictive
#... needs to be more granular, or the PV is always fucked up at this machine
#every hour has a jitter event... is there a distribution for this though
#the distribution seems to basically be even across the day too, ranging between.03-.06... jitter does seem to rise
#a slight bit in the middle of the night ... but is that even relevant given they're using this during the day
#what is going on w/ these jitter events...
## what % of records are jitter ... 1.84% .. so there is class imbalance here
## what % of records are above .2 ... .012% ... so that is basically nothing
#.. these jitter events don't last long, they're every day, and every hour... it seems something much more granular
#is going to need to be done...
### hmm... there are 41 events at
##hmmm... I'm kind of thinking we should use mini jitters to predict and extended jitter...
# and then potentially use other PVs to predict a mini jitter + the extended jitter
library(glookoGEO)
#Exploratory analysis for one months worth of data
start <- '2016-01-01 00:00:00'
end <- '2016-12-31 23:59:59'
klystron <- 'li24_61'
#PJTN
query <- paste(list("select * from test.pjtn where timestamp >= '", start, "' and timestamp <= '", end, "' and klystron = ", klystron), collapse="")
#query <- paste(list("select * from test.pjtn where timestamp >= '", start, "' and timestamp <= '", end, "'"), collapse="")
pjtn2 <- glookoGetQuery(query)
#15 events have last more than 83 minutes.. which is 7% of the 217, 5 minute events
"select count(*), date_trunc('dayofweek', timestamp) from test.pjtn where timestamp "
query <- paste(list("select count(*), date_trunc('month', timestamp) from test.pjtn where timestamp >= '", start,
"' and timestamp <= '", end, "' and klystron = '", klystron, "'", " and value >= 0.15 group by date_trunc('month', timestamp)"), collapse="") |
source("init.R")
# input <- list(ni=30, nrow=5, near=3, icon="fir", path=2, colle1="green", colle2="lightgreen", colre1="green", colre2="darkgreen", iex = 1, ip = 1, seed = 12345, col="black")
# Shiny server for the Forest illustration app.
#
# Renders the forest plot from the UI inputs and builds the colour
# pickers (via renderUI) whose values feed the plot. The output ids
# (pdfPlot, colle1, colle2, colri1, colir2, col) are assumed to match
# the uiOutput() calls in ui.R and are kept unchanged.
server <- function(input, output) {
  output$pdfPlot <- renderPlot({
    Forest(ni = input$ni,
           nrow = input$nrow,
           near = input$near,
           # NOTE(review): eval(parse(...)) on a UI-supplied string;
           # prefer switch() over a fixed set of icon names.
           icon = eval(parse(text=input$icon)),
           path = input$path,
           sizeratio = input$sizeratio,
           colle = c(input$colle1,input$colle2),
           # BUG FIX: these previously read input$colre1/input$colre2,
           # which are never created anywhere; the colour pickers
           # rendered below use the ids "colri1" and "colir2", so the
           # right-hand colours were always NULL.
           colri = c(input$colri1,input$colir2),
           iex = input$iex,
           ip = input$ip,
           seed = input$seed,
           col = input$col)
  })
  output$colle1 <- renderUI({
    colourpicker::colourInput("colle1", "Left color from", "green")
  })
  output$colle2 <- renderUI({
    colourpicker::colourInput("colle2", "Left color to", "lightgreen")
  })
  output$colri1 <- renderUI({
    colourpicker::colourInput("colri1", "Right color from", "darkgreen")
  })
  output$colir2 <- renderUI({
    colourpicker::colourInput("colir2", "Right color to", "green")
  })
  output$col <- renderUI({
    colourpicker::colourInput("col", "Line color", "black")
  })
}
| /inst/shiny-examples/forestApp/server.R | no_license | vonthein/illustrator | R | false | false | 1,238 | r |
source("init.R")
# input <- list(ni=30, nrow=5, near=3, icon="fir", path=2, colle1="green", colle2="lightgreen", colre1="green", colre2="darkgreen", iex = 1, ip = 1, seed = 12345, col="black")
server <- function(input, output) {
output$pdfPlot <- renderPlot({
Forest(ni = input$ni,
nrow = input$nrow,
near = input$near,
icon = eval(parse(text=input$icon)),
path = input$path,
sizeratio = input$sizeratio,
colle = c(input$colle1,input$colle2),
colri = c(input$colre1,input$colre2),
iex = input$iex,
ip = input$ip,
seed = input$seed,
col = input$col)
})
output$colle1 <- renderUI({
colourpicker::colourInput("colle1", "Left color from", "green")
})
output$colle2 <- renderUI({
colourpicker::colourInput("colle2", "Left color to", "lightgreen")
})
output$colri1 <- renderUI({
colourpicker::colourInput("colri1", "Right color from", "darkgreen")
})
output$colir2 <- renderUI({
colourpicker::colourInput("colir2", "Right color to", "green")
})
output$col <- renderUI({
colourpicker::colourInput("col", "Line color", "black")
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polarity.R
\name{print.polarity_score}
\alias{print.polarity_score}
\title{Prints a polarity_score Object}
\usage{
\method{print}{polarity_score}(x, digits = 3, ...)
}
\arguments{
\item{x}{The polarity_score object.}
\item{digits}{The number of digits displayed if \code{values} is \code{TRUE}.}
\item{\ldots}{ignored}
}
\description{
Prints a polarity_score object.
}
| /man/print.polarity_score.Rd | no_license | hoodaly/qdap | R | false | true | 450 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polarity.R
\name{print.polarity_score}
\alias{print.polarity_score}
\title{Prints a polarity_score Object}
\usage{
\method{print}{polarity_score}(x, digits = 3, ...)
}
\arguments{
\item{x}{The polarity_score object.}
\item{digits}{The number of digits displayed if \code{values} is \code{TRUE}.}
\item{\ldots}{ignored}
}
\description{
Prints a polarity_score object.
}
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix wraps a matrix together with a cached copy of its
## inverse. It returns a list of accessor functions (set/get/setinv/
## getinv); storing a new matrix with set() clears any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(solve) {
    inv <<- solve
  }
  getinv <- function() {
    inv
  }
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve returns the inverse of the matrix held in a makeCacheMatrix
## object. A previously cached inverse is reused when available;
## otherwise the inverse is computed with solve() and stored for
## subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
# Potential Entries
# a<- makeCacheMatrix(matrix(1:4,2,2))
# [,1] [,2]
# [1,] -2 1.5
# [2,] 1 -0.5
# # b<- makeCacheMatrix(matrix(rnorm(n=9),3,3))
# [,1] [,2] [,3]
# [1,] -0.2840163 -3.7511456 -1.02852273
# [2,] -0.3923121 0.9376273 0.01711668
# [3,] -0.2475683 -0.3430920 0.33722210
| /cachematrix.R | no_license | aakritisvyas/ProgrammingAssignment2 | R | false | false | 1,410 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix created a list of functions including setting inverse and getting inverse
## Passing a vector of values into makeCacheMatrix will cache the function with those values
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(solve) m <<- solve
getinv <- function() m
list(set=set, get=get,
setinv = setinv,
getinv = getinv)
}
## This function either calculated the inverse of a matrix or retrieves it from cache
## if the values have already been parsed into the previous function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
# Potential Entries
# a<- makeCacheMatrix(matrix(1:4,2,2))
# [,1] [,2]
# [1,] -2 1.5
# [2,] 1 -0.5
# # b<- makeCacheMatrix(matrix(rnorm(n=9),3,3))
# [,1] [,2] [,3]
# [1,] -0.2840163 -3.7511456 -1.02852273
# [2,] -0.3923121 0.9376273 0.01711668
# [3,] -0.2475683 -0.3430920 0.33722210
|
# Read the full UCI household power consumption data set.
# "?" marks missing readings and becomes NA via na.strings; all columns
# (including Date/Time) are kept as character, not factors.
elet<- read.table("/Users/amir/desktop/household_power_consumption.txt", sep=";",nrows= 2075260, header=TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?")
# Subsetting the full data to obtain the data related to two days:
# 2007-02-01 and 2007-02-02 (the Date column stores d/m/yyyy strings)
sub<- subset(elet, (elet$Date == "1/2/2007" | elet$Date== "2/2/2007"))
# creating Plot1: histogram of Global Active Power, saved as a 480x480 PNG
png("plot1.png", width=480, height= 480)
hist(sub$Global_active_power, col= "red", xlab= "Global Active Power (kilowatts)", ylab= "Frequency", main= "Global Active Power")
dev.off()
| /Plot1.R | no_license | syedamiralisha/ExData_Plotting1 | R | false | false | 544 | r | #reading data into R
elet<- read.table("/Users/amir/desktop/household_power_consumption.txt", sep=";",nrows= 2075260, header=TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?")
# Subsetting the full data to obtain the data related to two days:
sub<- subset(elet, (elet$Date == "1/2/2007" | elet$Date== "2/2/2007"))
# creating Plot1
png("plot1.png", width=480, height= 480)
hist(sub$Global_active_power, col= "red", xlab= "Global Active Power (kilowatts)", ylab= "Frequency", main= "Global Active Power")
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.