content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#Working code made by Jim as of 12/4
#Robert modifications 12/4
#Read the dataset
baseball.dat = read.table(file.choose(),header=TRUE)
# -------------------------------------------------------------------
# PRE-DEFINE FUNCTIONS BEFORE MAIN CODE
# -------------------------------------------------------------------
# function to get separate response variable from predictors
ExtractResponseVariable <- function(dataset, name) {
  # Split a data frame into its response column and the remaining predictor
  # columns, returned as list(response, predictors).  `response` is a
  # one-column data frame; `predictors` is `dataset` with that column removed.
  # If `name` is not a column of `dataset`, a message is printed and
  # list(0L, 0L) is returned instead.
  if (!(name %in% colnames(dataset))) {
    print(paste("Name ", name, " not found in dataset", sep = ''))
    return(list(0L, 0L))
  }
  name <- as.character(name)
  # Single-bracket indexing keeps the response as a data frame.
  response <- dataset[name]
  predictors <- dataset
  predictors[name] <- NULL  # drop the response column from the predictors
  list(response, predictors)
}
# -------------------------------------------------------------------
#Evaluate the fitness of some model, output from lm or glm
#The userfunc should take a fitted model and output a scalar
#fitness value
FitnessFunction <- function(model, userfunc = FALSE) {
  # Score a fitted model (output of lm/glm).  By default the fitness is the
  # model's AIC, extractAIC(model)[2] (lower = fitter).  `userfunc`, when
  # supplied, must take a fitted model and return a scalar fitness value.
  #
  # BUG FIX: the original tested `userfunc == FALSE`, which throws an error
  # whenever `userfunc` actually is a function (`==` is not defined for
  # closures).  Test with is.function() instead.
  if (is.function(userfunc)) {
    fitness.value <- userfunc(model)
  } else {
    fitness.value <- extractAIC(model)[2]
  }
  return(fitness.value)
}
# -------------------------------------------------------------------
# function that determines 'fitness' of an invidivudal based on the quality
# of the LS fit. The default for determining fitness is the Aikake Criteria Index
# but the user can supply their own custom-made fitness function
# **may be worth it to treat 'predictors' as global variable or object
# Assess the fitness of one individual: a 0/1 genome over the predictor
# columns.  A least-squares model is fitted using only the predictors whose
# gene is 1, then scored with FitnessFunction() (AIC by default, or a
# user-supplied scalar fitness function).
AssessFitness <- function(individual, response, predictors, userfunc=FALSE){
#Keep only the predictor columns selected by the genome.
predictors.individual <- predictors[,individual==1]
#`response` is a one-column data frame; regress its column on all selected
#predictors (the data frame is passed positionally as lm()'s `data` arg).
model.out <- lm(response[,1]~., predictors.individual)
fitness.value <- FitnessFunction(model.out,userfunc=userfunc)
return(fitness.value)
}
# -------------------------------------------------------------------
# Example of user-supplied fitness function only for internal testing
# A test - this does exactly the same as the AIC function,
# but its user-define so can be used to test the fitness_function #useage
# Example user-supplied fitness function, for internal testing only.
# Reproduces the AIC computed by extractAIC() (default scale = 0, k = 2):
# n*log(RSS/n) + k*edf, so results can be compared against the default
# fitness path in FitnessFunction().
TestUserFunc <- function (fit, scale = 0, k = 2) {
  n <- length(fit$residuals)
  # Effective degrees of freedom used by the fit.
  edf <- n - fit$df.residual
  # BUG FIX: the original called `deviance.lm(fit)`, which is not an
  # exported function; use the generic deviance() to get the residual
  # sum of squares.
  RSS <- deviance(fit)
  dev <- if (scale > 0)
    RSS/scale - n
  else n * log(RSS/n)
  return(dev + k * edf)
}
# -------------------------------------------------------------------
# Function that breeds P new children based on parents' genome and fitness
# Breed P new children from the current generation using rank-based parent
# selection: since lower fitness values (AIC) are fitter, rank(-fitness)
# assigns the fittest individual the largest rank, and reproduction
# probability 2*rank/(P*(P+1)) sums to 1 over the population.
# NOTE(review): `predictors` is accepted but never used, and `P` is read
# from the global environment rather than passed in -- confirm intended.
Breed <- function(generation, fitness.vec, predictors, prob.mute) {
# generation is a list with each element containing the genome of an individual
# fitness.vec is a vector of fitness values (lower = fitter, e.g. AIC)
prob.reproduction <- 2*rank(-fitness.vec)/(P*(P+1))
#Draw two distinct parent indices for each of the P children.
parent.index.list <- lapply(1:P, function(x) sample(P,2,prob = prob.reproduction,replace=FALSE))
#Create each child by crossover of its two parents plus random mutation.
children <- lapply(parent.index.list, function(x) CrossOverMutate(generation, x, prob.mute))
# return P children to be considered for selection
# also return fitness evaluation
return(children)
}
# -------------------------------------------------------------------
# Function that produces a single child from two chosen parents
# and allows for the possibility of mutation
CrossOverMutate <- function(generation, parent.index, prob.mutate) {
  # Build one child genome from two parents: roughly half of the loci
  # (length/2, chosen at random without replacement) are copied from the
  # second parent, the rest inherited from the first.  Each locus is then
  # independently flipped with probability `prob.mutate`.
  mum <- generation[[parent.index[1]]]
  dad <- generation[[parent.index[2]]]
  # Loci to take from the second parent.
  swap.at <- sample(1:length(dad), as.integer(length(dad)/2), replace = FALSE)
  offspring <- mum
  offspring[swap.at] <- dad[swap.at]
  # Bit-flip mutation: adding the Bernoulli draws modulo 2 toggles the
  # selected 0/1 loci.
  flips <- rbinom(length(offspring), 1, prob.mutate)
  (offspring + flips) %% 2
}
# -------------------------------------------------------------------
# MAIN PROGRAM
# -------------------------------------------------------------------
## Put all this in a function that can be called by user on the dataset
# Define response and predictor variables (response = salary; predictors =
# all remaining columns of the baseball data).
subsets <- ExtractResponseVariable(baseball.dat,"salary")
# Optionally log-transform the response (salary); `response` stays a
# one-column data frame either way, so log() applies elementwise.
flag.log.scale <- 1
if (flag.log.scale) {
response <- log(subsets[[1]])
} else {
response <- subsets[[1]]
}
predictors <- subsets[[2]]
# Define/create key variables a priori
# length() of a data frame is its number of columns, i.e. the genome length.
C <- length(predictors) #Get the number of predictors (GLOBAL)
P <- as.integer(C*1.5) #number of individuals in a given generation (GLOBAL)
Niter <- 60 #number of generation iterations to carry out (GLOBAL)
prob.mutate <- 1.0/(P*sqrt(C)) #mutation rate (should be about 1%) Formula suggested by G&H
fitness <- matrix(0,P,Niter) #evolution of the fitness values over model run (rows = individuals, cols = generations)
frac.replace <- 0.2 # % of individuals in child/adult population selected/replaced (currently unused by the simplified loop below)
# Define first generation: each individual is a random 0/1 genome of length C.
generation.old <- lapply(1:P, function(x) {rbinom(C,1,0.5)}) # list of individual genomes
#assess fitness of the first generation
fitness[,1] <- sapply(generation.old, AssessFitness, response = response, predictors = predictors, userfunc = FALSE)
# -------------------------------------------------------------------
# MAIN LOOP for genetic algorithm
# put this in a loop function
# Loop through generations and apply selective forces to create iterative generations
start <- Sys.time()
# Iterate the genetic algorithm: each pass breeds P children and replaces
# the entire parent generation with them (full generational replacement;
# the partial-replacement code was commented out in the original).
for (n in 1:(Niter-1)) {
  # BUG FIX: the original passed the undefined name `mutation.rate`;
  # the mutation probability defined above is `prob.mutate`.
  children <- Breed(generation.old, fitness[,n], predictors, prob.mutate)
  # Score every child (lower AIC = fitter).
  children.fitness <- sapply(children, AssessFitness, response = response, predictors = predictors, userfunc = FALSE)
  # Children become the next generation wholesale.
  generation.old <- children
  fitness[,n+1] <- children.fitness
  # Report the best (minimum) fitness reached this generation.
  print(min(children.fitness))
}
stop <- Sys.time()
print(stop-start)
# -------------------------------------------------------------------
# Plot up envelope of fitness values
# Scatter the negative AIC of every individual in every generation to show
# how the population's fitness envelope evolves over the run.
plot(-fitness, xlim = c(0, Niter), ylim = c(50, 425), type = "n",
     ylab = "Negative AIC", xlab = "Generation",
     main = "AIC Values For Genetic Algorithm")
for (i in 1:Niter) {
  points(rep(i, P), -fitness[, i], pch = 20)
}
# Refit the model of the best (lowest-AIC) individual of the final
# generation; order()[1] is faster than which() for locating the minimum.
generation.new <- generation.old
best.index <- order(fitness[, Niter])[1]
best.individual <- generation.new[[best.index]]
print(best.individual)
predictors.individual <- predictors[, best.individual == 1]
model.out <- lm(response[, 1] ~ ., predictors.individual)
summary(model.out)
| /Master_V2.R | no_license | stat243proj/project | R | false | false | 8,045 | r | #Working code made by Jim as of 12/4
#Robert modifications 12/4
#Read the dataset
baseball.dat = read.table(file.choose(),header=TRUE)
# -------------------------------------------------------------------
# PRE-DEFINE FUNCTIONS BEFORE MAIN CODE
# -------------------------------------------------------------------
# function to get separate response variable from predictors
ExtractResponseVariable <- function(dataset,name) {
#Takes a dataframe, dataset and a name of a response variable
#Extracts the response variable and dataframe of predictors, outputs these as members
#of a list
if ( name %in% colnames(dataset)) {
name <- as.character(name)
#Get matrix of predictors
predictors <- dataset
predictors[name] <- NULL
#Get response variable
response <- dataset[name]
return(list(response,predictors))
}
else {
print(paste("Name ",name," not found in dataset",sep=''))
return(list(0L,0L))
}
}
# -------------------------------------------------------------------
#Evaluate the fitness of some model, output from lm or glm
#The userfunc should take a fitted model and output a scalar
#fitness value
FitnessFunction <- function(model, userfunc = FALSE) {
  # Score a fitted model (output of lm/glm).  By default the fitness is the
  # model's AIC, extractAIC(model)[2] (lower = fitter).  `userfunc`, when
  # supplied, must take a fitted model and return a scalar fitness value.
  #
  # BUG FIX: the original tested `userfunc == FALSE`, which throws an error
  # whenever `userfunc` actually is a function (`==` is not defined for
  # closures).  Test with is.function() instead.
  if (is.function(userfunc)) {
    fitness.value <- userfunc(model)
  } else {
    fitness.value <- extractAIC(model)[2]
  }
  return(fitness.value)
}
# -------------------------------------------------------------------
# function that determines 'fitness' of an invidivudal based on the quality
# of the LS fit. The default for determining fitness is the Aikake Criteria Index
# but the user can supply their own custom-made fitness function
# **may be worth it to treat 'predictors' as global variable or object
AssessFitness <- function(individual, response, predictors, userfunc=FALSE){
#Evaluate the fitness of some model, output from lm or glm
#The userfunc should take a fitted model and output a scalar
#fitness value
#RMS simplified the following line
predictors.individual <- predictors[,individual==1]
model.out <- lm(response[,1]~., predictors.individual)
fitness.value <- FitnessFunction(model.out,userfunc=userfunc)
return(fitness.value)
}
# -------------------------------------------------------------------
# Example of user-supplied fitness function only for internal testing
# A test - this does exactly the same as the AIC function,
# but its user-define so can be used to test the fitness_function #useage
# Example user-supplied fitness function, for internal testing only.
# Reproduces the AIC computed by extractAIC() (default scale = 0, k = 2):
# n*log(RSS/n) + k*edf, so results can be compared against the default
# fitness path in FitnessFunction().
TestUserFunc <- function (fit, scale = 0, k = 2) {
  n <- length(fit$residuals)
  # Effective degrees of freedom used by the fit.
  edf <- n - fit$df.residual
  # BUG FIX: the original called `deviance.lm(fit)`, which is not an
  # exported function; use the generic deviance() to get the residual
  # sum of squares.
  RSS <- deviance(fit)
  dev <- if (scale > 0)
    RSS/scale - n
  else n * log(RSS/n)
  return(dev + k * edf)
}
# -------------------------------------------------------------------
# Function that breeds P new children based on parents' genome and fitness
Breed <- function(generation, fitness.vec, predictors, prob.mute) {
# generation is a list with each element containing the genome of an individual
# fitness.vec is a vector
prob.reproduction <- 2*rank(-fitness.vec)/(P*(P+1))
parent.index.list <- lapply(1:P, function(x) sample(P,2,prob = prob.reproduction,replace=FALSE))
children <- lapply(parent.index.list, function(x) CrossOverMutate(generation, x, prob.mute))
# return P children to be considered for selection
# also return fitness evaluation
return(children)
}
# -------------------------------------------------------------------
# Function that produces a single child from two chosen parents
# and allows for the possibility of mutation
CrossOverMutate <- function(generation, parent.index, prob.mutate){
#Create child individual with half of its genetic material from parent1 and the other half from parent2
#The generic material is chosen at random using sample
parent1 <- generation[[parent.index[1]]]
parent2 <- generation[[parent.index[2]]]
child <- parent1
#generate locations of genetic information to swap
pos <- sample(1:length(parent2),as.integer(length(parent2)/2),replace=FALSE)
child[pos] <- parent2[pos]
#generate mutation vector
mutate = rbinom(length(child),1,prob.mutate)
#do the mutation - this will ensure that if a 2 is produced,
#set to zero. If not, keeps as 1.
child = (child+mutate)%%2
return(child)
}
# -------------------------------------------------------------------
# MAIN PROGRAM
# -------------------------------------------------------------------
## Put all this in a function that can be called by user on the dataset
# Define response and predictor variables
subsets <- ExtractResponseVariable(baseball.dat,"salary")
# Choose to scale or reject bad data based on boolean flag
flag.log.scale <- 1
if (flag.log.scale) {
response <- log(subsets[[1]])
} else {
response <- subsets[[1]]
}
predictors <- subsets[[2]]
# Define/create key variables a priori
C <- length(predictors) #Get the number of predictors (GLOBAL)
P <- as.integer(C*1.5) #number of individuals in a given generation (GLOBAL)
Niter <- 60 #number of generation iterations to carry out (GLOBAL)
prob.mutate <- 1.0/(P*sqrt(C)) #mutation rate (should be about 1%) Formula suggested by G&H
fitness <- matrix(0,P,Niter) #evolution of the fitness values over model run
frac.replace <- 0.2 # % of individuals in child/adult population selected/replaced
# Define first generation (without FOR loops, lists are preferred)
generation.old <- lapply(1:P, function(x) {rbinom(C,1,0.5)}) # list of individual genomes
#assess fitness of the first generation
fitness[,1] <- sapply(generation.old, AssessFitness, response = response, predictors = predictors, userfunc = FALSE)
# -------------------------------------------------------------------
# MAIN LOOP for genetic algorithm
# put this in a loop function
# Loop through generations and apply selective forces to create iterative generations
start <- Sys.time()
# Iterate the genetic algorithm: each pass breeds P children and replaces
# the entire parent generation with them (full generational replacement;
# the partial-replacement code was commented out in the original).
for (n in 1:(Niter-1)) {
  # BUG FIX: the original passed the undefined name `mutation.rate`;
  # the mutation probability defined above is `prob.mutate`.
  children <- Breed(generation.old, fitness[,n], predictors, prob.mutate)
  # Score every child (lower AIC = fitter).
  children.fitness <- sapply(children, AssessFitness, response = response, predictors = predictors, userfunc = FALSE)
  # Children become the next generation wholesale.
  generation.old <- children
  fitness[,n+1] <- children.fitness
  # Report the best (minimum) fitness reached this generation.
  print(min(children.fitness))
}
stop <- Sys.time()
print(stop-start)
# -------------------------------------------------------------------
# Plot up envelope of fitness values
plot(-fitness,xlim=c(0,Niter),ylim=c(50,425),type="n",ylab="Negative AIC",
xlab="Generation",main="AIC Values For Genetic Algorithm")
for(i in 1:Niter){points(rep(i,P),-fitness[,i],pch=20)}
# plot the fitness matrix to see how the entire population evolves over time
# get fit for 'best' individual at end
generation.new <- generation.old
#faster way of getting the best index than using which
best.index <- order(fitness[,Niter])[1]
best.individual <- generation.new[[best.index]]
print(best.individual)
predictors.individual <- predictors[,best.individual==1]
model.out <- lm(response[,1]~., predictors.individual)
summary(model.out)
|
library(dplyr)
library(knitr)
library(RWeka)
RF <- make_Weka_classifier("weka/classifiers/trees/RandomForest")
NB <- make_Weka_classifier("weka/classifiers/bayes/NaiveBayes")
MLP <- make_Weka_classifier("weka/classifiers/functions/MultilayerPerceptron")
trainDf <- read.csv('data/train.csv')
testDf <- read.csv('data/test.csv')
contractRefDf <- read.csv('data/contract_ref.csv')
calendarRefDf <- read.csv('data/calendar_ref.csv')
dailyAggDf <- read.csv('data/daily_aggregate.csv')
roamingDf <- read.csv('data/roaming_monthly.csv')
trainDf$TARGET <- as.factor(trainDf$TARGET)
# Initialise the monthly roaming columns (usage and session count for
# months 206-210) to zero on copies of the train and test frames; they are
# filled in from the roaming data below.
trainRoamDf <- trainDf
for (m in 206:210) {
  trainRoamDf[, paste0("R", m, "_USAGE")] <- 0
  trainRoamDf[, paste0("R", m, "_SESSION_COUNT")] <- 0
}
testRoamDf <- testDf
for (m in 206:210) {
  testRoamDf[, paste0("R", m, "_USAGE")] <- 0
  testRoamDf[, paste0("R", m, "_SESSION_COUNT")] <- 0
}
# Fill the monthly roaming columns (R<month>_USAGE / R<month>_SESSION_COUNT)
# of `df` for contract `k` from `orig`, the roaming rows for that contract.
# Months with no roaming row are left at their initialised value of 0.
#
# BUG FIX: for month 209 the original filtered `val` (already reduced to the
# month-208 rows) instead of `orig`, so the 209 roaming figures were never
# transferred into either data frame.  The month loop below always filters
# the full per-contract subset `orig`.
FillRoamingColumns <- function(df, k, orig) {
  for (m in 206:210) {
    val <- orig[orig$CALL_MONTH_KEY == m, ]
    if (nrow(val) > 0) {
      df[df$CONTRACT_KEY == k, paste0("R", m, "_USAGE")] <- val$USAGE
      df[df$CONTRACT_KEY == k, paste0("R", m, "_SESSION_COUNT")] <- val$SESSION_COUNT
    }
  }
  df
}
# Route each roaming contract to whichever frame (train or test) holds it.
for (k in unique(roamingDf$CONTRACT_KEY)) {
  orig <- roamingDf[roamingDf$CONTRACT_KEY == k, ]
  if (nrow(trainRoamDf[trainRoamDf$CONTRACT_KEY == k, ]) > 0) {
    trainRoamDf <- FillRoamingColumns(trainRoamDf, k, orig)
  } else {
    testRoamDf <- FillRoamingColumns(testRoamDf, k, orig)
  }
}
trainRoamDf <- trainRoamDf %>% mutate(X206_SESSION_COUNT = X206_SESSION_COUNT - R206_SESSION_COUNT,
X206_USAGE = X206_USAGE - R206_USAGE,
X207_SESSION_COUNT = X207_SESSION_COUNT - R207_SESSION_COUNT,
X207_USAGE = X207_USAGE - R207_USAGE,
X208_SESSION_COUNT = X208_SESSION_COUNT - R208_SESSION_COUNT,
X208_USAGE = X208_USAGE - R208_USAGE,
X209_SESSION_COUNT = X209_SESSION_COUNT - R209_SESSION_COUNT,
X209_USAGE = X209_USAGE - R209_USAGE,
X210_SESSION_COUNT = X210_SESSION_COUNT - R210_SESSION_COUNT,
X210_USAGE = X210_USAGE - R210_USAGE)
testRoamDf <- testRoamDf %>% mutate(X206_SESSION_COUNT = X206_SESSION_COUNT - R206_SESSION_COUNT,
X206_USAGE = X206_USAGE - R206_USAGE,
X207_SESSION_COUNT = X207_SESSION_COUNT - R207_SESSION_COUNT,
X207_USAGE = X207_USAGE - R207_USAGE,
X208_SESSION_COUNT = X208_SESSION_COUNT - R208_SESSION_COUNT,
X208_USAGE = X208_USAGE - R208_USAGE,
X209_SESSION_COUNT = X209_SESSION_COUNT - R209_SESSION_COUNT,
X209_USAGE = X209_USAGE - R209_USAGE,
X210_SESSION_COUNT = X210_SESSION_COUNT - R210_SESSION_COUNT,
X210_USAGE = X210_USAGE - R210_USAGE)
contractRefDf$RATE_PLAN <- gsub("[2][0][0-9][0-9] ", "", contractRefDf$RATE_PLAN)
contractRefDf$RATE_PLAN <- gsub(" [0-9]+(\\.[0-9]*)* GB", "", contractRefDf$RATE_PLAN)
contractRefDf$RATE_PLAN <- gsub(" [0-9]+", "", contractRefDf$RATE_PLAN)
contractRefDf$RATE_PLAN <- as.factor(contractRefDf$RATE_PLAN)
trainRoamDf <- merge(trainRoamDf, contractRefDf, by = "CONTRACT_KEY")
testRoamDf <- merge(testRoamDf, contractRefDf, by = "CONTRACT_KEY")
myModel <- MLP(TARGET~X206_SESSION_COUNT + X206_USAGE +
X207_SESSION_COUNT + X207_USAGE +
X208_SESSION_COUNT + X208_USAGE +
X209_SESSION_COUNT + X209_USAGE +
X210_SESSION_COUNT + X210_USAGE +
R206_SESSION_COUNT + R206_USAGE +
R207_SESSION_COUNT + R207_USAGE +
R208_SESSION_COUNT + R208_USAGE +
R209_SESSION_COUNT + R209_USAGE +
R210_SESSION_COUNT + R210_USAGE +
RATE_PLAN
, data=trainRoamDf)
myTarget = predict(myModel, newdata = testRoamDf, type="class")
myResult <- data.frame(CONTRACT_KEY=testRoamDf$CONTRACT_KEY, PREDICTED_TARGET=myTarget)
write.table(myResult, file="output/bsmEllah.csv", sep =",", row.names= FALSE) | /scripts/22-tony0.58943/tony0.58943.r | no_license | AmirGeorge/csen1061-data-science-project2 | R | false | false | 6,847 | r | library(dplyr)
library(knitr)
library(RWeka)
RF <- make_Weka_classifier("weka/classifiers/trees/RandomForest")
NB <- make_Weka_classifier("weka/classifiers/bayes/NaiveBayes")
MLP <- make_Weka_classifier("weka/classifiers/functions/MultilayerPerceptron")
trainDf <- read.csv('data/train.csv')
testDf <- read.csv('data/test.csv')
contractRefDf <- read.csv('data/contract_ref.csv')
calendarRefDf <- read.csv('data/calendar_ref.csv')
dailyAggDf <- read.csv('data/daily_aggregate.csv')
roamingDf <- read.csv('data/roaming_monthly.csv')
trainDf$TARGET <- as.factor(trainDf$TARGET)
trainRoamDf <- trainDf
trainRoamDf[,"R206_USAGE"] <- 0
trainRoamDf[,"R206_SESSION_COUNT"] <- 0
trainRoamDf[,"R207_USAGE"] <- 0
trainRoamDf[,"R207_SESSION_COUNT"] <- 0
trainRoamDf[,"R208_USAGE"] <- 0
trainRoamDf[,"R208_SESSION_COUNT"] <- 0
trainRoamDf[,"R209_USAGE"] <- 0
trainRoamDf[,"R209_SESSION_COUNT"] <- 0
trainRoamDf[,"R210_USAGE"] <- 0
trainRoamDf[,"R210_SESSION_COUNT"] <- 0
testRoamDf <- testDf
testRoamDf[,"R206_USAGE"] <- 0
testRoamDf[,"R206_SESSION_COUNT"] <- 0
testRoamDf[,"R207_USAGE"] <- 0
testRoamDf[,"R207_SESSION_COUNT"] <- 0
testRoamDf[,"R208_USAGE"] <- 0
testRoamDf[,"R208_SESSION_COUNT"] <- 0
testRoamDf[,"R209_USAGE"] <- 0
testRoamDf[,"R209_SESSION_COUNT"] <- 0
testRoamDf[,"R210_USAGE"] <- 0
testRoamDf[,"R210_SESSION_COUNT"] <- 0
# Fill the monthly roaming columns (R<month>_USAGE / R<month>_SESSION_COUNT)
# of `df` for contract `k` from `orig`, the roaming rows for that contract.
# Months with no roaming row are left at their initialised value of 0.
#
# BUG FIX: for month 209 the original filtered `val` (already reduced to the
# month-208 rows) instead of `orig`, so the 209 roaming figures were never
# transferred into either data frame.  The month loop below always filters
# the full per-contract subset `orig`.
FillRoamingColumns <- function(df, k, orig) {
  for (m in 206:210) {
    val <- orig[orig$CALL_MONTH_KEY == m, ]
    if (nrow(val) > 0) {
      df[df$CONTRACT_KEY == k, paste0("R", m, "_USAGE")] <- val$USAGE
      df[df$CONTRACT_KEY == k, paste0("R", m, "_SESSION_COUNT")] <- val$SESSION_COUNT
    }
  }
  df
}
# Route each roaming contract to whichever frame (train or test) holds it.
for (k in unique(roamingDf$CONTRACT_KEY)) {
  orig <- roamingDf[roamingDf$CONTRACT_KEY == k, ]
  if (nrow(trainRoamDf[trainRoamDf$CONTRACT_KEY == k, ]) > 0) {
    trainRoamDf <- FillRoamingColumns(trainRoamDf, k, orig)
  } else {
    testRoamDf <- FillRoamingColumns(testRoamDf, k, orig)
  }
}
trainRoamDf <- trainRoamDf %>% mutate(X206_SESSION_COUNT = X206_SESSION_COUNT - R206_SESSION_COUNT,
X206_USAGE = X206_USAGE - R206_USAGE,
X207_SESSION_COUNT = X207_SESSION_COUNT - R207_SESSION_COUNT,
X207_USAGE = X207_USAGE - R207_USAGE,
X208_SESSION_COUNT = X208_SESSION_COUNT - R208_SESSION_COUNT,
X208_USAGE = X208_USAGE - R208_USAGE,
X209_SESSION_COUNT = X209_SESSION_COUNT - R209_SESSION_COUNT,
X209_USAGE = X209_USAGE - R209_USAGE,
X210_SESSION_COUNT = X210_SESSION_COUNT - R210_SESSION_COUNT,
X210_USAGE = X210_USAGE - R210_USAGE)
testRoamDf <- testRoamDf %>% mutate(X206_SESSION_COUNT = X206_SESSION_COUNT - R206_SESSION_COUNT,
X206_USAGE = X206_USAGE - R206_USAGE,
X207_SESSION_COUNT = X207_SESSION_COUNT - R207_SESSION_COUNT,
X207_USAGE = X207_USAGE - R207_USAGE,
X208_SESSION_COUNT = X208_SESSION_COUNT - R208_SESSION_COUNT,
X208_USAGE = X208_USAGE - R208_USAGE,
X209_SESSION_COUNT = X209_SESSION_COUNT - R209_SESSION_COUNT,
X209_USAGE = X209_USAGE - R209_USAGE,
X210_SESSION_COUNT = X210_SESSION_COUNT - R210_SESSION_COUNT,
X210_USAGE = X210_USAGE - R210_USAGE)
contractRefDf$RATE_PLAN <- gsub("[2][0][0-9][0-9] ", "", contractRefDf$RATE_PLAN)
contractRefDf$RATE_PLAN <- gsub(" [0-9]+(\\.[0-9]*)* GB", "", contractRefDf$RATE_PLAN)
contractRefDf$RATE_PLAN <- gsub(" [0-9]+", "", contractRefDf$RATE_PLAN)
contractRefDf$RATE_PLAN <- as.factor(contractRefDf$RATE_PLAN)
trainRoamDf <- merge(trainRoamDf, contractRefDf, by = "CONTRACT_KEY")
testRoamDf <- merge(testRoamDf, contractRefDf, by = "CONTRACT_KEY")
myModel <- MLP(TARGET~X206_SESSION_COUNT + X206_USAGE +
X207_SESSION_COUNT + X207_USAGE +
X208_SESSION_COUNT + X208_USAGE +
X209_SESSION_COUNT + X209_USAGE +
X210_SESSION_COUNT + X210_USAGE +
R206_SESSION_COUNT + R206_USAGE +
R207_SESSION_COUNT + R207_USAGE +
R208_SESSION_COUNT + R208_USAGE +
R209_SESSION_COUNT + R209_USAGE +
R210_SESSION_COUNT + R210_USAGE +
RATE_PLAN
, data=trainRoamDf)
myTarget = predict(myModel, newdata = testRoamDf, type="class")
myResult <- data.frame(CONTRACT_KEY=testRoamDf$CONTRACT_KEY, PREDICTED_TARGET=myTarget)
write.table(myResult, file="output/bsmEllah.csv", sep =",", row.names= FALSE) |
# Exploratory plot 3: energy sub-metering over 2007-02-01 and 2007-02-02.
# skip = 1 jumps over the header row, so column names are assigned manually.
# NOTE(review): missing values in this dataset are coded "?"; no na.strings
# is set here, but the Sub_metering columns plotted below appear unaffected
# for these two days -- confirm against the raw file.
data <- read.table("household_power_consumption.txt", skip = 1, sep = ";")
names(data) <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
                 "Voltage", "Global_intensity", "Sub_metering_1",
                 "Sub_metering_2", "Sub_metering_3")
# Filter the two target days while Date is still a d/m/Y string.
data1 <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
data1$Date <- as.Date(data1$Date, format = "%d/%m/%Y")
data1$Time <- strptime(data1$Time, format = "%H:%M:%S")
# strptime() stamped today's date on every time; overwrite with the real
# dates (1440 minutes per day).
data1[1:1440, "Time"] <- format(data1[1:1440, "Time"], "2007-02-01 %H:%M:%S")
data1[1441:2880, "Time"] <- format(data1[1441:2880, "Time"], "2007-02-02 %H:%M:%S")
png("plot3.png", width = 480, height = 480)
plot(data1$Time, data1$Sub_metering_1, type = "l", col = "grey",
     xlab = "", ylab = "Energy sub metering")
lines(data1$Time, data1$Sub_metering_2, col = "red")
lines(data1$Time, data1$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("grey", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
names(data) <- c("Date","Time","Global_active_power","Global_reactive_power",
"Voltage","Global_intensity","Sub_metering_1","Sub_metering_2",
"Sub_metering_3")
data1 <- subset(data, data$Date=="1/2/2007" | data$Date =="2/2/2007")
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
data1$Time <- strptime(data1$Time, format="%H:%M:%S")
data1[1:1440,"Time"] <- format(data1[1:1440,"Time"],"2007-02-01 %H:%M:%S")
data1[1441:2880,"Time"] <- format(data1[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
png("plot3.png", width=480, height=480)
with(data1, plot(Time, Sub_metering_1,type = "l",col = "grey",
xlab = "",
ylab ="Energy sub metering"))
lines(data1$Time, data1$Sub_metering_2, col = "red")
lines(data1$Time, data1$Sub_metering_3, col = "blue")
legend("topright", lty=1, col=c("grey","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off() |
#Alejandra Martínez Blancas & Carlos Martorell 03/05/22 alemtzb@ciencias.unam.mx
#read data
DD=read.csv("clumpingmecs/simulations/DDs.csv") #mortality parameter
BBs1=read.csv("clumpingmecs/simulations/BBs1.csv") #parameter a
BBs2=read.csv("clumpingmecs/simulations/BBs2.csv") #parameter b
BBs3=read.csv("clumpingmecs/simulations/BBs3.csv") #parametro c
alfas1=read.csv("clumpingmecs/simulations/alfas1.csv") #parameter g
alfas2=read.csv("clumpingmecs/simulations/alfas2.csv") #parameter h
betas=read.csv("clumpingmecs/simulations/betas.csv")#facilitation parameter
#convert all data to matrices
BB1=as.matrix(BBs1[,2:ncol(BBs1)])
rownames(BB1)=BBs1[,1]
BB2=as.matrix(BBs2[,2:ncol(BBs2)])
rownames(BB2)=BBs2[,1]
BB3=as.matrix(BBs3[,2:ncol(BBs3)])
rownames(BB3)=BBs3[,1]
alphas1=as.matrix(alfas1[,2:ncol(alfas1)])
rownames(alphas1)=alfas1[,1]
alphas2=as.matrix(alfas2[,2:ncol(alfas2)])
rownames(alphas2)=alfas2[,1]
bbetas=as.matrix(betas[,2:ncol(betas)])
rownames(bbetas)=betas[,1]
#We take into account only the birth rate parameters of the last seven years
BB1=BB1[,8:14]
BB2=BB2[,8:14]
BB3=BB3[,8:14]
#Detransform parameters
BB2=1/(1+exp(-BB2))-.5
BB3=-1/(1+exp(-BB3))*5/1000
-999->alphas1[which(is.na(alphas1[,])=="TRUE")]
0->alphas2[which(is.na(alphas2[,])=="TRUE")]
bbetas=1/(1+exp(-bbetas))
0->bbetas[which(is.na(bbetas[,])=="TRUE")]
#To obtain total abundance of the species for which we did not calculate pairwise interactions
spnum=ncol(alphas1) #to obtain the number of species in our study
tx0=matrix(ncol=ncol(alphas1),nrow=nrow(alphas1))
0->tx0[which(is.na(alphas1[,])=="FALSE")]
1->tx0[which(is.na(alphas1[,])=="TRUE")]
tx0=tx0[,-37]
#separate the parameters of the species for which we have abundance data
alpabu1=alphas1[1:33,1:37]
alpabu2=alphas2[1:33,1:37]
betabu=bbetas[1:33,1:37]
DDabu=DD[1:33,]
BB1abu=BB1[1:33,]
BB2abu=BB2[1:33,]
BB3abu=BB3[1:33,]
#separate the parameters of the species for which we have presence/absence data
alppa1=alphas1[34:36,1:37]
alppa2=alphas2[34:36,1:37]
betpa=bbetas[34:36,1:37]
DDpa=DD[34:36,]
BB1pa=BB1[34:36,]
BB2pa=BB2[34:36,]
BB3pa=BB3[34:36,]
#Function for species with abundance data to simulate the abundance of species in the next year
# One time step of the population model for the 33 species with abundance
# data.  Given current abundances `tx` (all 36 modelled species) and the
# pooled abundance `txothers` of species without pairwise interaction
# estimates, returns next year's abundance of species 1-33 as
# survivors + recruits.
lam=function(DD,BB1,BB2,BB3,alphas1,alphas2,bbetas,tx,txothers,year,depth){
#Survivors: fraction (1 - mortality parameter) of current abundance.
surv=(1-DD[,2])*tx[1:33]
#Select the birth-rate parameters (a, b, c) for the requested year.
BB1=BB1[,year]
BB2=BB2[,year]
BB3=BB3[,year]
#Per-capita birth rate with a quadratic response to depth.
BB=exp(BB1+BB2*depth+BB3*depth^2)
#Depth-dependent competition coefficients (parameters g and h).
alphas=exp(alphas1+alphas2*depth)
#Competitive pressure from the 36 modelled species plus, via column 37,
#the pooled unmodelled "other" species.
alphaspre=alphas[,1:36]%*%tx
alphasothers=alphas[,37]*txothers
alphasum=alphaspre+alphasothers
#Facilitation acts on log(abundance + 1).
txmas=log(tx+1)
txmasother=log(txothers+1)
betaspre=bbetas[,1:36]%*%txmas
betasothers=bbetas[,37]*txmasother
betassum=betaspre+betasothers
fac=exp(betassum)
#Recruitment: births reduced by competition, amplified by facilitation.
new=BB*tx[1:33]/(1+alphasum)*fac
t2=surv+new
return(t2)
}
#Function for species with presence/absence data: simulates whether each of
#the 3 occupancy-modelled species is present or absent in the next year.
#Same arguments as lam(); rows 34:36 of tx hold occupancy probabilities.
lampa=function(DD,BB1,BB2,BB3,alphas1,alphas2,bbetas,tx,txothers,year,depth){
#probability of persisting from the current year
surv=(1-DD[,2])*tx[34:36]
BB1=BB1[,year]
BB2=BB2[,year]
BB3=BB3[,year]
#depth-dependent birth rate (quadratic in depth on the log scale)
BB=exp(BB1+BB2*depth+BB3*depth^2)
#depth-dependent competition coefficients
alphas=exp(alphas1+alphas2*depth)
alphaspre=alphas[,1:36]%*%tx
alphasothers=alphas[,37]*txothers
alphasum=alphaspre+alphasothers
#facilitation acts on log(abundance + 1)
txmas=log(tx+1)
txmasother=log(txothers+1)
betaspre=bbetas[,1:36]%*%txmas
betasothers=bbetas[,37]*txmasother
betassum=betaspre+betasothers
fac=exp(betassum)
#expected recruitment intensity
new=BB*tx[34:36]/(1+alphasum)*fac
#P(present) = 1 - (1 - surv) * exp(-new); exp(-new) looks like a
#Poisson zero-recruit term -- TODO confirm against the model write-up
t2=1-(1-surv)*exp(-new)
return(t2)
}
# Draw random initial abundances/occupancies for every species.
spnum <- ncol(alphas1)
tx <- matrix(ncol = 1, nrow = nrow(alphas1))
tx[1:33, 1] <- runif(33, min = .001, max = 1)  # abundance species
tx[34:36, 1] <- runif(3, min = .001, max = 1)  # presence/absence species
tx[35, 1] <- 0  # drop species 35, which could not be modelled correctly
#Function that integrates both species groups into one simulation step:
#advances all 36 species one year via lam() and lampa().
#NOTE(review): the body uses the global 'betpa' rather than its 'betapa'
#argument, while the callers below pass the undefined name 'betapa' in that
#position; this only works because the unused argument is never forced.
#Fixing the body alone would break the existing calls, so it is documented
#here instead of changed.
lamx=function(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,year,depth){
#total abundance of the unmodelled neighbours seen by each species
txothers=as.vector(tx0%*%tx)
t2abu=lam(DDabu,BB1abu,BB2abu,BB3abu,alpabu1,alpabu2,betabu,tx,txothers[1:33],year,depth)
t2pa=lampa(DDpa,BB1pa,BB2pa,BB3pa,alppa1,alppa2,betpa,tx,txothers[34:36],year,depth)
t2=matrix(nrow=36,ncol=1)
t2[1:33,]=t2abu[1:33,]
t2[34:36,]=t2pa
#NOTE(review): DD here is the global mortality table, not a parameter
rownames(t2)=rownames(DD)
return(t2)
}
#Runs the simulation for a given number of time steps (iter), discarding an
#initial burn-in of 'burn' steps; each step draws one of the seven parameter
#years at random. Returns a 36 x iter matrix with one state vector per column.
simu=function(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,depth,iter=1000,burn=100){
#burn-in: advance without recording so the random initial state is forgotten
for(i in 1:burn) {
tx=lamx(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,floor(runif(1)*7)+1,depth)
}
sal=matrix(nrow=36,ncol=iter)
#record the current state, then advance one year with a random year index 1-7
for(i in 1:iter) {
sal[,i]=tx
tx=lamx(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,floor(runif(1)*7)+1,depth)
}
#NOTE(review): BB1 here is the global 36-row matrix, not the BB1abu argument
rownames(sal)=rownames(BB1)
return(sal)
}
# Sweep the simulation over soil depths from 3 to 28 cm in 0.1 cm steps.
# For each depth, simu() is run and the per-species mean abundance is kept,
# giving a 36 x (number of depths) matrix.
profs=function(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,iter){
  depths <- seq(3, 28, by = 0.1)
  out <- matrix(nrow = 36, ncol = length(depths))
  for (d in seq_along(depths)) {
    out[, d] <- rowMeans(simu(DDabu, alpabu1, alpabu2, betabu, BB1abu, BB2abu,
                              BB3abu, alppa1, alppa2, betapa, DDpa, BB1pa,
                              BB2pa, BB3pa, tx, tx0, depths[d], iter = iter))
  }
  out
}
#Run the full depth sweep (100000 iterations per depth; this is slow).
#NOTE(review): 'betapa' is not defined anywhere in this script (the global is
#'betpa'); the call works only because lamx() never forces that argument.
prue=profs(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,100000)
#To plot the outcome. Each color represents a different species
plot(-1000,-1000,xlim=c(0,28),ylim=c(0,max(prue)))
prof=seq(3,28,0.1)
for(i in 1:36) lines(prof,prue[i,],col=i)
# Repeat the whole depth sweep 'rep' times so the abundance curves along the
# soil-depth gradient can be averaged over replicates.
# Returns a 36 x n_depths x rep array.
meanprof=function(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,iter,rep){
  depths <- seq(3, 28, by = 0.1)
  out <- array(dim = c(36, length(depths), rep))
  for (k in seq_len(rep)) {
    out[, , k] <- profs(DDabu, alpabu1, alpabu2, betabu, BB1abu, BB2abu,
                        BB3abu, alppa1, alppa2, betapa, DDpa, BB1pa, BB2pa,
                        BB3pa, tx, tx0, iter)
  }
  out
}
#Average 20 replicated depth sweeps to obtain smoother abundance curves.
pruemean=meanprof(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,5000,20)
#To plot the outcome (axis labels in Spanish: soil depth vs abundance)
plot(-1000,-1000,xlim=c(3,28),ylim=c(0,max(pruemean,na.rm=TRUE)),xlab="Profundidad de Suelo",ylab="Abundancia")
prof=seq(3,28,0.1)
for(i in 1:36) lines(prof,rowMeans(pruemean[i,,]),col=i)
| /simulations/WithInteractions.R | no_license | alemtzb/clumpingmecs | R | false | false | 6,555 | r | #Alejandra Martínez Blancas & Carlos Martorell 03/05/22 alemtzb@ciencias.unam.mx
# Read the fitted parameter tables.
DD <- read.csv("clumpingmecs/simulations/DDs.csv")        # mortality parameter
BBs1 <- read.csv("clumpingmecs/simulations/BBs1.csv")     # parameter a
BBs2 <- read.csv("clumpingmecs/simulations/BBs2.csv")     # parameter b
BBs3 <- read.csv("clumpingmecs/simulations/BBs3.csv")     # parameter c
alfas1 <- read.csv("clumpingmecs/simulations/alfas1.csv") # parameter g
alfas2 <- read.csv("clumpingmecs/simulations/alfas2.csv") # parameter h
betas <- read.csv("clumpingmecs/simulations/betas.csv")   # facilitation parameter
# Convert every table to a matrix keyed by species name (first column).
BB1 <- as.matrix(BBs1[, 2:ncol(BBs1)])
rownames(BB1) <- BBs1[, 1]
BB2 <- as.matrix(BBs2[, 2:ncol(BBs2)])
rownames(BB2) <- BBs2[, 1]
BB3 <- as.matrix(BBs3[, 2:ncol(BBs3)])
rownames(BB3) <- BBs3[, 1]
alphas1 <- as.matrix(alfas1[, 2:ncol(alfas1)])
rownames(alphas1) <- alfas1[, 1]
alphas2 <- as.matrix(alfas2[, 2:ncol(alfas2)])
rownames(alphas2) <- alfas2[, 1]
bbetas <- as.matrix(betas[, 2:ncol(betas)])
rownames(bbetas) <- betas[, 1]
# Keep only the birth-rate parameters of the last seven years (columns 8-14).
BB1 <- BB1[, 8:14]
BB2 <- BB2[, 8:14]
BB3 <- BB3[, 8:14]
# Back-transform parameters from the scale used during fitting.
BB2 <- 1 / (1 + exp(-BB2)) - 0.5
BB3 <- -1 / (1 + exp(-BB3)) * 5 / 1000
alphas1[is.na(alphas1)] <- -999  # exp(-999) ~ 0, i.e. no competitive effect
alphas2[is.na(alphas2)] <- 0
bbetas <- 1 / (1 + exp(-bbetas))
bbetas[is.na(bbetas)] <- 0
# Indicator matrix for the species whose pairwise interactions were not fitted.
# NOTE(review): the NAs of alphas1 were overwritten with -999 just above, so at
# this point is.na(alphas1) is all FALSE and tx0 ends up all zeros -- confirm
# that this statement ordering is intended.
spnum <- ncol(alphas1)  # number of species in the study
tx0 <- matrix(ncol = ncol(alphas1), nrow = nrow(alphas1))
tx0[!is.na(alphas1)] <- 0
tx0[is.na(alphas1)] <- 1
tx0 <- tx0[, -37]
# Parameters of the 33 species with abundance data.
alpabu1 <- alphas1[1:33, 1:37]
alpabu2 <- alphas2[1:33, 1:37]
betabu <- bbetas[1:33, 1:37]
DDabu <- DD[1:33, ]
BB1abu <- BB1[1:33, ]
BB2abu <- BB2[1:33, ]
BB3abu <- BB3[1:33, ]
# Parameters of the 3 species with presence/absence data only.
alppa1 <- alphas1[34:36, 1:37]
alppa2 <- alphas2[34:36, 1:37]
betpa <- bbetas[34:36, 1:37]
DDpa <- DD[34:36, ]
BB1pa <- BB1[34:36, ]
BB2pa <- BB2[34:36, ]
BB3pa <- BB3[34:36, ]
#Function for species with abundance data: projects the abundance of the 33
#abundance-modelled species one year ahead.
#DD: mortality table (column 2 = death rate); BB1/BB2/BB3: yearly birth-rate
#parameter matrices; alphas1/alphas2: competition parameters; bbetas:
#facilitation parameters; tx: current state of all 36 species; txothers:
#total abundance of the unmodelled neighbours; year: column index (1-7);
#depth: soil depth in cm. Returns the 33-row vector of next-year abundances.
lam=function(DD,BB1,BB2,BB3,alphas1,alphas2,bbetas,tx,txothers,year,depth){
#survivors: fraction (1 - death rate) of the current abundance
surv=(1-DD[,2])*tx[1:33]
#select the birth-rate parameters of the requested year (shadows the inputs)
BB1=BB1[,year]
BB2=BB2[,year]
BB3=BB3[,year]
#depth-dependent birth rate (quadratic in depth on the log scale)
BB=exp(BB1+BB2*depth+BB3*depth^2)
#depth-dependent competition coefficients
alphas=exp(alphas1+alphas2*depth)
alphaspre=alphas[,1:36]%*%tx
alphasothers=alphas[,37]*txothers
alphasum=alphaspre+alphasothers
#facilitation acts on log(abundance + 1)
txmas=log(tx+1)
txmasother=log(txothers+1)
betaspre=bbetas[,1:36]%*%txmas
betasothers=bbetas[,37]*txmasother
betassum=betaspre+betasothers
fac=exp(betassum)
#recruitment: birth rate damped by competition, boosted by facilitation
new=BB*tx[1:33]/(1+alphasum)*fac
t2=surv+new
return(t2)
}
#Function for species with presence/absence data: simulates whether each of
#the 3 occupancy-modelled species is present or absent in the next year.
#Same arguments as lam(); rows 34:36 of tx hold occupancy probabilities.
lampa=function(DD,BB1,BB2,BB3,alphas1,alphas2,bbetas,tx,txothers,year,depth){
#probability of persisting from the current year
surv=(1-DD[,2])*tx[34:36]
BB1=BB1[,year]
BB2=BB2[,year]
BB3=BB3[,year]
#depth-dependent birth rate (quadratic in depth on the log scale)
BB=exp(BB1+BB2*depth+BB3*depth^2)
#depth-dependent competition coefficients
alphas=exp(alphas1+alphas2*depth)
alphaspre=alphas[,1:36]%*%tx
alphasothers=alphas[,37]*txothers
alphasum=alphaspre+alphasothers
#facilitation acts on log(abundance + 1)
txmas=log(tx+1)
txmasother=log(txothers+1)
betaspre=bbetas[,1:36]%*%txmas
betasothers=bbetas[,37]*txmasother
betassum=betaspre+betasothers
fac=exp(betassum)
#expected recruitment intensity
new=BB*tx[34:36]/(1+alphasum)*fac
#P(present) = 1 - (1 - surv) * exp(-new); exp(-new) looks like a
#Poisson zero-recruit term -- TODO confirm against the model write-up
t2=1-(1-surv)*exp(-new)
return(t2)
}
# Draw random initial abundances/occupancies for every species.
spnum <- ncol(alphas1)
tx <- matrix(ncol = 1, nrow = nrow(alphas1))
tx[1:33, 1] <- runif(33, min = .001, max = 1)  # abundance species
tx[34:36, 1] <- runif(3, min = .001, max = 1)  # presence/absence species
tx[35, 1] <- 0  # drop species 35, which could not be modelled correctly
#Function that integrates both species groups into one simulation step:
#advances all 36 species one year via lam() and lampa().
#NOTE(review): the body uses the global 'betpa' rather than its 'betapa'
#argument, while the callers below pass the undefined name 'betapa' in that
#position; this only works because the unused argument is never forced.
#Fixing the body alone would break the existing calls, so it is documented
#here instead of changed.
lamx=function(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,year,depth){
#total abundance of the unmodelled neighbours seen by each species
txothers=as.vector(tx0%*%tx)
t2abu=lam(DDabu,BB1abu,BB2abu,BB3abu,alpabu1,alpabu2,betabu,tx,txothers[1:33],year,depth)
t2pa=lampa(DDpa,BB1pa,BB2pa,BB3pa,alppa1,alppa2,betpa,tx,txothers[34:36],year,depth)
t2=matrix(nrow=36,ncol=1)
t2[1:33,]=t2abu[1:33,]
t2[34:36,]=t2pa
#NOTE(review): DD here is the global mortality table, not a parameter
rownames(t2)=rownames(DD)
return(t2)
}
#Runs the simulation for a given number of time steps (iter), discarding an
#initial burn-in of 'burn' steps; each step draws one of the seven parameter
#years at random. Returns a 36 x iter matrix with one state vector per column.
simu=function(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,depth,iter=1000,burn=100){
#burn-in: advance without recording so the random initial state is forgotten
for(i in 1:burn) {
tx=lamx(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,floor(runif(1)*7)+1,depth)
}
sal=matrix(nrow=36,ncol=iter)
#record the current state, then advance one year with a random year index 1-7
for(i in 1:iter) {
sal[,i]=tx
tx=lamx(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,floor(runif(1)*7)+1,depth)
}
#NOTE(review): BB1 here is the global 36-row matrix, not the BB1abu argument
rownames(sal)=rownames(BB1)
return(sal)
}
# Sweep the simulation over soil depths from 3 to 28 cm in 0.1 cm steps.
# For each depth, simu() is run and the per-species mean abundance is kept,
# giving a 36 x (number of depths) matrix.
profs=function(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,iter){
  depths <- seq(3, 28, by = 0.1)
  out <- matrix(nrow = 36, ncol = length(depths))
  for (d in seq_along(depths)) {
    out[, d] <- rowMeans(simu(DDabu, alpabu1, alpabu2, betabu, BB1abu, BB2abu,
                              BB3abu, alppa1, alppa2, betapa, DDpa, BB1pa,
                              BB2pa, BB3pa, tx, tx0, depths[d], iter = iter))
  }
  out
}
#Run the full depth sweep (100000 iterations per depth; this is slow).
#NOTE(review): 'betapa' is not defined anywhere in this script (the global is
#'betpa'); the call works only because lamx() never forces that argument.
prue=profs(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,100000)
#To plot the outcome. Each color represents a different species
plot(-1000,-1000,xlim=c(0,28),ylim=c(0,max(prue)))
prof=seq(3,28,0.1)
for(i in 1:36) lines(prof,prue[i,],col=i)
# Repeat the whole depth sweep 'rep' times so the abundance curves along the
# soil-depth gradient can be averaged over replicates.
# Returns a 36 x n_depths x rep array.
meanprof=function(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,iter,rep){
  depths <- seq(3, 28, by = 0.1)
  out <- array(dim = c(36, length(depths), rep))
  for (k in seq_len(rep)) {
    out[, , k] <- profs(DDabu, alpabu1, alpabu2, betabu, BB1abu, BB2abu,
                        BB3abu, alppa1, alppa2, betapa, DDpa, BB1pa, BB2pa,
                        BB3pa, tx, tx0, iter)
  }
  out
}
#Average 20 replicated depth sweeps to obtain smoother abundance curves.
pruemean=meanprof(DDabu,alpabu1,alpabu2,betabu,BB1abu,BB2abu,BB3abu,alppa1,alppa2,betapa,DDpa,BB1pa,BB2pa,BB3pa,tx,tx0,5000,20)
#To plot the outcome (axis labels in Spanish: soil depth vs abundance)
plot(-1000,-1000,xlim=c(3,28),ylim=c(0,max(pruemean,na.rm=TRUE)),xlab="Profundidad de Suelo",ylab="Abundancia")
prof=seq(3,28,0.1)
for(i in 1:36) lines(prof,rowMeans(pruemean[i,,]),col=i)
|
# Read a tab-separated Strava export. colClasses "NULL" drops columns
# 1, 6 and 7; NA lets read.table guess the types of columns 2-5.
testRfile <- read.table(
"testfile.txt",
sep="\t", header=TRUE,
colClasses=c("NULL", NA, NA, NA, NA, "NULL", "NULL"))
| /reading a strava txt file into R.r | no_license | jeffshep/Strava | R | false | false | 143 | r | testRfile <- read.table(
"testfile.txt",
sep="\t", header=TRUE,
colClasses=c("NULL", NA, NA, NA, NA, "NULL", "NULL"))
|
#' Building R Packages - Week 2 Assignment
#' filename: fars_functions.R
#'
#' These functions read in data taken from the US National Highway Traffic Safety Administration's
#' \href{https://www.nhtsa.gov/Data/Fatality-Analysis-Reporting-System-(FARS)}{Fatality Analysis Reporting System}
#'
#' @title fars_read
#'
#' @description Reads a FARS CSV file into a data frame via
#' \code{readr::read_csv} and returns it as a tibble-style data frame.
#'
#' @param filename A character path to the data file. If the file does not
#' exist, execution stops with an error.
#'
#' @return A data frame (tbl_df) built from the CSV file.
#'
#' @importFrom readr read_csv
#' @importFrom dplyr tbl_df
#'
#' @examples
#' \dontrun{
#' accident_2015 <- fars_read(".inst/extdata/accident_2015.csv.bz2")
#' }
#' @export
fars_read <- function(filename) {
  if (!file.exists(filename)) {
    stop("file '", filename, "' does not exist")
  }
  raw <- suppressMessages(readr::read_csv(filename, progress = FALSE))
  dplyr::tbl_df(raw)
}
#' @title make_filename
#'
#' @description
#' Builds the file name of a yearly FARS data set in the form
#' "accident_<year>.csv.bz2". The input is coerced to integer first, so
#' non-integer numerics are truncated and non-numeric input yields NA.
#'
#' @param year Numerical or integer input indicating a year of a dataset
#'
#' @return A character string in the format "accident_<year>.csv.bz2" that
#' can be used as a file name
#'
#' @examples
#' \dontrun{
#' make_filename(2015)
#' }
#' @export
make_filename <- function(year) {
  sprintf("accident_%d.csv.bz2", as.integer(year))
}
#' @title fars_read_years
#'
#' @description
#' The function accepts a vector or list of years and returns a list of
#' data frames with MONTH and year columns based on data in
#' "accident_<year>.csv.bz2" files. The files need to be located in the
#' working directory.
#'
#' @param years A vector or list of years in numeric or integer format
#'
#' @return Returns a list with one element per requested year: a data frame
#' with the MONTH and year columns of that year's file, or NULL (with an
#' "invalid year" warning) if the file cannot be read.
#'
#' @importFrom dplyr mutate select %>%
#'
#' @examples
#' \dontrun{
#' fars_read_years(2013:2015)
#' fars_read_years(list(2013, 2014))
#'
#' # Results in a warning
#' fars_read_years(2016)
#' }
#' @export
fars_read_years <- function(years) {
  lapply(years, function(year) {
    file <- make_filename(year)
    tryCatch({
      dat <- fars_read(file)
      # Use the tidy-eval verbs; the underscored mutate_()/select_()
      # variants are deprecated in current dplyr.
      dplyr::mutate(dat, year = YEAR) %>%
        dplyr::select(MONTH, year)
    }, error = function(e) {
      warning("invalid year: ", year)
      return(NULL)
    })
  })
}
#' @title fars_summarize_years
#'
#' @description
#' Takes a vector or list of years, reads them in using fars_read_years,
#' binds the resulting data frames together and counts accidents per
#' year and month. The long counts are then converted to a wide format
#' (one column per year) with tidyr.
#'
#' @param years A vector or list of years (numeric or integer) to
#' read in and summarize
#'
#' @return a data.frame of monthly accident counts, one column per year
#'
#' @importFrom dplyr bind_rows group_by summarize %>% n
#' @importFrom tidyr spread
#'
#' @examples
#' \dontrun{
#' fars_summarize_years("2015")
#' fars_summarize_years(c(2013.0,2014))
#' }
#' @export
fars_summarize_years <- function(years) {
  dat_list <- fars_read_years(years)
  # Tidy-eval verbs; the underscored group_by_()/summarize_()/spread_()
  # variants are deprecated in current dplyr/tidyr.
  dplyr::bind_rows(dat_list) %>%
    dplyr::group_by(year, MONTH) %>%
    dplyr::summarize(n = n()) %>%
    tidyr::spread(year, n)
}
#' @title fars_map_state
#'
#' @description
#' This function takes a state number and a year as input and shows
#' an overview of fatalities on a map in that particular time period.
#'
#' Uses make_filename and fars_read from the current package.
#' Removes coordinate outliers - longitude values greater than 900
#' and latitude values greater than 90 are set to NA before plotting.
#'
#' @param state.num The number of a state in the US as used in the FARS dataset
#' Should be numeric or integer.
#' @param year The year of analysis (numeric or integer)
#'
#' @return a graphical overview of fatalities on a map in a particular time period.
#' Returns an error if the state or year do not exist in the data set.
#'
#' @examples
#' \dontrun{
#' fars_map_state(45, 2015)
#'
#' # Results in an error
#' fars_map_state(45, 2016)
#' fars_map_state(60, 2015)
#' }
#'
#' @importFrom dplyr filter
#' @importFrom maps map
#' @importFrom graphics points
#'
#' @export
fars_map_state <- function(state.num, year) {
  filename <- make_filename(year)
  data <- fars_read(filename)
  state.num <- as.integer(state.num)
  if (!(state.num %in% unique(data$STATE))) {
    stop("invalid STATE number: ", state.num)
  }
  # Tidy-eval filter; the deprecated filter_(.dots = ...) built the
  # condition from a pasted string, which current dplyr no longer supports.
  data.sub <- dplyr::filter(data, STATE == state.num)
  if (nrow(data.sub) == 0L) {
    message("no accidents to plot")
    return(invisible(NULL))
  }
  # Recode out-of-range coordinates to NA so they do not distort the map.
  is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
  is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
  with(data.sub, {
    maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
              xlim = range(LONGITUD, na.rm = TRUE))
    graphics::points(LONGITUD, LATITUDE, pch = 46)
  })
}
| /R/fars_functions.R | no_license | smallikarjun/MyfarsPkg | R | false | false | 5,658 | r | #' Building R Packages - Week 2 Assignment
#' filename: fars_functions.R
#'
#' These functions read in data taken from the US National Highway Traffic Safety Administration's
#' \href{https://www.nhtsa.gov/Data/Fatality-Analysis-Reporting-System-(FARS)}{Fatality Analysis Reporting System}
#'
#' @title fars_read
#'
#' @description Reads a FARS CSV file into a data frame via
#' \code{readr::read_csv} and returns it as a tibble-style data frame.
#'
#' @param filename A character path to the data file. If the file does not
#' exist, execution stops with an error.
#'
#' @return A data frame (tbl_df) built from the CSV file.
#'
#' @importFrom readr read_csv
#' @importFrom dplyr tbl_df
#'
#' @examples
#' \dontrun{
#' accident_2015 <- fars_read(".inst/extdata/accident_2015.csv.bz2")
#' }
#' @export
fars_read <- function(filename) {
  if (!file.exists(filename)) {
    stop("file '", filename, "' does not exist")
  }
  raw <- suppressMessages(readr::read_csv(filename, progress = FALSE))
  dplyr::tbl_df(raw)
}
#' @title make_filename
#'
#' @description
#' Builds the file name of a yearly FARS data set in the form
#' "accident_<year>.csv.bz2". The input is coerced to integer first, so
#' non-integer numerics are truncated and non-numeric input yields NA.
#'
#' @param year Numerical or integer input indicating a year of a dataset
#'
#' @return A character string in the format "accident_<year>.csv.bz2" that
#' can be used as a file name
#'
#' @examples
#' \dontrun{
#' make_filename(2015)
#' }
#' @export
make_filename <- function(year) {
  sprintf("accident_%d.csv.bz2", as.integer(year))
}
#' @title fars_read_years
#'
#' @description
#' The function accepts a vector or list of years and returns a list of
#' data frames with MONTH and year columns based on data in
#' "accident_<year>.csv.bz2" files. The files need to be located in the
#' working directory.
#'
#' @param years A vector or list of years in numeric or integer format
#'
#' @return Returns a list with one element per requested year: a data frame
#' with the MONTH and year columns of that year's file, or NULL (with an
#' "invalid year" warning) if the file cannot be read.
#'
#' @importFrom dplyr mutate select %>%
#'
#' @examples
#' \dontrun{
#' fars_read_years(2013:2015)
#' fars_read_years(list(2013, 2014))
#'
#' # Results in a warning
#' fars_read_years(2016)
#' }
#' @export
fars_read_years <- function(years) {
  lapply(years, function(year) {
    file <- make_filename(year)
    tryCatch({
      dat <- fars_read(file)
      # Use the tidy-eval verbs; the underscored mutate_()/select_()
      # variants are deprecated in current dplyr.
      dplyr::mutate(dat, year = YEAR) %>%
        dplyr::select(MONTH, year)
    }, error = function(e) {
      warning("invalid year: ", year)
      return(NULL)
    })
  })
}
#' @title fars_summarize_years
#'
#' @description
#' Takes a vector or list of years, reads them in using fars_read_years,
#' binds the resulting data frames together and counts accidents per
#' year and month. The long counts are then converted to a wide format
#' (one column per year) with tidyr.
#'
#' @param years A vector or list of years (numeric or integer) to
#' read in and summarize
#'
#' @return a data.frame of monthly accident counts, one column per year
#'
#' @importFrom dplyr bind_rows group_by summarize %>% n
#' @importFrom tidyr spread
#'
#' @examples
#' \dontrun{
#' fars_summarize_years("2015")
#' fars_summarize_years(c(2013.0,2014))
#' }
#' @export
fars_summarize_years <- function(years) {
  dat_list <- fars_read_years(years)
  # Tidy-eval verbs; the underscored group_by_()/summarize_()/spread_()
  # variants are deprecated in current dplyr/tidyr.
  dplyr::bind_rows(dat_list) %>%
    dplyr::group_by(year, MONTH) %>%
    dplyr::summarize(n = n()) %>%
    tidyr::spread(year, n)
}
#' @title fars_map_state
#'
#' @description
#' This function takes a state number and a year as input and shows
#' an overview of fatalities on a map in that particular time period.
#'
#' Uses make_filename and fars_read from the current package.
#' Removes coordinate outliers - longitude values greater than 900
#' and latitude values greater than 90 are set to NA before plotting.
#'
#' @param state.num The number of a state in the US as used in the FARS dataset
#' Should be numeric or integer.
#' @param year The year of analysis (numeric or integer)
#'
#' @return a graphical overview of fatalities on a map in a particular time period.
#' Returns an error if the state or year do not exist in the data set.
#'
#' @examples
#' \dontrun{
#' fars_map_state(45, 2015)
#'
#' # Results in an error
#' fars_map_state(45, 2016)
#' fars_map_state(60, 2015)
#' }
#'
#' @importFrom dplyr filter
#' @importFrom maps map
#' @importFrom graphics points
#'
#' @export
fars_map_state <- function(state.num, year) {
  filename <- make_filename(year)
  data <- fars_read(filename)
  state.num <- as.integer(state.num)
  if (!(state.num %in% unique(data$STATE))) {
    stop("invalid STATE number: ", state.num)
  }
  # Tidy-eval filter; the deprecated filter_(.dots = ...) built the
  # condition from a pasted string, which current dplyr no longer supports.
  data.sub <- dplyr::filter(data, STATE == state.num)
  if (nrow(data.sub) == 0L) {
    message("no accidents to plot")
    return(invisible(NULL))
  }
  # Recode out-of-range coordinates to NA so they do not distort the map.
  is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
  is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
  with(data.sub, {
    maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
              xlim = range(LONGITUD, na.rm = TRUE))
    graphics::points(LONGITUD, LATITUDE, pch = 46)
  })
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package-huf.R
\name{rmf_write_kdep}
\alias{rmf_write_kdep}
\title{Write a MODFLOW hydraulic conductivity depth-dependence capability file}
\usage{
rmf_write_kdep(
kdep,
file = {
cat("Please select kdep file to overwrite or provide new filename ...\\n")
file.choose()
},
iprn = -1,
...
)
}
\arguments{
\item{kdep}{an \code{RMODFLOW} kdep object}
\item{file}{filename to write to; typically '*.kdep'}
\item{iprn}{format code for printing arrays in the listing file; defaults to -1 (no printing)}
\item{...}{arguments passed to \code{rmfi_write_array}. Can be ignored when arrays are INTERNAL or CONSTANT.}
}
\value{
\code{NULL}
}
\description{
Write a MODFLOW hydraulic conductivity depth-dependence capability file
}
\seealso{
\code{\link{rmf_create_kdep}}, \code{\link{rmf_read_kdep}} and \url{http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?kdep.htm}
}
| /man/rmf_write_kdep.Rd | no_license | rogiersbart/RMODFLOW | R | false | true | 980 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package-huf.R
\name{rmf_write_kdep}
\alias{rmf_write_kdep}
\title{Write a MODFLOW hydraulic conductivity depth-dependence capability file}
\usage{
rmf_write_kdep(
kdep,
file = {
cat("Please select kdep file to overwrite or provide new filename ...\\n")
file.choose()
},
iprn = -1,
...
)
}
\arguments{
\item{kdep}{an \code{RMODFLOW} kdep object}
\item{file}{filename to write to; typically '*.kdep'}
\item{iprn}{format code for printing arrays in the listing file; defaults to -1 (no printing)}
\item{...}{arguments passed to \code{rmfi_write_array}. Can be ignored when arrays are INTERNAL or CONSTANT.}
}
\value{
\code{NULL}
}
\description{
Write a MODFLOW hydraulic conductivity depth-dependence capability file
}
\seealso{
\code{\link{rmf_create_kdep}}, \code{\link{rmf_read_kdep}} and \url{http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?kdep.htm}
}
|
# BOOTSTRAP CHEAT SHEET FOR SHINY ----
# DS4B 202-R ----
# LIBRARIES ----
library(shiny)
library(tidyverse)
library(plotly)
# USER INTERFACE ----
# Static single-page app: each numbered section below demonstrates one group
# of Bootstrap 3 classes (grid, text, alignment, lists, contextual colors).
ui <- shiny::fluidPage(
    title = "Bootstrap Cheat Sheet for Shiny",
    div(
        class = "container",
        id = "page",
        # HEADER ----
        h1(class = "page-header", "Bootstrap Cheat Sheet", tags$small("For Shiny")),
        p(
            class = "lead",
            "This cheat sheet is the first part of the ",
            a(href = "https://university.business-science.io/", target = "_blank", "Expert Shiny Application Development Course"),
            "by Business Science"
        ),
        # 1.0 BOOTSTRAP GRID SYSTEM ----
        # Bootstrap uses a 12-unit grid: column widths in each fluidRow() sum to 12.
        h2("1.0 Bootstrap Grid System"),
        div(
            class = "container text-center",
            fluidRow(
                column(
                    width = 4,
                    class = "bg-primary",
                    p("Grid Width 4")
                ),
                column(
                    width = 4,
                    class = "bg-warning",
                    p("Grid Width 4")
                ),
                column(
                    width = 4,
                    class = "bg-danger",
                    p("Grid Width 4")
                )
            ),
            fluidRow(
                column(
                    width = 3,
                    class = "bg-primary",
                    p("Grid Width 3")
                ),
                column(
                    width = 9,
                    class = "bg-info",
                    p("Grid Width 9")
                )
            )
        ),
        hr(),
        # 2.0 WORKING WITH TEXT ----
        # Inline text helpers: strong/em/mark/code plus a reversed blockquote.
        h2("2.0 Working With Text"),
        p(class = "lead", "Business Science University helps us learn Shiny"),
        fluidRow(
            column(
                width = 6,
                p("We are creating a Boostrap for Shiny cheat sheet."),
                p(strong("In section 1"), "we learned about the", strong(em("Bootstrap Grid System."))),
                p(tags$mark("In section 2"), ", we learned about working with text in", code("bootstrap"), ".")
            ),
            column(
                width = 6,
                tags$blockquote(
                    class = "blockquote-reverse",
                    p("When learning data science, consistency is more important than quantity."),
                    tags$footer("Quote by", tags$cite(title = "Matt Dancho", "Matt Dancho"))
                )
            )
        ),
        hr(),
        # 3.0 TEXT ALIGNMENT ----
        # Same alignment/case classes shown bare, then inside grid columns.
        h2("3.0 Text Alignment"),
        div(
            class = "container",
            id = "text_alignment_1",
            p(class = "text-left text-lowercase", "Left-Aligned Lowercase Text"),
            p(class = "text-center text-uppercase", "Center-Aligned Uppercase Text"),
            p(class = "text-right text-capitalize", "Right-Aligned capitalized text")
        ),
        div(
            class = "container",
            id = "text_alignment_2",
            fluidRow(
                p(class = "text-left text-lowercase", "Left-Aligned Lowercase Text") %>% column(width = 4, class = "bg-primary"),
                p(class = "text-center text-uppercase", "Center-Aligned Uppercase Text") %>% column(width = 4, class = "bg-success"),
                p(class = "text-right text-capitalize", "Right-Aligned capitalized text") %>% column(width = 4, class = "bg-info")
            )
        ),
        hr(),
        # 4.0 LISTS ----
        # Unordered, ordered, and inline (horizontal) lists.
        h2("4.0 Lists"),
        tags$ul(
            tags$li("Item 1"),
            tags$li("Item 2"),
            tags$li("Item 3"),
            tags$li("Item 4")
        ),
        tags$ol(
            tags$li("Item 1"),
            tags$li("Item 2"),
            tags$li("Item 3"),
            tags$li("Item 4")
        ),
        tags$ul(
            class = "list-inline",
            tags$li("Item 1"),
            tags$li("Item 2"),
            tags$li("Item 3"),
            tags$li("Item 4")
        ),
        hr(),
        # 5.0 CONTEXTUAL COLORS & BACKGROUNDS ----
        # text-* colors the text itself; bg-* colors the element background.
        h2("5.0 Contextual Colors & Backgrounds"),
        p(class = "text-primary", "Hello R"),
        p(class = "text-success", "Hello R"),
        p(class = "text-info", "Hello R"),
        p(class = "text-warning", "Hello R"),
        p(class = "text-danger", "Hello R"),
        p(class = "bg-primary", "Hello R"),
        p(class = "bg-success", "Hello R"),
        p(class = "bg-info", "Hello R"),
        p(class = "bg-warning", "Hello R"),
        p(class = "bg-danger", "Hello R"),
        div(style = "height: 400px;")
    )
)
# SERVER ----
# No reactive logic is needed: the cheat sheet is a static page.
server <- function(input, output, session) {
}
shinyApp(ui = ui, server = server)
| /checkpoints/bootstrap_cheat_sheet_for_shiny/app_checkpoint05_context_colors.R | no_license | jimyanau/ds4b_shiny_aws | R | false | false | 4,929 | r | # BOOTSTRAP CHEAT SHEET FOR SHINY ----
# DS4B 202-R ----
# LIBRARIES ----
library(shiny)
library(tidyverse)
library(plotly)
# USER INTERFACE ----
# Static single-page app: each numbered section below demonstrates one group
# of Bootstrap 3 classes (grid, text, alignment, lists, contextual colors).
ui <- shiny::fluidPage(
    title = "Bootstrap Cheat Sheet for Shiny",
    div(
        class = "container",
        id = "page",
        # HEADER ----
        h1(class = "page-header", "Bootstrap Cheat Sheet", tags$small("For Shiny")),
        p(
            class = "lead",
            "This cheat sheet is the first part of the ",
            a(href = "https://university.business-science.io/", target = "_blank", "Expert Shiny Application Development Course"),
            "by Business Science"
        ),
        # 1.0 BOOTSTRAP GRID SYSTEM ----
        # Bootstrap uses a 12-unit grid: column widths in each fluidRow() sum to 12.
        h2("1.0 Bootstrap Grid System"),
        div(
            class = "container text-center",
            fluidRow(
                column(
                    width = 4,
                    class = "bg-primary",
                    p("Grid Width 4")
                ),
                column(
                    width = 4,
                    class = "bg-warning",
                    p("Grid Width 4")
                ),
                column(
                    width = 4,
                    class = "bg-danger",
                    p("Grid Width 4")
                )
            ),
            fluidRow(
                column(
                    width = 3,
                    class = "bg-primary",
                    p("Grid Width 3")
                ),
                column(
                    width = 9,
                    class = "bg-info",
                    p("Grid Width 9")
                )
            )
        ),
        hr(),
        # 2.0 WORKING WITH TEXT ----
        # Inline text helpers: strong/em/mark/code plus a reversed blockquote.
        h2("2.0 Working With Text"),
        p(class = "lead", "Business Science University helps us learn Shiny"),
        fluidRow(
            column(
                width = 6,
                p("We are creating a Boostrap for Shiny cheat sheet."),
                p(strong("In section 1"), "we learned about the", strong(em("Bootstrap Grid System."))),
                p(tags$mark("In section 2"), ", we learned about working with text in", code("bootstrap"), ".")
            ),
            column(
                width = 6,
                tags$blockquote(
                    class = "blockquote-reverse",
                    p("When learning data science, consistency is more important than quantity."),
                    tags$footer("Quote by", tags$cite(title = "Matt Dancho", "Matt Dancho"))
                )
            )
        ),
        hr(),
        # 3.0 TEXT ALIGNMENT ----
        # Same alignment/case classes shown bare, then inside grid columns.
        h2("3.0 Text Alignment"),
        div(
            class = "container",
            id = "text_alignment_1",
            p(class = "text-left text-lowercase", "Left-Aligned Lowercase Text"),
            p(class = "text-center text-uppercase", "Center-Aligned Uppercase Text"),
            p(class = "text-right text-capitalize", "Right-Aligned capitalized text")
        ),
        div(
            class = "container",
            id = "text_alignment_2",
            fluidRow(
                p(class = "text-left text-lowercase", "Left-Aligned Lowercase Text") %>% column(width = 4, class = "bg-primary"),
                p(class = "text-center text-uppercase", "Center-Aligned Uppercase Text") %>% column(width = 4, class = "bg-success"),
                p(class = "text-right text-capitalize", "Right-Aligned capitalized text") %>% column(width = 4, class = "bg-info")
            )
        ),
        hr(),
        # 4.0 LISTS ----
        # Unordered, ordered, and inline (horizontal) lists.
        h2("4.0 Lists"),
        tags$ul(
            tags$li("Item 1"),
            tags$li("Item 2"),
            tags$li("Item 3"),
            tags$li("Item 4")
        ),
        tags$ol(
            tags$li("Item 1"),
            tags$li("Item 2"),
            tags$li("Item 3"),
            tags$li("Item 4")
        ),
        tags$ul(
            class = "list-inline",
            tags$li("Item 1"),
            tags$li("Item 2"),
            tags$li("Item 3"),
            tags$li("Item 4")
        ),
        hr(),
        # 5.0 CONTEXTUAL COLORS & BACKGROUNDS ----
        # text-* colors the text itself; bg-* colors the element background.
        h2("5.0 Contextual Colors & Backgrounds"),
        p(class = "text-primary", "Hello R"),
        p(class = "text-success", "Hello R"),
        p(class = "text-info", "Hello R"),
        p(class = "text-warning", "Hello R"),
        p(class = "text-danger", "Hello R"),
        p(class = "bg-primary", "Hello R"),
        p(class = "bg-success", "Hello R"),
        p(class = "bg-info", "Hello R"),
        p(class = "bg-warning", "Hello R"),
        p(class = "bg-danger", "Hello R"),
        div(style = "height: 400px;")
    )
)
# SERVER ----
# Placeholder server: this cheat-sheet app is UI-only, so no reactive logic
# is needed.  The empty function still satisfies shinyApp()'s required
# server signature (input, output, session).
server <- function(input, output, session) {
}
shinyApp(ui = ui, server = server)
|
## ---- fig.show='hold'----------------------------------------------------
# Auto-generated from the package vignette; the "## ----" markers are
# knitr chunk headers left by knitr::purl().
library(trainR)
# Example vector: 2 missing values out of 8 entries (25% missing).
data <- c(1,2,3,4,5,6,NA,NA)
# pct_missing() is exported by trainR; presumably returns the percentage
# of NA entries -- TODO confirm against the package documentation.
percent_missing <- pct_missing(data)
print(percent_missing)
plot(1:10)
plot(10:1)
## ---- echo=FALSE, results='asis'-----------------------------------------
knitr::kable(head(mtcars, 10))
| /inst/doc/How_to_run_pct_missing.R | permissive | lindsayplatt/trainR | R | false | false | 314 | r | ## ---- fig.show='hold'----------------------------------------------------
library(trainR)
data <- c(1,2,3,4,5,6,NA,NA)
percent_missing <- pct_missing(data)
print(percent_missing)
plot(1:10)
plot(10:1)
## ---- echo=FALSE, results='asis'-----------------------------------------
knitr::kable(head(mtcars, 10))
|
###################################################################
#
# This function is part of WACSgen V1.0
# Copyright © 2013,2014,2015, D. Allard, BioSP,
# and Ronan Trépos MIA-T, INRA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details. http://www.gnu.org
#
###################################################################
#
# Specific Functions for the validation of WACS simulations, compared to WACS data
#
##################### One Simulation #####################
# Compare daily WACS simulations to observations for a single variable.
#
# For both the observed and the simulated series this builds:
#   - a smoothed annual cycle (central value and deviation) via
#     extract.annual.trend(), using the smoothing parameters stored in
#     wacspar$Trend$Param, and
#   - a (years x 365) matrix of daily values (leap days are removed from
#     the simulation so every year has exactly 365 entries).
#
# 'tmoy' (daily mean temperature) is not stored as a column, so it is
# rebuilt from tmin and tmax (or tmin and trange when wacsdata$Trange is
# TRUE).  Likewise, under the Trange parameterization 'tmax' is rebuilt
# as tmin + trange.
#
# Arguments:
#   wacsdata  : WACSdata object; daily observations in wacsdata$data
#   wacspar   : WACSestim object; smoothing parameters in wacspar$Trend$Param
#   wacssimul : WACSsimul object; daily simulations in wacssimul$sim
#   varname   : name of the variable to compare
# Value: an object of class "WACSvalidSim" consumed by the plot methods.
wacsvalid.Sim = function(wacsdata,wacspar,wacssimul,varname){
  # Remove Feb 29 so that every simulated year has exactly 365 days
  sims = wacssimul$sim[which((wacssimul$sim$month != 2) | (wacssimul$sim$day != 29)),]
  if (nrow(sims) %% 365 != 0) {
    stop ("[wacsvalid.Sim] Warning: Nb days simulated should be
           a multiple of 365")
  }
  NyObs = length(unique(wacsdata$data$year))
  NySim = length(unique(sims$year))
  if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
  # All usual variable, including 'tmax'
  if (varname !="tmoy"){
    y = extract.annual.trend(sims[,varname],spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
    CentralSim = y[,1]
    DeviationSim = y[,2]
    if (wacsdata$Trange && (varname=="tmax")){
      # With the Trange parameterization, tmax is stored as tmin + trange
      Obs = wacsdata$data[,"tmin"] + wacsdata$data[,"trange"]
      y = extract.annual.trend(Obs,spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
      CentralObs = y[,1]
      DeviationObs = y[,2]
    }else{
      Obs = wacsdata$data[,varname]
      y = extract.annual.trend(Obs,spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
      CentralObs = y[,1]
      DeviationObs = y[,2]
    }
    # Reshape daily series into (year x day-of-year) matrices.
    # NOTE(review): zSim is allocated with NyObs rows; harmless only because
    # NyObs == NySim was checked above -- consider using NySim for clarity.
    zObs = matrix(0,NyObs,365)
    zSim = matrix(0,NyObs,365)
    for (i in 1:NyObs){
      zObs[i,] = wacsdata$data[((i-1)*365+1):(i*365),varname]
    }
    for (i in 1:NySim){
      zSim[i,] = sims[((i-1)*365+1):(i*365),varname]
    }
  }else{ # If the variable is 'tmoy'
    zObs = matrix(0,NyObs,365)
    zSim = matrix(0,NyObs,365)
    if (wacsdata$Trange){
      # Daily mean temperature rebuilt as tmin + trange/2
      Obs = wacsdata$data[,"tmin"] + wacsdata$data[,"trange"]/2
      Sim = sims[,"tmin"] + sims[,"trange"]/2
    }else{
      Obs = (wacsdata$data[,"tmin"] + wacsdata$data[,"tmax"])/2
      Sim = (sims[,"tmin"] + sims[,"tmax"])/2
    }
    for (i in 1:NyObs){
      zObs[i,] = Obs[((i-1)*365+1):(i*365)]
    }
    for (i in 1:NySim){
      zSim[i,] = Sim[((i-1)*365+1):(i*365)]
    }
    y = extract.annual.trend(Obs,spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
    CentralObs = y[,1]
    DeviationObs = y[,2]
    y = extract.annual.trend(Sim,spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
    CentralSim = y[,1]
    DeviationSim = y[,2]
  }
  res=list(varname=varname,CentralObs=CentralObs,DeviationObs=DeviationObs,
           zObs=zObs,CentralSim=CentralSim,DeviationSim=DeviationSim,zSim=zSim,seasons=wacsdata$seasons)
  class(res) = "WACSvalidSim"
  return(res)
}
##################### Rain #####################
# QQ-plot data for seasonal rainfall: for each season, pairs the sorted
# positive observed rain amounts with the quantiles of the fitted Gamma
# distribution (scale/shape taken from wacspar$Rain$RainPar).
#
# Arguments:
#   wacsdata : WACSdata object; daily observations in wacsdata$data
#   wacspar  : WACSestim object; Gamma parameters in wacspar$Rain$RainPar
# Value: object of class "WACSvalidRain" -- one sub-list per season with
#   the theoretical quantiles, the observed order statistics (largest
#   value excluded) and the fitted parameters.
wacsvalid.Rain = function(wacsdata, wacspar) {
  n.seasons <- length(wacspar$seasons$day)
  out <- vector("list", n.seasons)
  for (season in seq_len(n.seasons)) {
    gam.par <- wacspar$Rain$RainPar[season, ]
    # Keep only rainy days of the current season, sorted ascending
    wet <- (wacsdata$data$season == season) & (wacsdata$data$rain > 0)
    rain.sorted <- sort(wacsdata$data$rain[wet])
    n <- length(rain.sorted)
    out[[season]] <- list(
      theoretical = qgamma(seq_len(n - 1) / n,
                           scale = gam.par[1], shape = gam.par[2]),
      observed    = rain.sorted[seq_len(n - 1)],
      par         = gam.par)
  }
  class(out) <- "WACSvalidRain"
  return(out)
}
##################### MeanSd #####################
# Monthly means and standard deviations of a variable, observed vs simulated.
#
# Produces four (years x 12) matrices: mean and sd per (year, month) cell
# for the observations and for the simulations.  Leap days (Feb 29) are
# removed from the simulation so every simulated year has 365 days.
#
# Arguments:
#   wacsdata  : WACSdata object; daily observations in wacsdata$data
#   wacssimul : WACSsimul object; daily simulations in wacssimul$sim
#   varname   : name of the variable column
# Value: an object of class "WACSvalidMeanSd".
wacsvalid.MeanSd = function(wacsdata, wacssimul, varname) {
  sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
                             (wacssimul$sim$day != 29)), ]
  if (nrow(sims) %% 365 != 0) {
    stop("[wacsvalid] Warning: Nb days simulated should be
          a multiple of 365")
  }
  NyObs = length(unique(wacsdata$data$year))
  NySim = length(unique(sims$year))
  if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
  meanObs = matrix(0, NyObs, 12)
  sdObs   = matrix(0, NyObs, 12)
  meanSim = matrix(0, NySim, 12)
  sdSim   = matrix(0, NySim, 12)
  for (i in 1:NyObs) {
    y = unique(wacsdata$data$year)[i]
    for (j in 1:12) {
      sel = (wacsdata$data$year == y & wacsdata$data$month == j)
      meanObs[i, j] = mean(wacsdata$data[sel, varname])
      sdObs[i, j]   = sd(wacsdata$data[sel, varname])
    }
  }
  for (i in 1:NySim) {
    y = unique(sims$year)[i]
    for (j in 1:12) {
      # BUG FIX: the original computed simulation statistics from
      # wacssimul$sim (leap days included), inconsistent with the filtered
      # 'sims' used for the length checks and with the other wacsvalid.*
      # functions.  Use 'sims' so Feb 29 never enters the statistics.
      sel = (sims$year == y & sims$month == j)
      meanSim[i, j] = mean(sims[sel, varname])
      sdSim[i, j]   = sd(sims[sel, varname])
    }
  }
  res = list(meanObs = meanObs, sdObs = sdObs, meanSim = meanSim,
             sdSim = sdSim, varname = varname)
  class(res) = "WACSvalidMeanSd"
  return(res)
}
##################### BiVar #####################
# Interannual monthly correlation between two variables, computed for the
# observed series and for the simulated series (leap days removed first).
#
# Arguments:
#   wacsdata  : WACSdata object; daily observations in wacsdata$data
#   wacssimul : WACSsimul object; daily simulations in wacssimul$sim
#   varname, varname2 : the two variable columns to correlate
# Value: object of class "WACSvalidBiVar" with one (years x 12)
#   correlation matrix per source.
wacsvalid.BiVar = function(wacsdata, wacssimul, varname, varname2) {
  sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
                             (wacssimul$sim$day != 29)), ]
  if (nrow(sims) %% 365 != 0) {
    stop ("[wacsvalid.BiVar] Nb days simulated should be
           a multiple of 365")
  }
  NyObs = length(unique(wacsdata$data$year))
  NySim = length(unique(sims$year))
  if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
  # Correlation of the two variables within every (year, month) cell
  monthly.cor <- function(tab, ny) {
    out <- matrix(0, ny, 12)
    yrs <- sort(unique(tab$year))
    for (i in seq_len(ny)) {
      for (j in 1:12) {
        cell <- which((tab$year == yrs[i]) & (tab$month == j))
        out[i, j] <- cor(tab[cell, varname], tab[cell, varname2])
      }
    }
    out
  }
  res = list(corObs = monthly.cor(wacsdata$data, NyObs),
             corSim = monthly.cor(sims, NySim),
             varname = varname, varname2 = varname2)
  class(res) = "WACSvalidBiVar"
  return(res)
}
##################### Temporal Correlation #####################
# Month-by-month lag-1 autocorrelation of a variable, observed vs simulated.
#
# For every (year, month) cell, the correlation between the series and its
# one-day-lagged copy is computed, giving a (years x 12) matrix for the
# observations and one for the simulations (leap days removed first).
# Warnings from cor() (e.g. zero-variance months) are silenced while the
# function runs.
#
# Arguments:
#   wacsdata  : WACSdata object; daily observations in wacsdata$data
#   wacssimul : WACSsimul object; daily simulations in wacssimul$sim
#   varname   : name of the variable column
# Value: an object of class "WACSvalidCorTemp".
wacsvalid.CorTemp = function(wacsdata,wacssimul,varname){
  # BUG FIX: the original called options(warn=-1) and then unconditionally
  # options(warn=0), clobbering the caller's warn setting and leaving
  # warnings disabled if an error occurred in between.  Save the previous
  # value and restore it on exit (even on error).
  oldwarn <- options(warn = -1)
  on.exit(options(oldwarn), add = TRUE)
  sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
                             (wacssimul$sim$day != 29)),]
  if (nrow(sims) %% 365 != 0) {
    stop ("[wacsvalid.CorTemp] for 'CorTemp' nb days simulated should be
           a multiple of 365")
  }
  NyObs = length(unique(wacsdata$data$year))
  NySim = length(unique(sims$year))
  if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
  corObs = matrix(0,NyObs,12)
  corSim = matrix(0,NySim,12)
  for ( i in 1:NyObs) {
    y = unique(wacsdata$data$year)[i]
    for ( j in 1:12) {
      tmp = wacsdata$data[which((wacsdata$data$year == y) & (wacsdata$data$month == j)), varname]
      # lag-1 autocorrelation within the month
      corObs[i,j] = cor(tmp[-1], tmp[-length(tmp)])
    }
  }
  for ( i in 1:NySim) {
    y = unique(sims$year)[i]
    for ( j in 1:12) {
      tmp = sims[which((sims$year == y) & (sims$month == j)), varname]
      corSim[i,j] = cor(tmp[-1], tmp[-length(tmp)])
    }
  }
  res = list(corObs= corObs, corSim=corSim, varname=varname)
  class(res) = "WACSvalidCorTemp"
  return(res)
}
##################### SumBase #####################
# Yearly sums of a variable above a threshold, observed vs simulated.
#
# For each year, sums the daily values that exceed 'base' over the selected
# months (e.g. growing degree days).  'tmoy' is rebuilt as (tmin + tmax)/2
# since it is not stored as a column.  Leap days are removed from the
# simulation first.
#
# Arguments:
#   wacsdata  : WACSdata object; daily observations in wacsdata$data
#   wacssimul : WACSsimul object; daily simulations in wacssimul$sim
#   varname   : variable column name ("tmoy" allowed)
#   base      : threshold; only values strictly above it are summed
#   months    : months to include (default all twelve)
# Value: an object of class "WACSvalidSumBase".
wacsvalid.SumBase = function(wacsdata, wacssimul, varname, base = 0, months = 1:12) {
  sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
                             (wacssimul$sim$day != 29)), ]
  if (nrow(sims) %% 365 != 0) {
    stop ("[wacsvalid] Nb days simulated should be
           a multiple of 365")
  }
  NyObs = length(unique(wacsdata$data$year))
  NySim = length(unique(sims$year))
  if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
  # Per-year sum of the values exceeding 'base' over the selected months
  yearly.sums <- function(values, years, mons, ny) {
    sums <- numeric(ny)
    for (i in seq_len(ny)) {
      keep <- (years == unique(years)[i]) & (mons %in% months) & (values > base)
      sums[i] <- sum(values[keep])
    }
    sums
  }
  if (varname == "tmoy") {
    # 'tmoy' is not stored: rebuild daily mean temperature from tmin/tmax
    SumObs = yearly.sums((wacsdata$data$tmin + wacsdata$data$tmax) / 2,
                         wacsdata$data$year, wacsdata$data$month, NyObs)
    SumSim = yearly.sums((sims$tmin + sims$tmax) / 2,
                         sims$year, sims$month, NySim)
  } else {
    SumObs = yearly.sums(wacsdata$data[, varname],
                         wacsdata$data$year, wacsdata$data$month, NyObs)
    SumSim = yearly.sums(sims[, varname], sims$year, sims$month, NySim)
  }
  res = list(SumObs = SumObs, SumSim = SumSim, varname = varname, base = base)
  class(res) = "WACSvalidSumBase"
  return(res)
}
##################### Persistence
# Distribution of spell lengths of a variable above (or below) a threshold,
# observed vs simulated.
#
# Each year's values over the selected months are stacked as one row of a
# matrix, then passed to persistence() which counts, for every length k,
# the number of runs of exactly k consecutive days satisfying the
# condition.  Leap days are removed from the simulation first.
#
# Arguments:
#   wacsdata  : WACSdata object; daily observations in wacsdata$data
#   wacssimul : WACSsimul object; daily simulations in wacssimul$sim
#   varname   : variable column name ('tmoy' is rejected)
#   base      : threshold
#   above     : TRUE counts spells above 'base'; FALSE spells below
#   months    : months to include (default all twelve)
# Value: an object of class "WACSvalidPersistence".
#
# NOTE(review): the rbind stacking assumes at least 2 years (with a single
# year, 2:NyObs iterates c(2, 1)) and that every year contributes the same
# number of selected days -- confirm upstream guarantees.
wacsvalid.Persistence = function(wacsdata,wacssimul,varname,base=0,above=TRUE,months=1:12){
  sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
                             (wacssimul$sim$day != 29)),]
  if (nrow(sims) %% 365 != 0) {
    stop ("[wacsvalid.Persistence] Nb days simulated should be
           a multiple of 365")
  }
  if (varname == "tmoy"){
    stop ("[wacsvalid.Persistence] 'tmoy' cannot be chosen as a variable")
  }
  NyObs = length(unique(wacsdata$data$year))
  NySim = length(unique(sims$year))
  if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
  # Stack the selected months of each observed year as one matrix row
  years = unique(wacsdata$data$year)
  z = wacsdata$data[(wacsdata$data$year == years[1]) & (wacsdata$data$month %in% months),varname]
  varObs = z
  for (i in 2:NyObs) {
    z = wacsdata$data[(wacsdata$data$year == years[i]) & (wacsdata$data$month %in% months),varname]
    varObs = rbind(varObs,z)
  }
  # Same stacking for the simulated years
  years = unique(sims$year)
  z = sims[(sims$year == years[1] ) & (sims$month %in%months), varname]
  varSim = z
  for (i in 2:NySim){
    z = sims[(sims$year == years[i] ) & (sims$month %in%months), varname]
    varSim = rbind(varSim,z)
  }
  # Spell-length frequencies for each dataset
  FreqObs = persistence(varObs,base,above,months)
  FreqSim = persistence(varSim,base,above,months)
  # Truncate both frequency vectors at the longest spell actually seen
  Persmax = max(which(FreqObs>0),which(FreqSim>0))
  FreqObs = FreqObs[1:Persmax]
  FreqSim = FreqSim[1:Persmax]
  res = list(FreqObs=FreqObs, FreqSim=FreqSim, varname=varname, base=base, above=above)
  class(res) = "WACSvalidPersistence"
  return(res)
}
persistence = function(Var,base,above,months) {
  #####################################################
  #
  # WACSgen project v2013. Author D. Allard
  #
  # Function persistence : internal function to compare the persistence of a variable above or below a base
  #
  # ARGUMENTS :
  # Var : variable to be analyzed; it is an array; each line is a separate year
  # base : threshold
  # above : persistence above threshold if TRUE; below threshold if FALSE
  # months: Months to be considered
  #
  # VALUE: Freq, a vector where Freq[k] is the number of spells of exactly
  # k consecutive days above (resp. below) 'base', accumulated over all
  # years.  A spell is closed either when the series crosses the threshold
  # or when the end of a year is reached.
  #
  # NOTE(review): 'months' is unused here; month selection is done by the
  # caller (wacsvalid.Persistence) before building Var.
  #
  Ny = dim(Var)[1]
  MaxLength = dim(Var)[2]
  Freq = rep(0,MaxLength)
  for (y in 1:Ny){
    # persvar[i] = length of the current spell ending at day i
    # (stays 1 when day i starts a new spell or fails the condition)
    persvar = rep(1,MaxLength)
    for (i in 2:MaxLength){
      if (above){
        if ((Var[y,i] > base) && (Var[y,i-1] > base) ){
          persvar[i] = persvar[i-1] + 1
          if (i==MaxLength){
            # Spell still running at the end of the year: close it now
            Freq[persvar[i]] = Freq[persvar[i]] + 1
          }
        }
        if ((Var[y,i] <= base) && (Var[y,i-1] > base) ){
          # Downward crossing: record the spell that just ended
          Freq[persvar[i-1]] = Freq[persvar[i-1]] + 1
        }
      }else{
        if ((Var[y,i] <= base) && (Var[y,i-1] <= base) ){
          persvar[i] = persvar[i-1] + 1
          if (i==MaxLength){
            Freq[persvar[i]] = Freq[persvar[i]] + 1
          }
        }
        if ((Var[y,i] > base) && (Var[y,i-1] <= base) ){
          # Upward crossing: record the below-threshold spell that ended
          Freq[persvar[i-1]] = Freq[persvar[i-1]] + 1
        }
      }
    }
  }
  return(Freq)
}
| /WACS/R/wacs.validFunctions.R | no_license | ingted/R-Examples | R | false | false | 13,112 | r | ###################################################################
#
# This function is part of WACSgen V1.0
# Copyright © 2013,2014,2015, D. Allard, BioSP,
# and Ronan Trépos MIA-T, INRA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details. http://www.gnu.org
#
###################################################################
#
# Specific Functions for the validation of WACS simulations, compared to WACS data
#
##################### One Simulation #####################
wacsvalid.Sim = function(wacsdata,wacspar,wacssimul,varname){
sims = wacssimul$sim[which((wacssimul$sim$month != 2) | (wacssimul$sim$day != 29)),]
if (nrow(sims) %% 365 != 0) {
stop ("[wacsvalid.Sim] Warning: Nb days simulated should be
a multiple of 365")
}
NyObs = length(unique(wacsdata$data$year))
NySim = length(unique(sims$year))
if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
# All usual variable, including 'tmax'
if (varname !="tmoy"){
y = extract.annual.trend(sims[,varname],spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
CentralSim = y[,1]
DeviationSim = y[,2]
if (wacsdata$Trange && (varname=="tmax")){
Obs = wacsdata$data[,"tmin"] + wacsdata$data[,"trange"]
y = extract.annual.trend(Obs,spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
CentralObs = y[,1]
DeviationObs = y[,2]
}else{
Obs = wacsdata$data[,varname]
y = extract.annual.trend(Obs,spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
CentralObs = y[,1]
DeviationObs = y[,2]
}
zObs = matrix(0,NyObs,365)
zSim = matrix(0,NyObs,365)
for (i in 1:NyObs){
zObs[i,] = wacsdata$data[((i-1)*365+1):(i*365),varname]
}
for (i in 1:NySim){
zSim[i,] = sims[((i-1)*365+1):(i*365),varname]
}
}else{ # If the variable is 'tmoy'
zObs = matrix(0,NyObs,365)
zSim = matrix(0,NyObs,365)
if (wacsdata$Trange){
Obs = wacsdata$data[,"tmin"] + wacsdata$data[,"trange"]/2
Sim = sims[,"tmin"] + sims[,"trange"]/2
}else{
Obs = (wacsdata$data[,"tmin"] + wacsdata$data[,"tmax"])/2
Sim = (sims[,"tmin"] + sims[,"tmax"])/2
}
for (i in 1:NyObs){
zObs[i,] = Obs[((i-1)*365+1):(i*365)]
}
for (i in 1:NySim){
zSim[i,] = Sim[((i-1)*365+1):(i*365)]
}
y = extract.annual.trend(Obs,spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
CentralObs = y[,1]
DeviationObs = y[,2]
y = extract.annual.trend(Sim,spar=wacspar$Trend$Param[1],trend.norm=wacspar$Trend$Param[2])
CentralSim = y[,1]
DeviationSim = y[,2]
}
res=list(varname=varname,CentralObs=CentralObs,DeviationObs=DeviationObs,
zObs=zObs,CentralSim=CentralSim,DeviationSim=DeviationSim,zSim=zSim,seasons=wacsdata$seasons)
class(res) = "WACSvalidSim"
return(res)
}
##################### Rain #####################
wacsvalid.Rain = function(wacsdata,wacspar){
nbSeasons = length(wacspar$seasons$day);
res = list();
for (s in 1:nbSeasons){
scale = wacspar$Rain$RainPar[s,1];
shape = wacspar$Rain$RainPar[s,2];
y = sort(wacsdata$data$rain[(wacsdata$data$season == s) &
(wacsdata$data$rain > 0)]);
res[[s]] = list(
theoretical = qgamma(c(1:(length(y)-1))/length(y),
scale=scale, shape=shape),
observed = y[1:(length(y)-1)], par=wacspar$Rain$RainPar[s,]);
}
class(res) = "WACSvalidRain";
return(res)
}
##################### MeanSd #####################
wacsvalid.MeanSd = function(wacsdata,wacssimul,varname){
sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
(wacssimul$sim$day != 29)),]
if (nrow(sims) %% 365 != 0) {
stop ("[wacsvalid] Warning: Nb days simulated should be
a multiple of 365")
}
NyObs = length(unique(wacsdata$data$year))
NySim = length(unique(sims$year))
if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
meanObs = matrix(0,NyObs,12)
sdObs = matrix(0,NyObs,12)
meanSim = matrix(0,NySim,12)
sdSim = matrix(0,NySim,12)
for (i in 1:NyObs) {
y = unique(wacsdata$data$year)[i]
for ( j in 1:12 ) {
meanObs[i,j] = mean(wacsdata$data[(wacsdata$data$year == y
& wacsdata$data$month == j), varname])
sdObs[i,j] = sd(wacsdata$data[(wacsdata$data$year == y
& wacsdata$data$month == j), varname])
}
}
for ( i in 1:NySim) {
y = unique(sims$year)[i]
for ( j in 1:12 ) {
meanSim[i,j] = mean(wacssimul$sim[(wacssimul$sim$year == y
& wacssimul$sim$month == j), varname]);
sdSim[i,j] = sd(wacssimul$sim[(wacssimul$sim$year == y
& wacssimul$sim$month == j), varname]);
}
}
res = list(meanObs=meanObs, sdObs=sdObs, meanSim=meanSim, sdSim=sdSim,
varname=varname)
class(res) = "WACSvalidMeanSd";
return(res)
}
##################### BiVar #####################
wacsvalid.BiVar = function(wacsdata,wacssimul,varname,varname2){
sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
(wacssimul$sim$day != 29)),]
if (nrow(sims) %% 365 != 0) {
stop ("[wacsvalid.BiVar] Nb days simulated should be
a multiple of 365")
}
NyObs = length(unique(wacsdata$data$year))
NySim = length(unique(sims$year))
if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
month = 1:12
corObs = matrix(0,NyObs,12)
corSim = matrix(0,NySim,12)
for ( i in 1:NyObs) {
y = sort(unique(wacsdata$data$year))[i]
for ( j in 1:12) {
tmp = wacsdata$data[which((wacsdata$data$year == y) &
(wacsdata$data$month == j)),varname]
tmp2 = wacsdata$data[which((wacsdata$data$year == y) &
(wacsdata$data$month == j)),varname2]
corObs[i,j] = cor(tmp, tmp2)
}
}
for ( i in 1:NySim) {
y = sort(unique(sims$year))[i]
for ( j in 1:12) {
tmp = sims[which((sims$year == y) & (sims$month == j)), varname]
tmp2 = sims[which((sims$year == y) & (sims$month == j)), varname2]
corSim[i,j] = cor(tmp, tmp2)
}
}
res = list(corObs= corObs, corSim=corSim, varname=varname,varname2=varname2);
class(res) = "WACSvalidBiVar";
return(res)
}
##################### Temporal Correlation #####################
wacsvalid.CorTemp = function(wacsdata,wacssimul,varname){
options(warn=-1)
sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
(wacssimul$sim$day != 29)),]
if (nrow(sims) %% 365 != 0) {
stop ("[wacsvalid.CorTemp] for 'CorTemp' nb days simulated should be
a multiple of 365")
}
NyObs = length(unique(wacsdata$data$year))
NySim = length(unique(sims$year))
if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
month = 1:12
corObs = matrix(0,NyObs,12)
corSim = matrix(0,NySim,12)
for ( i in 1:NyObs) {
y = unique(wacsdata$data$year)[i]
for ( j in 1:12) {
tmp = wacsdata$data[which((wacsdata$data$year == y) & (wacsdata$data$month == j)), varname]
corObs[i,j] = cor(tmp[-1], tmp[-length(tmp)])
}
}
for ( i in 1:NySim) {
y = unique(sims$year)[i]
for ( j in 1:12) {
tmp = sims[which((sims$year == y) & (sims$month == j)), varname]
corSim[i,j] = cor(tmp[-1], tmp[-length(tmp)])
}
}
options(warn=0)
res = list(corObs= corObs, corSim=corSim, varname=varname);
class(res) = "WACSvalidCorTemp";
return(res)
}
##################### SumBase #####################
wacsvalid.SumBase = function(wacsdata,wacssimul,varname,base=0,months=1:12){
sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
(wacssimul$sim$day != 29)),]
if (nrow(sims) %% 365 != 0) {
stop ("[wacsvalid] Nb days simulated should be
a multiple of 365")
}
NyObs = length(unique(wacsdata$data$year))
NySim = length(unique(sims$year))
if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
SumObs = rep(0,NyObs)
SumSim = rep(0,NySim)
if (varname=="tmoy"){
tmoyObs = (wacsdata$data$tmin + wacsdata$data$tmax)/2
tmoySim = (sims$tmin + sims$tmax)/2
for (i in 1:NyObs) {
y = unique(wacsdata$data$year)[i]
selObs = (wacsdata$data$year == y) & (wacsdata$data$month %in% months) & (tmoyObs > base)
SumObs[i] = sum(tmoyObs[selObs])
}
for (i in 1:NySim) {
y = unique(sims$year)[i]
selSim = (sims$year == y) & (sims$month %in% months) & (tmoySim > base)
SumSim[i] = sum(tmoySim[selSim])
}
}else{
for (i in 1:NyObs) {
y = unique(wacsdata$data$year)[i]
selObs = (wacsdata$data$year == y) & (wacsdata$data$month %in% months) & (wacsdata$data[,varname] > base)
SumObs[i] = sum(wacsdata$data[selObs,varname])
}
for (i in 1:NySim) {
y = unique(sims$year)[i]
selSim = (sims$year == y) & (sims$month %in% months) & (sims[,varname] > base)
SumSim[i] = sum(sims[selSim,varname])
}
}
res = list(SumObs=SumObs, SumSim=SumSim, varname=varname,base=base)
class(res) = "WACSvalidSumBase"
return(res)
}
##################### Persistence
wacsvalid.Persistence = function(wacsdata,wacssimul,varname,base=0,above=TRUE,months=1:12){
sims = wacssimul$sim[which((wacssimul$sim$month != 2) |
(wacssimul$sim$day != 29)),]
if (nrow(sims) %% 365 != 0) {
stop ("[wacsvalid.Persistence] Nb days simulated should be
a multiple of 365")
}
if (varname == "tmoy"){
stop ("[wacsvalid.Persistence] 'tmoy' cannot be chosen as a variable")
}
NyObs = length(unique(wacsdata$data$year))
NySim = length(unique(sims$year))
if (NyObs != NySim) stop("[wacsvalid] Data and Simulations have different length")
years = unique(wacsdata$data$year)
z = wacsdata$data[(wacsdata$data$year == years[1]) & (wacsdata$data$month %in% months),varname]
varObs = z
for (i in 2:NyObs) {
z = wacsdata$data[(wacsdata$data$year == years[i]) & (wacsdata$data$month %in% months),varname]
varObs = rbind(varObs,z)
}
years = unique(sims$year)
z = sims[(sims$year == years[1] ) & (sims$month %in%months), varname]
varSim = z
for (i in 2:NySim){
z = sims[(sims$year == years[i] ) & (sims$month %in%months), varname]
varSim = rbind(varSim,z)
}
FreqObs = persistence(varObs,base,above,months)
FreqSim = persistence(varSim,base,above,months)
Persmax = max(which(FreqObs>0),which(FreqSim>0))
FreqObs = FreqObs[1:Persmax]
FreqSim = FreqSim[1:Persmax]
res = list(FreqObs=FreqObs, FreqSim=FreqSim, varname=varname, base=base, above=above)
class(res) = "WACSvalidPersistence"
return(res)
}
persistence = function(Var,base,above,months) {
#####################################################
#
# WACSgen project v2013. Author D. Allard
#
# Function persistence : internal function to compare the persistence of a variable above or below a base
#
# ARGUMENTS :
# Var : variable to be analyzed; it is an array; each line is a separate year
# base : threshold
# above : persistence above threshold if TRUE; below threshold if FALSE
# months: Months to be considered
#
#
Ny = dim(Var)[1]
MaxLength = dim(Var)[2]
Freq = rep(0,MaxLength)
for (y in 1:Ny){
persvar = rep(1,MaxLength)
for (i in 2:MaxLength){
if (above){
if ((Var[y,i] > base) && (Var[y,i-1] > base) ){
persvar[i] = persvar[i-1] + 1
if (i==MaxLength){
Freq[persvar[i]] = Freq[persvar[i]] + 1
}
}
if ((Var[y,i] <= base) && (Var[y,i-1] > base) ){
Freq[persvar[i-1]] = Freq[persvar[i-1]] + 1
}
}else{
if ((Var[y,i] <= base) && (Var[y,i-1] <= base) ){
persvar[i] = persvar[i-1] + 1
if (i==MaxLength){
Freq[persvar[i]] = Freq[persvar[i]] + 1
}
}
if ((Var[y,i] > base) && (Var[y,i-1] <= base) ){
Freq[persvar[i-1]] = Freq[persvar[i-1]] + 1
}
}
}
}
return(Freq)
}
|
# Part 1: simulation exercise -----------------------------------------------
# Exponential distribution with rate 0.2 (theoretical mean 1/0.2 = 5),
# 40 draws per sample, 1000 simulated sample means.
lambda <- 0.2
n <- 40
sim <- 1000
# calculate mean of exponential simulations
set.seed(2020)
mean_sim <- replicate(sim, mean(rexp(n, lambda)), simplify = TRUE)
summary(mean_sim)
# Sample mean vs theoretical mean
## obtain actual mean from summary
summary(mean_sim)[4]
## plot histogram of distribution
hist(mean_sim, breaks = 40, xlim = c(2, 8),
     main = "Exponential distribution means", col = "skyblue", xlab = "Mean")
abline(v = mean(mean_sim), lwd = 2, col = "blue", lty = 4)
abline(v = 1 / lambda, lwd = 2, col = "red", lty = 2)
legend("topright", legend = c("actual mean", "theoretic mean"),
       col = c("blue", "red"), lty = 4:2)
# Sample variance vs theoretical variance
# NOTE(review): this section appears unfinished -- the code below is a set
# of unrelated regression exercises, not a variance comparison.
# Regression exercises ------------------------------------------------------
# Regression through the origin
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
lm(y ~ x - 1)
# Simple linear regression on mtcars: mpg explained by weight
data("mtcars")
lm(mpg ~ wt, data = mtcars)
# Standardize (z-score) a small sample
x <- c(8.58, 10.46, 9.01, 9.64, 8.86)
(x - mean(x))/sd(x)
# Intercept and slope with an intercept term
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
lm(y ~ x)
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
mean(x)
# Coefficient table and residual standard error
x <- c(0.61, 0.93, 0.83, 0.35, 0.54, 0.16, 0.91, 0.62, 0.62)
y <- c(0.67, 0.84, 0.6, 0.18, 0.85, 0.47, 1.1, 0.65, 0.36)
fit <- lm(y ~ x)
coef(summary(fit))
summary(fit)$sigma
# Confidence and prediction intervals on mtcars (mpg ~ wt)
x <- mtcars$wt
y <- mtcars$mpg
fit <- lm(y ~ x)
predict(fit, newdata = data.frame(x = mean(x)), interval = ("confidence"))
predict(fit, newdata = data.frame(x = 3), interval = ("prediction"))
fit2 <- lm(y ~ I(x/2))
# BUG FIX: the predictor column in newdata must be named 'x'; the original
# data.frame(x - mean(x)) created a column named "x...mean.x.", so predict()
# silently ignored newdata and returned predictions for all fitted points.
predict(fit2, newdata = data.frame(x = mean(x)), interval = ("prediction"))
| /part1.R | no_license | GeorgyMakarov/statistical-inference-course-project | R | false | false | 1,636 | r |
lambda <- 0.2
n <- 40
sim <- 1000
# calculate mean of exponential simulations
set.seed(2020)
mean_sim <- replicate(sim, mean(rexp(n, lambda)), simplify = TRUE)
summary(mean_sim)
# Sample mean vs theoretical mean
## obtain actual mean from summary
summary(mean_sim)[4]
## plot histogram if distribution
hist(mean_sim, breaks = 40, xlim = c(2, 8),
main = "Exponential distribution means", col = "skyblue", xlab = "Mean")
abline(v = mean(mean_sim), lwd = 2, col = "blue", lty = 4)
abline(v = 1 / lambda, lwd = 2, col = "red", lty = 2)
legend("topright", legend = c("actual mean", "theoretic mean"),
col = c("blue", "red"), lty = 4:2)
# Sample variance vs theoretical variance
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
lm(y ~ x - 1)
data("mtcars")
lm(mpg ~ wt, data = mtcars)
x <- c(8.58, 10.46, 9.01, 9.64, 8.86)
(x - mean(x))/sd(x)
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
lm(y ~ x)
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
mean(x)
x <- c(0.61, 0.93, 0.83, 0.35, 0.54, 0.16, 0.91, 0.62, 0.62)
y <- c(0.67, 0.84, 0.6, 0.18, 0.85, 0.47, 1.1, 0.65, 0.36)
fit <- lm(y ~ x)
coef(summary(fit))
summary(fit)$sigma
x <- mtcars$wt
y <- mtcars$mpg
fit <- lm(y ~ x)
predict(fit, newdata = data.frame(x = mean(x)), interval = ("confidence"))
predict(fit, newdata = data.frame(x = 3), interval = ("prediction"))
fit2 <- lm(y ~ I(x/2))
predict(fit2, newdata = data.frame(x - mean(x)), interval = ("prediction"))
|
# Draw a letter as three cubic Bezier strokes using the gridBezier package.
library(gridBezier)

# Control points for the three strokes; x-coordinates are compressed
# horizontally by a factor of 3.
x  <- c(.4, .66, .6, .9)/3
y  <- c(.285, .3, .65, .61)
x1 <- c(.9, .66, .15, .1)/3
y1 <- c(.61, .3, .67, .61)
x2 <- c(.4, .2, .94, .99)/3
y2 <- c(.285, .4, .36, .42)

# Render every stroke with the same line width
stroke.gp <- gpar(lwd = 3)
grid.Bezier(x, y, gp = stroke.gp)
grid.Bezier(x1, y1, gp = stroke.gp)
grid.Bezier(x2, y2, gp = stroke.gp)
| /Talleres/Taller 2/Letra Luis.R | no_license | LuisPenaranda/AnalisisNumerico | R | false | false | 308 | r | library(gridBezier)
x <- c(.4, .66, .6, .9)/3
y <- c(.285, .3, .65, .61)
x1 <- c(.9, .66, .15, .1)/3
y1 <- c(.61, .3, .67, .61)
x2 <- c(.4, .2, .94, .99)/3
y2 <- c(.285, .4, .36, .42)
grid.Bezier(x, y, gp=gpar(lwd=3))
grid.Bezier(x1, y1, gp=gpar(lwd=3))
grid.Bezier(x2, y2, gp=gpar(lwd=3))
|
\name{calc_mc_css}
\alias{calc_mc_css}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Find the monte carlo steady state concentration.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
This function finds the analytical steady-state plasma concentration (from calc_analytic_css) for the three-compartment steady-state model (model = '3compartmentss') using a Monte Carlo simulation (monte_carlo).
}
\usage{
calc_mc_css(chem.cas=NULL,chem.name=NULL,parameters=NULL,daily.dose=1,
which.quantile=0.95,species="Human",output.units="mg/L",suppress.messages=F,
censored.params=list(Funbound.plasma=list(cv=0.3,lod=0.01)),
vary.params=list(BW=0.3,Vliverc=0.3,Qgfrc=0.3,Qtotal.liverc=0.3,
million.cells.per.gliver=0.3,Clint=0.3),samples=1000,
return.samples=F)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{chem.name}{
Either the chemical parameters, name, or the CAS number must be specified.
%% ~~Describe \code{obs} here~~
}
\item{chem.cas}{
Either the CAS number, parameters, or the chemical name must be specified.
%% ~~Describe \code{pred} here~~
}
\item{parameters}{Parameters from parameterize_steadystate.}
\item{daily.dose}{Total daily dose, mg/kg BW/day.}
\item{which.quantile}{
Which quantile from Monte Carlo simulation is requested. Can be a vector.
%% ~~Describe \code{ssparams.mean} here~~
}
\item{species}{
Species desired (either "Rat", "Rabbit", "Dog", "Mouse", or default "Human").
%% ~~Describe \code{ssparams.var.inv} here~~
}
\item{output.units}{Plasma concentration units, either uM or default mg/L.}
\item{suppress.messages}{Whether or not to suppress output message.}
\item{censored.params}{The parameters listed in censored.params are sampled from a normal distribution
that is censored for values less than the limit of detection (specified separately
for each parameter). This argument should be a list of sub-lists. Each sublist
is named for a parameter in "parameters"
and contains two elements: "CV" (coefficient of variation) and "LOD" (limit of
detection, below which parameter values are censored. New values are sampled
with mean equal to the value in "parameters" and standard deviation equal to the
mean times the CV. Censored values are sampled on a uniform distribution between
0 and the limit of detection.}
\item{vary.params}{The parameters listed in vary.params are sampled from a normal distribution that is
truncated at zero. This argument should be a list of coefficients of variation
(CV) for the normal distribution. Each entry in the list is named for a
parameter in "parameters". New values are sampled with mean equal to the value in
"parameters" and standard deviation equal to the mean times the CV.}
\item{samples}{Number of samples generated in calculating quantiles.}
\item{return.samples}{Whether or not to return the vector containing the samples from the simulation instead of the selected quantile.}
%% ~~Describe \code{pred} here~~
}
\details{
When species is specified as rabbit, dog, or mouse, the function uses the appropriate physiological data (volumes and flows) but substitutes human fraction unbound, partition coefficients, and intrinsic hepatic clearance.
}
\author{
John Wambaugh
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
calc_mc_css(chem.name='Bisphenol A',output.units='uM',which.quantile=.9)
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
}
\keyword{Monte Carlo}
\keyword{Steady State}% __ONLY ONE__ keyword per line
| /man/calc_mc_css.Rd | no_license | HQData/httkgui | R | false | false | 3,697 | rd | \name{calc_mc_css}
\alias{calc_mc_css}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Find the monte carlo steady state concentration.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
This function finds the analytical steady state plasma concentration(from calc_analytic_css) for the three compartment steady state model (model = '3compartmentss') using a monte carlo simulation (monte_carlo).
}
\usage{
calc_mc_css(chem.cas=NULL,chem.name=NULL,parameters=NULL,daily.dose=1,
which.quantile=0.95,species="Human",output.units="mg/L",suppress.messages=F,
censored.params=list(Funbound.plasma=list(cv=0.3,lod=0.01)),
vary.params=list(BW=0.3,Vliverc=0.3,Qgfrc=0.3,Qtotal.liverc=0.3,
million.cells.per.gliver=0.3,Clint=0.3),samples=1000,
return.samples=F)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{chem.name}{
Either the chemical parameters, name, or the CAS number must be specified.
%% ~~Describe \code{obs} here~~
}
\item{chem.cas}{
Either the CAS number, parameters, or the chemical name must be specified.
%% ~~Describe \code{pred} here~~
}
\item{parameters}{Parameters from parameterize_steadystate.}
\item{daily.dose}{Total daily dose, mg/kg BW/day.}
\item{which.quantile}{
Which quantile from Monte Carlo simulation is requested. Can be a vector.
%% ~~Describe \code{ssparams.mean} here~~
}
\item{species}{
Species desired (either "Rat", "Rabbit", "Dog", "Mouse", or default "Human").
%% ~~Describe \code{ssparams.var.inv} here~~
}
\item{output.units}{Plasma concentration units, either uM or default mg/L.}
\item{suppress.messages}{Whether or not to suppress output message.}
\item{censored.params}{The parameters listed in censored.params are sampled from a normal distribution
that is censored for values less than the limit of detection (specified separately
for each parameter). This argument should be a list of sub-lists. Each sublist
is named for a parameter in "parameters"
and contains two elements: "CV" (coefficient of variation) and "LOD" (limit of
detection), below which parameter values are censored. New values are sampled
with mean equal to the value in "parameters" and standard deviation equal to the
mean times the CV. Censored values are sampled on a uniform distribution between
0 and the limit of detection.}
\item{vary.params}{The parameters listed in vary.params are sampled from a normal distribution that is
truncated at zero. This argument should be a list of coefficients of variation
(CV) for the normal distribution. Each entry in the list is named for a
parameter in "parameters". New values are sampled with mean equal to the value in
"parameters" and standard deviation equal to the mean times the CV.}
\item{samples}{Number of samples generated in calculating quantiles.}
\item{return.samples}{Whether or not to return the vector containing the samples from the simulation instead of the selected quantile.}
%% ~~Describe \code{pred} here~~
}
\details{
When species is specified as rabbit, dog, or mouse, the function uses the appropriate physiological data (volumes and flows) but substitutes human fraction unbound, partition coefficients, and intrinsic hepatic clearance.
}
\author{
John Wambaugh
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
calc_mc_css(chem.name='Bisphenol A',output.units='uM',which.quantile=.9)
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
}
\keyword{Monte Carlo}
\keyword{Steady State}% __ONLY ONE__ keyword per line
|
######################################################################################################################################
######################################################################################################################################
### TransMatMaker -- Builds transition rate matrix for easy use in the main function
######################################################################################################################################
######################################################################################################################################
TransMatMaker.old <- function(hidden.states = FALSE) {
  # Build the default transition rate index matrix.  Off-diagonal cells are
  # numbered 1..k*(k-1) in column-major order (the order base R fills
  # matrices); the diagonal is NA.  With hidden.states = FALSE the matrix is
  # 2x2 over states (0)/(1); with hidden.states = TRUE it is 4x4 over the
  # observed/hidden state combinations (0A)/(1A)/(0B)/(1B).
  if (hidden.states) {
    state.labels <- c("(0A)", "(1A)", "(0B)", "(1B)")
  } else {
    state.labels <- c("(0)", "(1)")
  }
  k <- length(state.labels)
  rate.mat <- matrix(NA, k, k, dimnames = list(state.labels, state.labels))
  # Number every off-diagonal cell consecutively, leaving the diagonal NA.
  off.diag <- which(row(rate.mat) != col(rate.mat))
  rate.mat[off.diag] <- as.numeric(seq_along(off.diag))
  return(rate.mat)
}
######################################################################################################################################
######################################################################################################################################
### Various functions for dropping and setting equal parameters in a transition matrix.
######################################################################################################################################
######################################################################################################################################
# ParDrop: delete selected rate parameters from a transition rate index
# matrix (as built by TransMatMaker) and renumber the surviving parameters
# so they remain consecutive starting at 1.
#
# Args:
#   rate.mat.index: integer index matrix; off-diagonal cells hold parameter
#     numbers, the diagonal holds NA.
#   drop.par: vector of parameter numbers to remove from the model.
#
# Returns the matrix with dropped cells coded 0 (disallowed transition),
# remaining parameters renumbered 1..k, and NA restored on the diagonal.
ParDrop <- function(rate.mat.index=NULL, drop.par=NULL){
if(is.null(rate.mat.index)){
stop("Rate matrix needed. See TransMatMaker() to create one.\n", call.=FALSE)
}
if(is.null(drop.par)){
cat("No parameters indicated to drop. Original matrix returned.\n")
return(rate.mat.index)
}
if(max(rate.mat.index,na.rm=TRUE) < max(drop.par,na.rm=TRUE)){
# warn (but continue) when a requested parameter exceeds the largest index present
cat("Some parameters selected for dropping were not in the original matrix.\n")
}
drop.par <- unique(drop.par) # in case parameters listed more than once in drop vector
drop.par <- drop.par[order(drop.par)]
max <- max(rate.mat.index,na.rm=TRUE)
# Blank out every cell holding a parameter scheduled for removal.
for(drop.which in 1:length(drop.par)){
drop.locs <- which(rate.mat.index == drop.par[drop.which],arr.ind=TRUE)
rate.mat.index[drop.locs] <- NA
}
# Existing 0 cells (already-disallowed transitions) are treated like NA
# during renumbering and restored to 0 at the end.
rate.mat.index[rate.mat.index==0] = NA
max <- max - length(drop.par)
# Renumber survivors so parameter indices run 1..k with no gaps; gg maps
# old parameter numbers (column 1) to new consecutive numbers (column 2).
exclude <- which(is.na(rate.mat.index))
gg <- cbind(sort(unique(rate.mat.index[-exclude])), 1:length(unique(rate.mat.index[-exclude])))
for(table.index in 1:length(unique(rate.mat.index[-exclude]))){
rate.mat.index[which(rate.mat.index==gg[table.index,1])] <- gg[table.index,2]
}
# Code disallowed transitions as 0 and restore NA on the diagonal.
rate.mat.index[is.na(rate.mat.index)] = 0
diag(rate.mat.index) = NA
return(rate.mat.index)
}
# ParEqual: constrain two or more rate parameters to share a single value.
#
# Args:
#   rate.mat.index: integer index matrix from TransMatMaker(); off-diagonal
#     cells hold parameter numbers, the diagonal holds NA.
#   eq.par: vector of at least two parameter numbers to be set equal.
#
# All parameters in eq.par are collapsed onto the smallest of them, and the
# remaining parameters are renumbered so indices stay consecutive.  Returns
# the updated matrix with 0 in disallowed cells and NA on the diagonal.
ParEqual <- function(rate.mat.index=NULL, eq.par=NULL){
  if(is.null(rate.mat.index)){
    stop("Rate matrix needed. See TransMatMaker() to create one.\n", call.=FALSE)
  }
  # Bug fix: the original tested is.null(drop) -- `drop` is base R's
  # function, so that condition was always FALSE.  The intended check is on
  # eq.par (it previously "worked" only because length(NULL) < 2).
  if(is.null(eq.par) || length(eq.par) < 2){
    cat("Fewer than two parameters indicated to equalize. Original matrix returned.\n")
    return(rate.mat.index)
  }
  too.big <- which(eq.par > max(rate.mat.index,na.rm=TRUE))
  if(length(too.big) > 0){
    cat("Some parameters selected for equalizing were not in the original matrix:\n")
    cat("Not in original rate.mat.index:",eq.par[too.big],"\n")
    cat("Original matrix returned.\n")
    return(rate.mat.index)
  }
  eq.par <- unique(eq.par)        # ignore duplicates in the request
  eq.par <- eq.par[order(eq.par)]
  min.par <- min(eq.par)          # all equalized cells collapse onto this index
  # Collapse every listed parameter onto the smallest one.
  for(eq.which in 2:length(eq.par)){
    to.eq <- which(rate.mat.index == eq.par[eq.which],arr.ind=TRUE)
    rate.mat.index[to.eq] <- min.par
  }
  # The decrement index holds, per cell, how many collapsed parameter
  # numbers lie below that cell's value; subtracting it renumbers the
  # survivors so parameter indices stay consecutive.  (A duplicate, unused
  # allocation of this matrix in the original has been removed.)
  dec.index <- matrix(0,length(rate.mat.index[,1]),length(rate.mat.index[1,]))
  for(eq.which in 2:length(eq.par)){
    to.dec <- which(rate.mat.index > eq.par[eq.which],arr.ind=TRUE) #greater than current decrementer
    dec.index[to.dec] <- dec.index[to.dec] + 1
  }
  rate.mat.index <- rate.mat.index - dec.index
  # Code disallowed transitions as 0 and restore NA on the diagonal.
  rate.mat.index[is.na(rate.mat.index)] = 0
  diag(rate.mat.index) = NA
  return(rate.mat.index)
}
| /R/transMat.old.R | no_license | thej022214/hisse | R | false | false | 4,576 | r | ######################################################################################################################################
######################################################################################################################################
### TransMatMaker -- Builds transition rate matrix for easy use in the main function
######################################################################################################################################
######################################################################################################################################
TransMatMaker.old <- function(hidden.states=FALSE){
if(hidden.states == FALSE){
rate.mat <- matrix(NA, 2, 2)
diag(rate.mat) <- 3
rate.mat[is.na(rate.mat)] <- 1:2
diag(rate.mat) <- NA
rownames(rate.mat) <- c("(0)","(1)")
colnames(rate.mat) <- c("(0)","(1)")
}else{
rate.mat <- matrix(NA, 4, 4)
diag(rate.mat) <- 13
rate.mat[is.na(rate.mat)] <- 1:12
diag(rate.mat) <- NA
rownames(rate.mat) <- c("(0A)","(1A)","(0B)","(1B)")
colnames(rate.mat) <- c("(0A)","(1A)","(0B)","(1B)")
}
return(rate.mat)
}
######################################################################################################################################
######################################################################################################################################
### Various functions for dropping and setting equal parameters in a transition matrix.
######################################################################################################################################
######################################################################################################################################
ParDrop <- function(rate.mat.index=NULL, drop.par=NULL){
if(is.null(rate.mat.index)){
stop("Rate matrix needed. See TransMatMaker() to create one.\n", call.=FALSE)
}
if(is.null(drop.par)){
cat("No parameters indicated to drop. Original matrix returned.\n")
return(rate.mat.index)
}
if(max(rate.mat.index,na.rm=TRUE) < max(drop.par,na.rm=TRUE)){
cat("Some parameters selected for dropping were not in the original matrix.\n")
}
drop.par <- unique(drop.par) # in case parameters listed more than once in drop vector
drop.par <- drop.par[order(drop.par)]
max <- max(rate.mat.index,na.rm=TRUE)
for(drop.which in 1:length(drop.par)){
drop.locs <- which(rate.mat.index == drop.par[drop.which],arr.ind=TRUE)
rate.mat.index[drop.locs] <- NA
}
rate.mat.index[rate.mat.index==0] = NA
max <- max - length(drop.par)
exclude <- which(is.na(rate.mat.index))
gg <- cbind(sort(unique(rate.mat.index[-exclude])), 1:length(unique(rate.mat.index[-exclude])))
for(table.index in 1:length(unique(rate.mat.index[-exclude]))){
rate.mat.index[which(rate.mat.index==gg[table.index,1])] <- gg[table.index,2]
}
rate.mat.index[is.na(rate.mat.index)] = 0
diag(rate.mat.index) = NA
return(rate.mat.index)
}
ParEqual <- function(rate.mat.index=NULL, eq.par=NULL){
if(is.null(rate.mat.index)){
stop("Rate matrix needed. See TransMatMaker() to create one.\n", call.=FALSE)
}
if(is.null(drop) || length(eq.par) < 2){
cat("Fewer than two parameters indicated to equalize. Original matrix returned.\n")
return(rate.mat.index)
}
too.big <- which(eq.par > max(rate.mat.index,na.rm=TRUE))
if(length(too.big) > 0){
cat("Some parameters selected for equalizing were not in the original matrix:\n")
cat("Not in original rate.mat.index:",eq.par[too.big],"\n")
cat("Original matrix returned.\n")
return(rate.mat.index)
}
eq.par <- unique(eq.par)
eq.par <- eq.par[order(eq.par)]
min <- min(eq.par) # rm.na unnecessary?
#the decrement index will hold counters to decrement rate index
dec.index <- matrix(0,length(rate.mat.index[,1]),length(rate.mat.index[1,]))
for(eq.which in 2:length(eq.par)){
to.eq <- which(rate.mat.index == eq.par[eq.which],arr.ind=TRUE)
rate.mat.index[to.eq] <- min
}
#the decrement index will hold counters to decrement rate index
dec.index <- matrix(0,length(rate.mat.index[,1]),length(rate.mat.index[1,]))
for(eq.which in 2:length(eq.par)){
to.dec <- which(rate.mat.index > eq.par[eq.which],arr.ind=TRUE) #greater than current decrementer
dec.index[to.dec] <- dec.index[to.dec] + 1
}
rate.mat.index <- rate.mat.index - dec.index
rate.mat.index[is.na(rate.mat.index)] = 0
diag(rate.mat.index) = NA
return(rate.mat.index)
}
|
install.packages("twitteR")
install.packages("ROAuth")
install.packages("RCurl")
install.packages("stringr")
install.packages("tm")
install.packages("ggmap")
install.packages("dplyr")
install.packages("plyr")
install.packages("wordcloud")
install.packages(c("devtools", "rjson", "bit64", "httr"))
install_github("twitteR", username="geoffjentry")
install.packages("syuzhet")
library(sentimentr)
library(twitteR)
library(ROAuth)
require(RCurl)
library(stringr)
library(tm)
library(ggmap)
library(plyr)
library(dplyr)
library(tm)
library(wordcloud)
library(syuzhet)
# Setting the working directory
setwd('/Users/shivamgoel/Desktop/Final')
# Setting the authentication
# NOTE(review): live API credentials are hard-coded in source; they should be
# rotated and loaded from environment variables rather than committed here.
api_key <- "R3qtsUiUr25g3EQ9ELhHrzbxm"
api_secret <- "o6qfWHddNfNclF9U1nMaH8stVYfX2gjsWt4rWrhhXnjUrcSUat"
access_token <- "1045087926-W5eIzwjZjfEaHCiRNTmKFaEYNBqA92gMv4XRqTz"
access_token_secret <- "zH4OoG6xPQfv7kgVQzWZRYJWzUBipaNkeaWLA0DHUlx0n"
# Authentication
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
# NOTE(review): this saves the setup_twitter_oauth *function* object, not the
# authenticated session/token -- confirm this is what was intended.
save(setup_twitter_oauth, file="twitter authentication.Rdata")
N=2000 # tweets to request from each query
S=200 # radius in miles
#cities=DC,New York,San Fransisco,Colorado,Mountainview,Tampa,Austin,Boston,
# Seatle,Vegas,Montgomery,Phoenix,Little Rock,Atlanta,Springfield,
# Cheyenne,Bisruk,Helena,Springfield,Madison,Lansing,Salt Lake City,Nashville
# Jefferson City,Raleigh,Harrisburg,Boise,Lincoln,Salem,St. Paul
# Setting the latitudes and longitudes (one entry per city listed above)
lats=c(38.9,40.7,37.8,39,37.4,28,30,42.4,48,36,32.3,33.5,34.7,33.8,37.2,41.2,46.8,46.6,37.2,
43,42.7,40.8,36.2,38.6,35.8,40.3,43.6,40.8,44.9,44.9)
lons=c(-77,-74,-122,-105.5,-122,-82.5,-98,-71,-122,-115,-86.3,-112,-92.3,-84.4,-93.3,-104.8,
-100.8,-112, -93.3,-89,-84.5,-111.8,-86.8,-92.2,-78.6,-76.8,-116.2,-98.7,-123,-93)
recession <- NULL
# Getting the twitter data: run the same search once per city, restricted to
# an S-mile radius around each (lat, lon) pair, and pool all results.
recession=do.call(rbind,lapply(1:length(lats), function(i) searchTwitter('Demonetisation + India + 2016',
lang="en",n=N,resultType="recent", geocode=paste(lats[i],lons[i],paste0(S,"mi"),sep=","))))
# Getting the latitude and longitude of the tweet, the tweet, re-twitted and favorited count,
# the date and time it was twitted
#recession=do.call(rbind,searchTwitter('Recession + 2008',lang="en",n=N,resultType="recent"))
# Each x below is a tweet object returned by searchTwitter; tweets without
# geo information yield zero-length coordinates, mapped to NA.
recessionlat=sapply(recession, function(x) as.numeric(x$getLatitude()))
recessionlat=sapply(recessionlat, function(z) ifelse(length(z)==0,NA,z))
recessionlon=sapply(recession, function(x) as.numeric(x$getLongitude()))
recessionlon=sapply(recessionlon, function(z) ifelse(length(z)==0,NA,z))
recessiondate=lapply(recession, function(x) x$getCreated())
recessiondate=sapply(recessiondate,function(x) strftime(x, format="%Y-%m-%d %H:%M:%S",tz = "UTC"))
recessiontext=sapply(recession, function(x) x$getText())
recessiontext=unlist(recessiontext)
isretweet=sapply(recession, function(x) x$getIsRetweet())
retweeted=sapply(recession, function(x) x$getRetweeted())
retweetcount=sapply(recession, function(x) x$getRetweetCount())
favoritecount=sapply(recession, function(x) x$getFavoriteCount())
favorited=sapply(recession, function(x) x$getFavorited())
# Data Formation: one row per tweet with text, timestamp, location and
# engagement fields.
data=as.data.frame(cbind(tweet=recessiontext,date=recessiondate,lat=recessionlat,lon=recessionlon,
isretweet=isretweet,retweeted=retweeted, retweetcount=retweetcount,
favoritecount=favoritecount,favorited=favorited))
# Strip non-printable characters that break downstream text processing.
usableText=str_replace_all(data$tweet,"[^[:graph:]]", " ")
recessionData<-as.data.frame(usableText)
#View(recessionData)
recessionData$usableText<-as.character(recessionData$usableText)
# Numeric sentiment score per tweet (syuzhet::get_sentiment).
sentiment = get_sentiment(recessionData$usableText)
sentiment<-as.data.frame(sentiment)
# Attach the numeric sentiment scores to the cleaned tweet text.
# (Bug fix: the original called View(RecessionData) *before* RecessionData
# was created on the next line, which errors in a fresh session; that
# premature call has been removed.)
RecessionData<-cbind(sentiment,recessionData$usableText)
# Keep a numeric copy of the scores for the sign comparisons below.
# (Bug fix: the original compared the character-converted scores against
# numbers, which falls back to lexicographic string ordering -- e.g.
# "-0.5" < "0" holds only by accident of the collation order.)
sentiment_score <- as.numeric(sentiment$sentiment)
RecessionData$sentiment<-as.character(RecessionData$sentiment)
# Label each tweet by the sign of its sentiment score (vectorized
# replacement for the original element-by-element growing for-loop).
sentiment_label <- ifelse(sentiment_score == 0, "Neutral",
                          ifelse(sentiment_score < 0, "Negative", "Positive"))
View(sentiment_label)
View(RecessionData)
RecessionData1<-cbind(sentiment_label,recessionData$usableText)
# Build a text corpus for the word cloud.
# NOTE(review): RecessionData1 is a two-column cbind (label, text), so the
# corpus also ingests the sentiment labels themselves -- confirm whether
# VectorSource(recessionData$usableText) alone was intended.
corpus=Corpus(VectorSource(RecessionData1))
# Convert to lower-case
corpus=tm_map(corpus,tolower)
# Remove stopwords
corpus=tm_map(corpus,function(x) removeWords(x,stopwords()))
# convert corpus to a Plain Text Document
corpus=tm_map(corpus,PlainTextDocument)
col=brewer.pal(6,"Dark2")
wordcloud(corpus, min.freq=50, scale=c(5,2),rot.per = 0.25,
random.color=T, max.word=30, random.order=F,colors=col)
# Distribution of sentiment classes across all fetched tweets.
counts <- table(sentiment_label)
# NOTE(review): the title says "Recession 2008" but the tweets above were
# queried for "Demonetisation + India + 2016" -- the label looks stale.
barplot(counts, main="Sentiments of people on Recession 2008",
xlab="Sentiments", ylab="Number of tweets", col=c("Blue","red", "green"),
legend = rownames(counts), beside=TRUE)
write.csv(counts,"count_India.csv")
| /Economic Analysis/R scripts/Demonitization_Final.R | no_license | hinagandhi/Datascience-Projects | R | false | false | 5,088 | r | install.packages("twitteR")
install.packages("ROAuth")
install.packages("RCurl")
install.packages("stringr")
install.packages("tm")
install.packages("ggmap")
install.packages("dplyr")
install.packages("plyr")
install.packages("wordcloud")
install.packages(c("devtools", "rjson", "bit64", "httr"))
install_github("twitteR", username="geoffjentry")
install.packages("syuzhet")
library(sentimentr)
library(twitteR)
library(ROAuth)
require(RCurl)
library(stringr)
library(tm)
library(ggmap)
library(plyr)
library(dplyr)
library(tm)
library(wordcloud)
library(syuzhet)
# Setting the working directory
setwd('/Users/shivamgoel/Desktop/Final')
# Setting the authentication
api_key <- "R3qtsUiUr25g3EQ9ELhHrzbxm"
api_secret <- "o6qfWHddNfNclF9U1nMaH8stVYfX2gjsWt4rWrhhXnjUrcSUat"
access_token <- "1045087926-W5eIzwjZjfEaHCiRNTmKFaEYNBqA92gMv4XRqTz"
access_token_secret <- "zH4OoG6xPQfv7kgVQzWZRYJWzUBipaNkeaWLA0DHUlx0n"
# Authentication
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
save(setup_twitter_oauth, file="twitter authentication.Rdata")
N=2000 # tweets to request from each query
S=200 # radius in miles
#cities=DC,New York,San Fransisco,Colorado,Mountainview,Tampa,Austin,Boston,
# Seatle,Vegas,Montgomery,Phoenix,Little Rock,Atlanta,Springfield,
# Cheyenne,Bisruk,Helena,Springfield,Madison,Lansing,Salt Lake City,Nashville
# Jefferson City,Raleigh,Harrisburg,Boise,Lincoln,Salem,St. Paul
# Setting the latitudes and longitudes
lats=c(38.9,40.7,37.8,39,37.4,28,30,42.4,48,36,32.3,33.5,34.7,33.8,37.2,41.2,46.8,46.6,37.2,
43,42.7,40.8,36.2,38.6,35.8,40.3,43.6,40.8,44.9,44.9)
lons=c(-77,-74,-122,-105.5,-122,-82.5,-98,-71,-122,-115,-86.3,-112,-92.3,-84.4,-93.3,-104.8,
-100.8,-112, -93.3,-89,-84.5,-111.8,-86.8,-92.2,-78.6,-76.8,-116.2,-98.7,-123,-93)
recession <- NULL
# Getting the twitter data
recession=do.call(rbind,lapply(1:length(lats), function(i) searchTwitter('Demonetisation + India + 2016',
lang="en",n=N,resultType="recent", geocode=paste(lats[i],lons[i],paste0(S,"mi"),sep=","))))
# Getting the latitude and longitude of the tweet, the tweet, re-twitted and favorited count,
# the date and time it was twitted
#recession=do.call(rbind,searchTwitter('Recession + 2008',lang="en",n=N,resultType="recent"))
recessionlat=sapply(recession, function(x) as.numeric(x$getLatitude()))
recessionlat=sapply(recessionlat, function(z) ifelse(length(z)==0,NA,z))
recessionlon=sapply(recession, function(x) as.numeric(x$getLongitude()))
recessionlon=sapply(recessionlon, function(z) ifelse(length(z)==0,NA,z))
recessiondate=lapply(recession, function(x) x$getCreated())
recessiondate=sapply(recessiondate,function(x) strftime(x, format="%Y-%m-%d %H:%M:%S",tz = "UTC"))
recessiontext=sapply(recession, function(x) x$getText())
recessiontext=unlist(recessiontext)
isretweet=sapply(recession, function(x) x$getIsRetweet())
retweeted=sapply(recession, function(x) x$getRetweeted())
retweetcount=sapply(recession, function(x) x$getRetweetCount())
favoritecount=sapply(recession, function(x) x$getFavoriteCount())
favorited=sapply(recession, function(x) x$getFavorited())
# Data Formation
data=as.data.frame(cbind(tweet=recessiontext,date=recessiondate,lat=recessionlat,lon=recessionlon,
isretweet=isretweet,retweeted=retweeted, retweetcount=retweetcount,
favoritecount=favoritecount,favorited=favorited))
usableText=str_replace_all(data$tweet,"[^[:graph:]]", " ")
recessionData<-as.data.frame(usableText)
#View(recessionData)
recessionData$usableText<-as.character(recessionData$usableText)
sentiment = get_sentiment(recessionData$usableText)
sentiment<-as.data.frame(sentiment)
View(RecessionData)
RecessionData<-cbind(sentiment,recessionData$usableText)
RecessionData$sentiment<-as.character(RecessionData$sentiment)
sentiment_label=vector()
sentiment_label<-NULL
for(x in 1:nrow(RecessionData)){
if(RecessionData$sentiment[x]==0){
sentiment_label <- c(sentiment_label, "Neutral")
}else if(RecessionData$sentiment[x]< 0)
{
sentiment_label <- c(sentiment_label, "Negative")
}else if(RecessionData$sentiment[x] > 0)
{
sentiment_label <- c(sentiment_label, "Positive")
}
}
View(sentiment_label)
View(RecessionData)
RecessionData1<-cbind(sentiment_label,recessionData$usableText)
corpus=Corpus(VectorSource(RecessionData1))
# Convert to lower-case
corpus=tm_map(corpus,tolower)
# Remove stopwords
corpus=tm_map(corpus,function(x) removeWords(x,stopwords()))
# convert corpus to a Plain Text Document
corpus=tm_map(corpus,PlainTextDocument)
col=brewer.pal(6,"Dark2")
wordcloud(corpus, min.freq=50, scale=c(5,2),rot.per = 0.25,
random.color=T, max.word=30, random.order=F,colors=col)
counts <- table(sentiment_label)
barplot(counts, main="Sentiments of people on Recession 2008",
xlab="Sentiments", ylab="Number of tweets", col=c("Blue","red", "green"),
legend = rownames(counts), beside=TRUE)
write.csv(counts,"count_India.csv")
|
t2Grey <- function(B0, relax = TRUE) {
  # Linear model for grey matter: value = 1.74 * B0 + 7.77 (B0 presumably
  # the main field strength; vectorized over B0).  With relax = TRUE the
  # modeled value is returned directly; otherwise its reciprocal scaled by
  # 1000 is returned.
  modeled <- 1.74 * B0 + 7.77
  if (relax) {
    modeled
  } else {
    1 / modeled * 1000
  }
}
| /R/t2Grey.R | no_license | jonclayden/FIACH | R | false | false | 113 | r | t2Grey<-function(B0,relax=TRUE){
if(relax){return(1.74*B0+7.77)}else{
return(1/(1.74*B0+7.77)*1000)
}
}
|
## These two functions are all about finding the
## inverse of matrices and helping to avoid repetition
## on certain aspects like finding inverse
##makeCacheMatrix will get a matrix to perform the function
makeCacheMatrix <- function(x = matrix())
{
  # Wrap a matrix together with a cache slot for its inverse.  The four
  # closures returned in the list all share `x` and `cached_inv` through
  # this function's environment.
  cached_inv <- NULL
  set <- function(new_matrix)
  {
    x <<- new_matrix
    cached_inv <<- NULL  # replacing the matrix invalidates any stored inverse
  }
  get <- function() x
  setInverse <- function(inverse) cached_inv <<- inverse
  getInverse <- function() cached_inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## cacheSolve is to find the inverse of the given matrix,
##if no such matrix is given then it will pass on the previous value
cacheSolve <- function(x, ...)
{
  ## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  ## object).  A previously cached inverse is reused; otherwise the inverse
  ## is computed with solve() and stored back into the wrapper.
  cached <- x$getInverse()
  if (!is.null(cached))
  {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get())
  x$setInverse(fresh)
  fresh
}
| /cachematrix.R | no_license | PoojaRaju123/ProgrammingAssignment2 | R | false | false | 945 | r | ## These two functions are all about finding the
## inverse of matrices and helping to avoid repetition
## on certain aspects like finding inverse
##makeCacheMatrix will get a matrix to perform the function
makeCacheMatrix <- function(x = matrix())
{
inv <- NULL
set <- function(y)
{
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve is to find the inverse of the given matrix,
##if no such matrix is given then it will pass on the previous value
cacheSolve <- function(x, ...)
{
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv))
{
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setInverse(inv)
inv
}
|
\name{integrate_it-package}
\alias{integrate_it-package}
\alias{integrate_it}
\docType{package}
\title{
\packageTitle{integrate_it}
}
\description{
\packageDescription{integrate_it}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{integrate_it}
\packageIndices{integrate_it}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{integrate_it}
Maintainer: \packageMaintainer{integrate_it}
}
\references{
~~ Literature or other references for background information ~~
}
% ~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /integrate_it/man/integrate_it-package.Rd | no_license | ScottSolomon66/Applied-Statistical-Programming----Midterm | R | false | false | 832 | rd | \name{integrate_it-package}
\alias{integrate_it-package}
\alias{integrate_it}
\docType{package}
\title{
\packageTitle{integrate_it}
}
\description{
\packageDescription{integrate_it}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{integrate_it}
\packageIndices{integrate_it}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{integrate_it}
Maintainer: \packageMaintainer{integrate_it}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
require(data.table)
require(lubridate)
require(lfe)
require(stargazer)
# File locations for the hand-collected SEC comment-letter data.
dir_upload <- "./Projects/SEC Letter Project/Data After Review/IPO Uploads/"
# IPO sample and the comment-letter uploads matched to IPO filers.
ipo <- as.data.table(read.csv("./Projects/SEC Letter Project/Data After Review/ipo_20170510.csv"))
upload <- fread("./Projects/SEC Letter Project/Data After Review/upload_ipo_2004_2016.csv")
upload[, date := ymd(DATE_Filed)]
setkey(upload, CIK, date)
# Per-filer letter count and the "|"-separated set of distinct letter authors.
upload[, `:=` (n_letter = length(film), all_authors = paste0(unique(letter_author[!is.na(letter_author)]), collapse = "|")), by = CIK]
# Restrict to IPOs that received at least one letter, then attach metadata
# of the first letter; match() returns the first upload row per CIK, which
# is the earliest letter because `upload` is keyed on (CIK, date).
ipo <- ipo[Cik_SDC %in% upload$CIK]
m <- match(ipo$Cik_SDC, upload$CIK)
ipo[, `:=`(n_letters = upload$n_letter[m], first_letter = upload$out_filename[m], letter_author = upload$letter_author[m],
letter_author_all = upload$all_authors[m], letter_sender = upload$letter_sender[m], first_letter_date = upload$date[m],
registration_length = as.numeric(as.character(ymd(ipo$Issue_date) - ymd(ipo$Filing_date))), person_id = upload$letter_author[m])]
# Number of sample letters handled by the same SEC author.
ipo[, total_letter := .N, by = person_id]
#ipo[total_letter < 20, person_id := "dummy"]
# Bucket law firms into a 1-10 rank by how many sample IPOs they advised.
ipo[, n_law := .N, by = Law_firm]
ipo[, law_rank := 0]
ipo[n_law == 1, law_rank := 1]
ipo[n_law == 2, law_rank := 2]
ipo[n_law %in% 3:4, law_rank := 3]
ipo[n_law %in% 5:7, law_rank := 4]
ipo[n_law %in% 8:14, law_rank := 5]
ipo[n_law %in% 15:20, law_rank := 6]
ipo[n_law %in% 21:25, law_rank := 7]
ipo[n_law %in% 26:42, law_rank := 8]
ipo[n_law %in% 43:63, law_rank := 9]
ipo[n_law > 63, law_rank := 10]
# Full set of SEC letter uploads (all filers), used for workload measures.
upload_all <- fread("./Projects/SEC Letter Project/Data After Review/upload_all_with_signature.csv")
upload_all[, date := ymd(DATE_Filed)]
setkey(upload_all, CIK, date)
# Fall back to the letter sender when no author was parsed from the signature.
upload_all[is.na(letter_author) | letter_author == "NA", letter_author := letter_sender]
# For every IPO, count SEC-wide and author-specific letters (and their word
# counts) in the 30-day window starting at the S-1 filing date.
for(i in 1:length(ipo$Filing_date))
{
if(i %% 100 == 0) print(i)
fdate <- ymd(ipo$Filing_date[i]) - 0
idate <- ymd(ipo$Filing_date[i]) + 30
# Confidential-treatment ("CT ORDER") filings: shift the window back 30 days.
if(grepl("CT ORDER", ipo$S1_types[i]))
{
fdate <- fdate - 30
idate <- idate - 30
}
ind1 <- which(upload_all$date >= fdate & upload_all$date <= idate)
# NOTE(review): the sec_load/person_load columns do not exist before the
# first iteration; assigning `ipo$sec_load[i] <- ...` on a data.table
# without that column may warn or error depending on the data.table
# version -- confirm, or pre-initialize the four columns to NA_real_.
ipo$sec_load[i] <- length(ind1)
ipo$sec_load_words[i] <- sum(upload_all$n_words[ind1], na.rm = T)
name <- ipo$letter_author[i]
ind <- which(upload_all$letter_author %in% name)
ipo$person_load[i] <- length(intersect(ind1,ind))
ipo$person_load_words[i] <- sum(upload_all$n_words[intersect(ind1,ind)], na.rm = T)
}
# Word count of each IPO's first letter (match() returns the earliest row
# per CIK because upload_all is keyed on CIK and date).
ipo$first_letter_length <- upload_all$n_words[match(ipo$Cik_SDC, upload_all$CIK)]
### winsoring
require(psych)
# Winsorize at the 0.5% level in each tail (psych::winsor) and build the
# (mostly logged) regression covariates; `proxy` flags letters whose sender
# and parsed author coincide.
a <- 0.005
ipo[, `:=` (log_sale = winsor(log(1+sale), a), n_segments = winsor(n_segments, a),
age = winsor(log(1+Year - founding_year), a),
UW_rank = winsor(UW_rank, a), price_update = winsor(price_update, a),
log_S1_words = winsor(log(1+S1_words), a), S1_un = winsor(S1_uncertanty, a),
F_score = winsor(F_score, a), log_words = winsor(log(1+first_letter_length), a),
log_n_letters = winsor(log(1 + n_letters), a),
registration_length = winsor(log(registration_length), a),
log_sec_load_letters = winsor(log(sec_load), a),
log_person_load_letters = winsor(log(1 + person_load), a),
log_sec_load_words = winsor(log(1 + sec_load_words), a),
log_person_load_words = winsor(log(1 + person_load_words), a),
proxy = letter_sender == letter_author)]
# Drop observations whose first letter is implausibly short (<= 10 words).
ipo <- ipo[first_letter_length > 10]
#### regression
reg_line <- function(x, end)
{
  # Build the four regression formula strings for response variable `x`:
  # one per SEC-workload measure, each followed by the shared tail `end`
  # (controls, fixed effects, and clustering for felm).  Growth via `[[<-`
  # on NULL is kept so the return type matches the original exactly.
  load.measures <- c("log_sec_load_letters", "log_sec_load_words",
                     "log_person_load_letters", "log_person_load_words")
  line <- NULL
  for (measure in load.measures) {
    line[[length(line) + 1]] <- paste0(x, " ~ ", measure, end)
  }
  return(line)
}
# Common right-hand-side tail: controls, then lfe::felm's multi-part formula
# `| fixed effects | IV | cluster` (Year and FF_48 fixed effects, no
# instruments, clustering by FF_48 industry).
end <- " + log_sale + n_segments + age + UW_rank + law_rank + VC + JOBS + log_S1_words +
S1_un|Year + FF_48|0|FF_48"
# One set of four formulas (one per workload measure) per dependent variable.
line_letters <- reg_line("log_n_letters", end)
line_words <- reg_line("log_words", end)
line_length <- reg_line("registration_length", end)
line_update <- reg_line("price_update", end)
line_ir <- reg_line("IR", end)
line_vol <- reg_line("vol", end)
# Alternative excluding confidential (CT ORDER) filings, kept for reference:
#my_felm <- function(x) felm(as.formula(unlist(x)),
# data = ipo[!grepl("CT ORDER", ipo$S1_types),])
# Fit one felm model per formula string.
my_felm <- function(x) felm(as.formula(unlist(x)), data = ipo)
model_letters <- lapply(line_letters, my_felm)
model_length <- lapply(line_length, my_felm)
model_words <- lapply(line_words, my_felm)
model_update <- lapply(line_update, my_felm)
model_ir <- lapply(line_ir, my_felm)
model_vol <- lapply(line_vol, my_felm)
# Extra table row noting the fixed effects, plus display names for the
# coefficients (order must match the regressors in `end`).
FE_line <- list(c("Industry and Year FE", rep("YES", 4)))
varnames <- c("SEC load, letters", "SEC load, words", "Person load, letters", "Person load, words",
"Sales", "Number of Segments", "Age", "UW Rank", "Law Firm Rank", "VC Dummy",
"JOBS Act Dummy", "Prospectus Length", "Prospectus Uncertanty")
out_type = "latex"
# One stargazer panel per dependent variable.
out1 <- stargazer(model_letters, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Number of SEC Letters", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out2 <- stargazer(model_words, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Number of Words in the 1st SEC Letter", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out3 <- stargazer(model_length, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Registration Length", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out4 <- stargazer(model_update, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Price Update", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out5 <- stargazer(model_ir, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Initial Returns", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out6 <- stargazer(model_vol, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Volatility", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
# Assemble all panels into a single LaTeX document and compile to PDF.
start <- readLines("./R codes/table_start.tex")
file <- c(start, out1,out2,out3,out4, out5, out6, "\\end{document}")
write(file, "file.tex")
require(tools)
texi2pdf("file.tex", clean = T)
| /testing.R | no_license | pmav99/SEC_Letters_Codes | R | false | false | 6,377 | r | require(data.table)
# ---- Data loading and preparation for the SEC comment-letter IPO analysis ----
require(lubridate)
require(lfe)
require(stargazer)
dir_upload <- "./Projects/SEC Letter Project/Data After Review/IPO Uploads/"
# IPO deal data (SDC) and SEC comment-letter uploads per registrant (CIK).
ipo <- as.data.table(read.csv("./Projects/SEC Letter Project/Data After Review/ipo_20170510.csv"))
upload <- fread("./Projects/SEC Letter Project/Data After Review/upload_ipo_2004_2016.csv")
upload[, date := ymd(DATE_Filed)]
setkey(upload, CIK, date)
# Per CIK: number of letters and the pipe-separated set of distinct letter authors.
upload[, `:=` (n_letter = length(film), all_authors = paste0(unique(letter_author[!is.na(letter_author)]), collapse = "|")), by = CIK]
# Keep only IPOs that received at least one SEC letter.
ipo <- ipo[Cik_SDC %in% upload$CIK]
# Because `upload` is keyed by (CIK, date), match() picks each CIK's FIRST
# (earliest) letter row.
m <- match(ipo$Cik_SDC, upload$CIK)
ipo[, `:=`(n_letters = upload$n_letter[m], first_letter = upload$out_filename[m], letter_author = upload$letter_author[m],
letter_author_all = upload$all_authors[m], letter_sender = upload$letter_sender[m], first_letter_date = upload$date[m],
registration_length = as.numeric(as.character(ymd(ipo$Issue_date) - ymd(ipo$Filing_date))), person_id = upload$letter_author[m])]
ipo[, total_letter := .N, by = person_id]
#ipo[total_letter < 20, person_id := "dummy"]
# Law-firm "rank": bucket firms into deciles by their deal count in the sample.
ipo[, n_law := .N, by = Law_firm]
ipo[, law_rank := 0]
ipo[n_law == 1, law_rank := 1]
ipo[n_law == 2, law_rank := 2]
ipo[n_law %in% 3:4, law_rank := 3]
ipo[n_law %in% 5:7, law_rank := 4]
ipo[n_law %in% 8:14, law_rank := 5]
ipo[n_law %in% 15:20, law_rank := 6]
ipo[n_law %in% 21:25, law_rank := 7]
ipo[n_law %in% 26:42, law_rank := 8]
ipo[n_law %in% 43:63, law_rank := 9]
ipo[n_law > 63, law_rank := 10]
# ---- Workload measures: SEC-wide and reviewer-specific letter volume ----
upload_all <- fread("./Projects/SEC Letter Project/Data After Review/upload_all_with_signature.csv")
upload_all[, date := ymd(DATE_Filed)]
setkey(upload_all, CIK, date)
# Fall back to the letter sender when the parsed author is missing.
upload_all[is.na(letter_author) | letter_author == "NA", letter_author := letter_sender]
# For each IPO, count letters (and their words) sent in a 30-day window around
# the filing date — overall SEC load and load of this IPO's assigned reviewer.
for(i in 1:length(ipo$Filing_date))
{
if(i %% 100 == 0) print(i)
fdate <- ymd(ipo$Filing_date[i]) - 0
idate <- ymd(ipo$Filing_date[i]) + 30
# Confidential-treatment filings surface ~30 days late; shift the window back.
if(grepl("CT ORDER", ipo$S1_types[i]))
{
fdate <- fdate - 30
idate <- idate - 30
}
ind1 <- which(upload_all$date >= fdate & upload_all$date <= idate)
ipo$sec_load[i] <- length(ind1)
ipo$sec_load_words[i] <- sum(upload_all$n_words[ind1], na.rm = T)
name <- ipo$letter_author[i]
ind <- which(upload_all$letter_author %in% name)
# Reviewer-specific load = letters in the window written by this IPO's reviewer.
ipo$person_load[i] <- length(intersect(ind1,ind))
ipo$person_load_words[i] <- sum(upload_all$n_words[intersect(ind1,ind)], na.rm = T)
}
# Length (in words) of each IPO's first SEC letter.
ipo$first_letter_length <- upload_all$n_words[match(ipo$Cik_SDC, upload_all$CIK)]
### winsoring
# Winsorize regression variables at the 0.5% level (psych::winsor) and build
# log transforms of the skewed counts.
require(psych)
a <- 0.005
ipo[, `:=` (log_sale = winsor(log(1+sale), a), n_segments = winsor(n_segments, a),
age = winsor(log(1+Year - founding_year), a),
UW_rank = winsor(UW_rank, a), price_update = winsor(price_update, a),
log_S1_words = winsor(log(1+S1_words), a), S1_un = winsor(S1_uncertanty, a),
F_score = winsor(F_score, a), log_words = winsor(log(1+first_letter_length), a),
log_n_letters = winsor(log(1 + n_letters), a),
registration_length = winsor(log(registration_length), a),
log_sec_load_letters = winsor(log(sec_load), a),
log_person_load_letters = winsor(log(1 + person_load), a),
log_sec_load_words = winsor(log(1 + sec_load_words), a),
log_person_load_words = winsor(log(1 + person_load_words), a),
proxy = letter_sender == letter_author)]
# Drop observations whose first letter is implausibly short (parsing noise).
ipo <- ipo[first_letter_length > 10]
#### regression
# Build the baseline regression formula strings for one dependent variable.
#
# x     - character scalar: name of the dependent variable.
# end   - character scalar: tail of the formula (controls, fixed effects, IV
#         and cluster parts, in lfe::felm notation).
# loads - character vector of workload-proxy regressors; one formula is built
#         per proxy. The default reproduces the original four specifications,
#         so existing calls reg_line(x, end) are unchanged.
#
# Returns a list of formula strings (one element per proxy).
reg_line <- function(x, end,
                     loads = c("log_sec_load_letters", "log_sec_load_words",
                               "log_person_load_letters", "log_person_load_words"))
{
  lapply(loads, function(load) paste0(x, " ~ ", load, end))
}
# Common right-hand side: controls | year & industry FE | no IV | cluster on FF_48.
end <- " + log_sale + n_segments + age + UW_rank + law_rank + VC + JOBS + log_S1_words +
S1_un|Year + FF_48|0|FF_48"
# One list of four formulas (one per workload proxy) per outcome variable.
line_letters <- reg_line("log_n_letters", end)
line_words <- reg_line("log_words", end)
line_length <- reg_line("registration_length", end)
line_update <- reg_line("price_update", end)
line_ir <- reg_line("IR", end)
line_vol <- reg_line("vol", end)
#my_felm <- function(x) felm(as.formula(unlist(x)),
# data = ipo[!grepl("CT ORDER", ipo$S1_types),])
# Fit a single specification with lfe::felm on the full ipo sample.
my_felm <- function(x) felm(as.formula(unlist(x)), data = ipo)
model_letters <- lapply(line_letters, my_felm)
model_length <- lapply(line_length, my_felm)
model_words <- lapply(line_words, my_felm)
model_update <- lapply(line_update, my_felm)
model_ir <- lapply(line_ir, my_felm)
model_vol <- lapply(line_vol, my_felm)
# Extra table row flagging the fixed effects, plus display labels for the
# covariates (order must match the regressors across the four columns).
FE_line <- list(c("Industry and Year FE", rep("YES", 4)))
varnames <- c("SEC load, letters", "SEC load, words", "Person load, letters", "Person load, words",
"Sales", "Number of Segments", "Age", "UW Rank", "Law Firm Rank", "VC Dummy",
"JOBS Act Dummy", "Prospectus Length", "Prospectus Uncertanty")
out_type = "latex"
# ---- Render the six regression tables and compile them into one PDF ----
out1 <- stargazer(model_letters, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Number of SEC Letters", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out2 <- stargazer(model_words, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Number of Words in the 1st SEC Letter", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out3 <- stargazer(model_length, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Registration Length", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out4 <- stargazer(model_update, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Price Update", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out5 <- stargazer(model_ir, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Initial Returns", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
out6 <- stargazer(model_vol, type = out_type, omit.stat = c("ser", "f"),
dep.var.caption = "Volatility", dep.var.labels.include = F,
add.lines = FE_line, covariate.labels = varnames)
# table_start.tex is expected to hold the LaTeX preamble; the document is
# closed here and compiled to PDF with tools::texi2pdf.
start <- readLines("./R codes/table_start.tex")
file <- c(start, out1,out2,out3,out4, out5, out6, "\\end{document}")
write(file, "file.tex")
require(tools)
texi2pdf("file.tex", clean = T)
|
#' Wrapper function of \code{MatH} class
#'
#' This function creates a matrix of histogram data, i.e. a \code{MatH}
#' object
#'
#' @name MatH
#' @rdname MatH-class
#' @export
#' @param x (optional, default= an empty \code{distributionH} object) a list of
#' \code{distributionH} objects
#' @param nrows (optional, default=1)an integer, the number of rows.
#' @param ncols (optional, default=1) an integer, the number of columns (aka
#' variables).
#' @param rownames (optional, default=NULL) a list of strings containing the
#' names of the rows.
#' @param varnames (optional, default=NULL) a list of strings containing the
#' names of the columns (aka variables).
#' @param by.row (optional, default=FALSE) a logical value, TRUE the matrix is
#' row wise filled, FALSE the matrix is filled column wise.
#' @return A \code{matH} object
#' @examples
#'
#' # building an empty 10 by 4 matrix of histograms
#' MAT <- MatH(nrows = 10, ncols = 4)
MatH <- function(x = NULL, nrows = 1, ncols = 1, rownames = NULL, varnames = NULL, by.row = FALSE) {
  # Delegate straight to the S4 constructor; no intermediate variable needed.
  new("MatH",
    nrows = nrows,
    ncols = ncols,
    ListOfDist = x,
    names.rows = rownames,
    names.cols = varnames,
    by.row = by.row
  )
}
# overriding of "[" operator for MatH object ----
#' extract from a MatH Method [
#' @name [
#' @rdname extract-methods
#' @aliases [,MatH,ANY,ANY,ANY-method
#' [,MatH-method
#' @description This method overrides the "[" operator for a \code{matH} object.
#' @param x a \code{matH} object
#' @param i a set of integer values identifying the rows
#' @param j a set of integer values identifying the columns
#' @param ... not useful
#' @param drop a logical value inherited from the basic method "[" but not used (default=TRUE)
#' @return A \code{matH} object
#' @examples
#' D <- BLOOD # the BLOOD dataset
#' SUB_D <- BLOOD[c(1, 2, 5), c(1, 2)]
#' @importFrom stats variable.names
#' @export
setMethod(
  "[",
  signature(x = "MatH"),
  function(x, i, j, ..., drop = TRUE) {
    # A missing index means "all rows"/"all columns". seq_len() is safe for a
    # zero-extent matrix, where 1:nrow(x@M) would wrongly yield c(1, 0).
    if (missing(i)) i <- seq_len(nrow(x@M))
    if (missing(j)) j <- seq_len(ncol(x@M))
    # Negative (exclusion-style) indexing is not supported for MatH objects.
    # Scalar short-circuit || is the right operator for this if() condition.
    if (min(i) <= 0 || min(j) <= 0) {
      stop("negative indexes are not allowed in subsetting [,] a MatH object")
    }
    # Rebuild the internal list-matrix explicitly so that dimensions and
    # dimnames survive even when a single row or column is selected.
    x@M <- matrix(x@M[i, j],
      nrow = length(i), ncol = length(j),
      dimnames = list(row.names(x@M)[i], colnames(x@M)[j])
    )
    return(x)
  }
)
# methods for getting information from a MatH
setGeneric("get.MatH.nrows", function(object) standardGeneric("get.MatH.nrows"))
#' Method get.MatH.nrows
#' @name get.MatH.nrows
#' @description It returns the number of rows of a \code{MatH} object
#' @param object a \code{MatH} object
#' @return An integer, the number of rows.
#' @exportMethod get.MatH.nrows
#' @rdname get.MatH.nrows-methods
#' @aliases get.MatH.nrows,MatH-method
setMethod(
  f = "get.MatH.nrows", signature = c(object = "MatH"),
  function(object) {
    # Row count of the internal list-matrix of distributions.
    dim(object@M)[[1L]]
  }
)
#' Method get.MatH.ncols
#' @name get.MatH.ncols
#' @rdname get.MatH.ncols-methods
#' @exportMethod get.MatH.ncols
setGeneric("get.MatH.ncols", function(object) standardGeneric("get.MatH.ncols"))
#' @rdname get.MatH.ncols-methods
#' @aliases get.MatH.ncols,MatH-method
#' @description It returns the number of columns of a \code{MatH} object
#' @param object a \code{MatH} object
#' @return An integer, the number of columns.
setMethod(
  f = "get.MatH.ncols", signature = c(object = "MatH"),
  function(object) {
    # Column (variable) count of the internal list-matrix of distributions.
    dim(object@M)[[2L]]
  }
)
#' Method get.MatH.rownames
#' @name get.MatH.rownames
#' @rdname get.MatH.rownames-methods
#' @exportMethod get.MatH.rownames
setGeneric("get.MatH.rownames", function(object) standardGeneric("get.MatH.rownames"))
#' @rdname get.MatH.rownames-methods
#' @aliases get.MatH.rownames,MatH-method
#' @description It returns the labels of the rows of a \code{MatH} object
#' @param object a \code{MatH} object
#' @return A vector of char, the label of the rows.
setMethod(
  f = "get.MatH.rownames", signature = c(object = "MatH"),
  function(object) {
    # Row labels of the internal matrix (NULL when no dimnames are set).
    labels <- row.names(object@M)
    labels
  }
)
#' Method get.MatH.varnames
#' @name get.MatH.varnames
#' @rdname get.MatH.varnames-methods
#' @exportMethod get.MatH.varnames
setGeneric("get.MatH.varnames", function(object) standardGeneric("get.MatH.varnames"))
#' @rdname get.MatH.varnames-methods
#' @aliases get.MatH.varnames,MatH-method
#' @description It returns the labels of the columns, or the names of the variables, of a \code{MatH} object
#' @param object a \code{MatH} object
#' @return A vector of char, the labels of the columns, or the names of the variables.
setMethod(
  f = "get.MatH.varnames", signature = c(object = "MatH"),
  function(object) {
    # Column (variable) labels of the internal matrix (NULL when unset).
    labels <- colnames(object@M)
    labels
  }
)
#' Method get.MatH.main.info
#' @name get.MatH.main.info
#' @rdname get.MatH.main.info-methods
#' @exportMethod get.MatH.main.info
setGeneric("get.MatH.main.info", function(object) standardGeneric("get.MatH.main.info"))
#' @rdname get.MatH.main.info-methods
#' @aliases get.MatH.main.info,MatH-method
#' @description It returns the number of rows, of columns the labels of rows and columns of a \code{MatH} object.
#' @param object a \code{MatH} object
#' @return A list of char, the labels of the columns, or the names of the variables.
#' @slot nrows - the number of rows
#' @slot ncols - the number of columns
#' @slot rownames - a vector of char, the names of rows
#' @slot varnames - a vector of char, the names of columns
#'
setMethod(
  f = "get.MatH.main.info", signature = c(object = "MatH"),
  function(object) {
    # Read the internal matrix once and report its shape and labels.
    M <- object@M
    list(
      nrows = nrow(M), ncols = ncol(M),
      rownames = rownames(M), varnames = colnames(M)
    )
  }
)
#' Method get.MatH.stats
#' @name get.MatH.stats
#' @rdname get.MatH.stats-methods
#' @exportMethod get.MatH.stats
setGeneric("get.MatH.stats", function(object, ...) standardGeneric("get.MatH.stats"))
#' @rdname get.MatH.stats-methods
#' @aliases get.MatH.stats,MatH-method
#' @description It returns statistics for each distribution contained in a \code{MatH} object.
#' @param object a \code{MatH} object
#' @param ... a set of other parameters
#' @param stat (optional) a string containing the required statistic. Default='mean'\cr
#' - \code{stat='mean'} - for computing the mean of each histogram\cr
#' - \code{stat='median'} - for computing the median of each histogram\cr
#' - \code{stat='min'} - for computing the minimum of each histogram\cr
#' - \code{stat='max'} - for computing the maximum of each histogram\cr
#' - \code{stat='std'} - for computing the standard deviatio of each histogram\cr
#' - \code{stat='skewness'} - for computing the skewness of each histogram\cr
#' - \code{stat='kurtosis'} - for computing the kurtosis of each histogram\cr
#' - \code{stat='quantile'} - for computing the quantile ot level \code{prob} of each histogram\cr
#' @param prob (optional)a number between 0 and 1 for computing the value once choosen the \code{'quantile'} option for \code{stat}.
#' @return A list
#' @slot stat - the chosen statistic
#' @slot prob - level of probability if stat='quantile'
#' @slot MAT - a matrix of values
#' @examples
#' get.MatH.stats(BLOOD) # the means of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "median") # the medians of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "quantile", prob = 0.5) # the same as median
#' get.MatH.stats(BLOOD, stat = "min") # minima of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "quantile", prob = 0) # the same as min
#' get.MatH.stats(BLOOD, stat = "max") # maxima of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "quantile", prob = 1) # the same as max
#' get.MatH.stats(BLOOD, stat = "std") # standard deviations of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "skewness") # skewness indices of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "kurtosis") # kurtosis indices of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "quantile", prob = 0.05)
#' # the fifth percentiles of distributions in BLOOD dataset
setMethod(
  f = "get.MatH.stats", signature = c(object = "MatH"),
  function(object, stat = "mean", prob = 0.5) {
    n_r <- get.MatH.nrows(object)
    n_c <- get.MatH.ncols(object)
    # Result matrix, pre-filled with NA and labelled like the input MatH.
    MAT <- matrix(NA, n_r, n_c)
    rownames(MAT) <- get.MatH.rownames(object)
    colnames(MAT) <- get.MatH.varnames(object)
    for (i in 1:n_r) {
      for (j in 1:n_c) {
        # Extract the distributionH stored in cell (i, j); empty
        # distributions (no breakpoints) are skipped and stay NA.
        h <- object@M[i, j][[1]]
        if (length(h@x) > 0) {
          MAT[i, j] <- switch(stat,
            mean = h@m,
            std = h@s,
            skewness = skewH(h),
            kurtosis = kurtH(h),
            median = compQ(object = h, p = 0.5),
            quantile = compQ(object = h, p = prob),
            min = compQ(object = h, p = 0),
            max = compQ(object = h, p = 1),
            NA # unknown stat: leave the cell NA, as the original if-chain did
          )
        }
      }
    }
    # The quantile statistic additionally reports the probability level used.
    if (stat == "quantile") {
      list(stat = stat, prob = prob, mat = MAT)
    } else {
      list(stat = stat, mat = MAT)
    }
  }
)
# methods for collating by row or by column two MatHs ----
#' Method WH.bind.row
#' @name WH.bind.row
#' @rdname WH.bind.row-methods
#' @exportMethod WH.bind.row
setGeneric("WH.bind.row", function(object1, object2) standardGeneric("WH.bind.row")) # row-wise binding of two MatH
#' Method WH.bind.col
#' @name WH.bind.col
#' @rdname WH.bind.col-methods
#' @exportMethod WH.bind.col
setGeneric("WH.bind.col", function(object1, object2) standardGeneric("WH.bind.col")) # column-wise binding of two MatH
#' Method WH.bind
#' @name WH.bind
#' @rdname WH.bind-methods
#' @exportMethod WH.bind
setGeneric("WH.bind", function(object1, object2, byrow) standardGeneric("WH.bind")) # dispatcher for the two binds
#' @rdname WH.bind.row-methods
#' @aliases WH.bind.row,MatH-method
#' @description It attaches two \code{MatH} objects with the same columns by row.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @return a \code{MatH} object,
#' @examples
#' M1 <- BLOOD[1:3, ]
#' M2 <- BLOOD[5:8, ]
#' MAT <- WH.bind.row(M1, M2)
setMethod(
  f = "WH.bind.row", signature = c(object1 = "MatH", object2 = "MatH"),
  function(object1, object2) {
    # Row-binding only requires column conformity; row counts may differ
    # (the unused nrow computations of the original were removed).
    if (ncol(object1@M) != ncol(object2@M)) {
      stop("The two matrix must have the same number of columns")
    }
    # Stack the internal list-matrices; slots other than @M are taken
    # from object1, as before.
    object1@M <- rbind(object1@M, object2@M)
    return(object1)
  }
)
#' @rdname WH.bind.col-methods
#' @aliases WH.bind.col,MatH-method
#' @description It attaches two \code{MatH} objects with the same rows by colums.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @return a \code{MatH} object,
#' @examples
#' M1 <- BLOOD[1:10, 1]
#' M2 <- BLOOD[1:10, 3]
#' MAT <- WH.bind.col(M1, M2)
setMethod(
  f = "WH.bind.col", signature = c(object1 = "MatH", object2 = "MatH"),
  function(object1, object2) {
    # Column-binding only requires row conformity; column counts may differ
    # (the unused ncol computations of the original were removed).
    if (nrow(object1@M) != nrow(object2@M)) {
      stop("The two matrix must have the same number of rows")
    }
    # Attach the columns of object2 to object1's internal list-matrix.
    object1@M <- cbind(object1@M, object2@M)
    return(object1)
  }
)
#' @rdname WH.bind-methods
#' @aliases WH.bind,MatH-method
#' @description It attaches two \code{MatH} objects with the same columns by row, or the same rows by colum.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param byrow a logical value (default=TRUE) attaches the objects by row
#' @return a \code{MatH} object,
#' @examples
#' # binding by row
#' M1 <- BLOOD[1:10, 1]
#' M2 <- BLOOD[1:10, 3]
#' MAT <- WH.bind(M1, M2, byrow = TRUE)
#' # binding by col
#' M1 <- BLOOD[1:10, 1]
#' M2 <- BLOOD[1:10, 3]
#' MAT <- WH.bind(M1, M2, byrow = FALSE)
#' @seealso \code{\link{WH.bind.row}} for binding by row, \code{\link{WH.bind.col}} for binding by column
setMethod(
  f = "WH.bind", signature = c(object1 = "MatH", object2 = "MatH"),
  function(object1, object2, byrow = TRUE) {
    # Pure dispatcher: the dimension checks live in the two workers, so the
    # dead nrow/ncol computations of the original were removed. `byrow` is a
    # logical scalar, hence the plain if (byrow) instead of byrow == TRUE.
    if (byrow) {
      NewMat <- WH.bind.row(object1, object2)
    } else {
      NewMat <- WH.bind.col(object1, object2)
    }
    return(NewMat)
  }
)
# methods for MatH based on the L2 Wasserstein distance between distributions ----
#' Method WH.mat.sum
#' @name WH.mat.sum
#' @rdname WH.mat.sum-methods
#' @exportMethod WH.mat.sum
setGeneric("WH.mat.sum", function(object1, object2) standardGeneric("WH.mat.sum")) # element-wise matrix sum
#' Method WH.mat.prod
#' @name WH.mat.prod
#' @rdname WH.mat.prod-methods
#' @exportMethod WH.mat.prod
setGeneric("WH.mat.prod", function(object1, object2, ...) standardGeneric("WH.mat.prod")) # matrix product via Wasserstein dot product
#' @rdname WH.mat.sum-methods
#' @aliases WH.mat.sum,MatH-method
#' @description It sums two \code{MatH} objects, i.e. two matrices of distributions,
#' by summing the quantile functions of histograms. This sum is consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @return a \code{MatH} object,
#' @examples
#' # summing two matrices of histograms, cell by cell
#' M1 <- BLOOD[1:5, ]
#' M2 <- BLOOD[6:10, ]
#' MAT <- WH.mat.sum(M1, M2)
setMethod(
  f = "WH.mat.sum", signature = c(object1 = "MatH", object2 = "MatH"),
  # Element-wise sum of two MatH objects under the L2 Wasserstein geometry.
  function(object1, object2) {
    # Validate first. (The original computed nrows2/ncols2 from object1 by
    # mistake; they were dead variables and have been removed.)
    if (!identical(dim(object1@M), dim(object2@M))) {
      stop("the two matrices must be of the same dimension")
    }
    nrows <- nrow(object1@M)
    ncols <- ncol(object1@M)
    MATS <- object1
    # Scratch 1x2 MatH: registerMH() puts the two cell distributions on a
    # common grid of cumulative probabilities before summing quantiles.
    TMP <- new("MatH", 1, 2)
    for (r in 1:nrows) {
      for (c in 1:ncols) {
        TMP@M[1, 1][[1]] <- object1@M[r, c][[1]]
        TMP@M[1, 2][[1]] <- object2@M[r, c][[1]]
        TMP <- registerMH(TMP)
        # Sum of quantile functions (slot @x) on the shared grid (slot @p);
        # the mean of the sum is the sum of the means.
        MATS@M[r, c][[1]] <- new(
          "distributionH",
          (TMP@M[1, 1][[1]]@x + TMP@M[1, 2][[1]]@x),
          TMP@M[1, 1][[1]]@p,
          (TMP@M[1, 1][[1]]@m + TMP@M[1, 2][[1]]@m)
        )
      }
    }
    return(MATS)
  }
)
#' @rdname WH.mat.prod-methods
#' @aliases WH.mat.prod,MatH-method
#' @description It is the matrix product of two \code{MatH} objects, i.e. two matrices of distributions,
#' by using the dot product of two histograms that is consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param ... other optional parameters
#' @param traspose1 a logical value, default=FALSE. If TRUE trasposes object1
#' @param traspose2 a logical value, default=FALSE. If TRUE trasposes object2
#' @return a matrix of numbers
#' @examples
#'
#' M1 <- BLOOD[1:5, ]
#' M2 <- BLOOD[6:10, ]
#' MAT <- WH.mat.prod(M1, M2, traspose1 = TRUE, traspose2 = FALSE)
setMethod(
  f = "WH.mat.prod", signature = c(object1 = "MatH", object2 = "MatH"),
  # Matrix product of two MatH objects: a numeric matrix whose (r, c) entry
  # is the sum over k of the Wasserstein dot products dotpW(A[r,k], B[k,c]).
  function(object1, object2, traspose1 = FALSE, traspose2 = FALSE) {
    # Optional transposition of either operand before multiplying.
    if (traspose1) {
      object1@M <- t(object1@M)
    }
    if (traspose2) {
      object2@M <- t(object2@M)
    }
    nrows1 <- nrow(object1@M)
    ncols1 <- ncol(object1@M)
    nrows2 <- nrow(object2@M)
    ncols2 <- ncol(object2@M)
    # Usual conformity requirement for a matrix product.
    if (ncols1 != nrows2) {
      # Fixed typo in the user-facing diagnostic ("Fisrt" -> "First").
      cat(
        "First matrix dimensions ", nrows1, "x", ncols1, "\n",
        "Second matrix dimensions ", nrows2, "x", ncols2, "\n"
      )
      stop("Dimensions of matrices are not compatible")
    }
    MAT <- matrix(0, nrows1, ncols2)
    for (r in 1:nrows1) {
      for (c in 1:ncols2) {
        for (els in 1:ncols1) {
          MAT[r, c] <- MAT[r, c] + dotpW(object1@M[r, els][[1]], object2@M[els, c][[1]])
        }
      }
    }
    return(MAT)
  }
)
# L2 Wasserstein basic operations and basic statistics for matrices of distributionH ----
#' Method WH.vec.sum
#' @name WH.vec.sum
#' @rdname WH.vec.sum-methods
#' @exportMethod WH.vec.sum
setGeneric("WH.vec.sum", function(object, ...) standardGeneric("WH.vec.sum")) # weighted sum of a vector of distributionH
#' Method WH.vec.mean
#' @name WH.vec.mean
#' @rdname WH.vec.mean-methods
#' @exportMethod WH.vec.mean
setGeneric("WH.vec.mean", function(object, ...) standardGeneric("WH.vec.mean")) # weighted mean of a vector of distributionH
#' Method WH.SSQ
#' @name WH.SSQ
#' @rdname WH.SSQ-methods
#' @exportMethod WH.SSQ
setGeneric("WH.SSQ", function(object, ...) standardGeneric("WH.SSQ")) # weighted sum-of-squares (deviance/codeviance) matrix
#' Method WH.var.covar
#' @name WH.var.covar
#' @rdname WH.var.covar-methods
#' @exportMethod WH.var.covar
setGeneric("WH.var.covar", function(object, ...) standardGeneric("WH.var.covar")) # weighted variance-covariance matrix
#' Method WH.correlation
#' @name WH.correlation
#' @rdname WH.correlation-methods
#' @exportMethod WH.correlation
setGeneric("WH.correlation", function(object, ...) standardGeneric("WH.correlation")) # weighted correlation matrix
#' Method WH.SSQ2
#' @name WH.SSQ2
#' @rdname WH.SSQ2-methods
#' @exportMethod WH.SSQ2
setGeneric("WH.SSQ2", function(object1, object2, ...) standardGeneric("WH.SSQ2")) # two-sample weighted sum-of-squares matrix
#' Method WH.var.covar2
#' @name WH.var.covar2
#' @rdname WH.var.covar2-methods
#' @exportMethod WH.var.covar2
setGeneric("WH.var.covar2", function(object1, object2, ...) standardGeneric("WH.var.covar2")) # two-sample weighted covariance matrix
#' Method WH.correlation2
#' @name WH.correlation2
#' @rdname WH.correlation2-methods
#' @exportMethod WH.correlation2
setGeneric("WH.correlation2", function(object1, object2, ...) standardGeneric("WH.correlation2")) # two-sample weighted correlation matrix
#' @rdname WH.vec.sum-methods
#' @aliases WH.vec.sum,MatH-method
#' @description Compute a histogram that is the weighted sum of the set of histograms contained
#' in a \code{MatH} object, i.e. a matrix of histograms, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... optional arguments
#' @param w it is possible to add a vector of weights (positive numbers) having the same size of the \code{MatH object},
#' default = equal weights for all cells
#' @return a \code{distributionH} object, i.e. a histogram
#' @examples
#' hsum <- WH.vec.sum(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD) * get.MatH.ncols(BLOOD))
#' hsum <- WH.vec.sum(BLOOD, w = RN)
#' ### SUM of distributions ----
setMethod(
  f = "WH.vec.sum", signature = c(object = "MatH"),
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    ncols <- ncol(object@M)
    nelem <- nrows * ncols
    if (missing(w)) {
      # Default: unweighted sum, one unit weight per cell.
      w <- rep(1, nelem)
    }
    else {
      if (length(object@M) != length(w)) {
        # Fixed typo in the error message ("Wheights" -> "Weights").
        stop("Weights must have the same dimensions of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows, ncols)
    # Start from the degenerate distribution at 0; '+' and '*' are the
    # Wasserstein-consistent operators defined for distributionH objects.
    SUM <- new("distributionH", c(0, 0), c(0, 1))
    for (c in 1:ncols) {
      for (r in 1:nrows) {
        SUM <- SUM + w[r, c] * object@M[r, c][[1]]
      }
    }
    return(SUM)
  }
)
#' @rdname WH.vec.mean-methods
#' @aliases WH.vec.mean,MatH-method
#' @description Compute a histogram that is the weighted mean of the set of histograms contained
#' in a \code{MatH} object, i.e. a matrix of histograms, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... optional arguments
#' @param w it is possible to add a vector of weights (positive numbers) having the same size of
#' the \code{MatH object}, default = equal weights for all
#' @return a \code{distributionH} object, i.e. a histogram
#' @examples
#' hmean <- WH.vec.mean(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD) * get.MatH.ncols(BLOOD))
#' hmean <- WH.vec.mean(BLOOD, w = RN)
setMethod(
  f = "WH.vec.mean", signature = c(object = "MatH"),
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    ncols <- ncol(object@M)
    nelem <- nrows * ncols
    if (missing(w)) {
      # Default: equal weights summing to one.
      w <- rep(1 / nelem, nelem)
    }
    else {
      if (length(object@M) != length(w)) {
        # Fixed typo in the error message ("Wheights" -> "Weights").
        stop("Weights must have the same dimensions of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    # Normalize weights so they sum to one over all cells.
    w <- matrix(w, nrows, ncols)
    w <- w / sum(w)
    if (ncols == 1) {
      MEAN <- MEAN_VA(object, w)
    }
    else {
      # Combine the per-column means (MEAN_VA) with column weights
      # proportional to each column's total weight.
      w2 <- colSums(w)
      w2 <- w2 / sum(w2)
      MEAN <- w2[1] * MEAN_VA(object[, 1], w[, 1])
      for (c in 2:ncols) {
        MEAN <- MEAN + w2[c] * MEAN_VA(object[, c], w[, c])
      }
    }
    return(MEAN)
  }
)
#' @rdname WH.SSQ-methods
#' @aliases WH.SSQ,MatH-method
#' @description Compute the sum-of-squares-deviations (from the mean) matrix of a \code{MatH} object, i.e.
#' a matrix of numbers, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a squared \code{matrix} with the weighted sum of squares
#' @examples
#' WH.SSQ(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.SSQ(BLOOD, w = RN)
setMethod(
  f = "WH.SSQ", signature = c(object = "MatH"),
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    if (missing(w)) {
      # Default: unit weight for each row (statistical unit).
      w <- rep(1, nrows)
    }
    else {
      if (nrows != length(w)) {
        # Fixed typo in the error message ("Wheights" -> "Weights").
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows, 1)
    # The quadratic-form computation (dot products between histograms minus
    # the mean terms) is delegated to compiled code; a pure-R reference
    # implementation used to live here as commented-out legacy code.
    DEV_MAT <- SSQ_RCPP(object, w)
    # Label the variables-by-variables matrix with the MatH column names.
    colnames(DEV_MAT) <- colnames(object@M)
    rownames(DEV_MAT) <- colnames(object@M)
    return(DEV_MAT)
  }
)
#' @rdname WH.var.covar-methods
#' @aliases WH.var.covar,MatH-method
#' @description Compute the variance-covariance matrix of a \code{MatH} object, i.e.
#' a matrix of values consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a squared \code{matrix} with the (weighted) variance-covariance values
#' @references Irpino, A., Verde, R. (2015) \emph{Basic
#' statistics for distributional symbolic variables: a new metric-based
#' approach} Advances in Data Analysis and Classification, DOI
#' 10.1007/s11634-014-0176-4
#' @examples
#' WH.var.covar(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.var.covar(BLOOD, w = RN)
setMethod(
  f = "WH.var.covar", signature = c(object = "MatH"),
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    if (missing(w)) {
      # Default: unit weight for each row (removed the unused nelem local).
      w <- rep(1, nrows)
    }
    else {
      if (nrows != length(w)) {
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    # Normalizing the weights to sum to one turns the sum of squares into a
    # (co)variance.
    w <- matrix(w, nrows, 1)
    w <- w / sum(w)
    COV_MAT <- WH.SSQ(object, w)
    return(COV_MAT)
  }
)
#' @rdname WH.correlation-methods
#' @aliases WH.correlation,MatH-method
#' @description Compute the correlation matrix of a \code{MatH} object, i.e.
#' a matrix of values consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a squared \code{matrix} with the (weighted) correlations indices
#' @references Irpino, A., Verde, R. (2015) \emph{Basic
#' statistics for distributional symbolic variables: a new metric-based
#' approach} Advances in Data Analysis and Classification, DOI
#' 10.1007/s11634-014-0176-4
#' @examples
#' WH.correlation(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.correlation(BLOOD, w = RN)
setMethod(
  f = "WH.correlation", signature = c(object = "MatH"),
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    if (missing(w)) {
      # Default: unit weight for each row.
      w <- rep(1, nrows)
    }
    else {
      if (nrows != length(w)) {
        # Fixed typo in the error message ("Wheights" -> "Weights").
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows, 1)
    w <- w / sum(w)
    COV_MAT <- WH.var.covar(object, w)
    # Standardize the covariances by the outer product of the standard
    # deviations (the diagonal of the covariance matrix). The dead
    # CORR_MAT <- as.matrix(COV_MAT) pre-assignment of the original, which
    # was immediately overwritten, has been removed.
    CORR_MAT <- COV_MAT / (t(t(sqrt(diag(COV_MAT)))) %*% sqrt(diag(COV_MAT)))
    return(CORR_MAT)
  }
)
#' @rdname WH.SSQ2-methods
#' @aliases WH.SSQ2,MatH-method
#' @description Compute the sum-of-squares-deviations (from the mean) matrix using two \code{MatH} objects having the same number of rows,
#' It returns a rectangular a matrix of numbers, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a rectangular \code{matrix} with the weighted sum of squares
#' @examples
#' M1 <- BLOOD[, 1]
#' M2 <- BLOOD[, 2:3]
#' WH.SSQ2(M1, M2)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.SSQ2(M1, M2, w = RN)
setMethod(
  f = "WH.SSQ2", signature = c(object1 = "MatH", object2 = "MatH"),
  function(object1, object2, w = numeric(0)) {
    nrows1 <- nrow(object1@M)
    ncols1 <- ncol(object1@M)
    nrows2 <- nrow(object2@M)
    ncols2 <- ncol(object2@M)
    # Both matrices must describe the same statistical units (rows).
    if (nrows1 != nrows2) {
      stop("The two matrices have a different number of rows")
    }
    if (missing(w)) {
      # Default: unit weight for each row.
      w <- rep(1, nrows1)
    }
    else {
      if (nrows1 != length(w)) {
        # Fixed typo in the error message ("Wheights" -> "Weights").
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows1, 1)
    # Cross sum-of-squares: variables of object1 on the rows, of object2 on
    # the columns.
    DEV_MAT <- matrix(0, ncols1, ncols2)
    rownames(DEV_MAT) <- colnames(object1@M)
    colnames(DEV_MAT) <- colnames(object2@M)
    # Weighted barycenter (Wasserstein mean) of each column of both matrices.
    MEANS1 <- new("MatH", 1, ncols1)
    for (v1 in 1:ncols1) {
      MEANS1@M[1, v1][[1]] <- WH.vec.mean(object1[, v1], w)
    }
    MEANS2 <- new("MatH", 1, ncols2)
    for (v2 in 1:ncols2) {
      MEANS2@M[1, v2][[1]] <- WH.vec.mean(object2[, v2], w)
    }
    # Codeviance: sum of weighted dot products minus the mean term.
    for (v1 in 1:ncols1) {
      for (v2 in 1:ncols2) {
        for (indiv in 1:nrows1) {
          DEV_MAT[v1, v2] <- DEV_MAT[v1, v2] + w[indiv, 1] * dotpW(object1@M[indiv, v1][[1]], object2@M[indiv, v2][[1]])
        }
        DEV_MAT[v1, v2] <- DEV_MAT[v1, v2] - sum(w) * dotpW(MEANS1@M[1, v1][[1]], MEANS2@M[1, v2][[1]])
      }
    }
    # Scalar result when both inputs are single-variable.
    if (ncols1 == 1 && ncols2 == 1) {
      return(as.vector(DEV_MAT))
    }
    else {
      return(DEV_MAT)
    }
  }
)
#' @rdname WH.var.covar2-methods
#' @aliases WH.var.covar2,MatH-method
#' @description Compute the covariance matrix using two \code{MatH} objects having the same number of rows,
#' It returns a rectangular matrix of numbers, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a rectangular \code{matrix} with the weighted covariances
#' @examples
#' M1 <- BLOOD[, 1]
#' M2 <- BLOOD[, 2:3]
#' WH.var.covar2(M1, M2)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.var.covar2(M1, M2, w = RN)
setMethod(
  f = "WH.var.covar2", signature = c(object1 = "MatH", object2 = "MatH"),
  # Weighted covariance matrix between the columns of two matrices of
  # distributions under the L2 Wasserstein metric. Weights are normalized
  # to sum to one, so the subtracted mean term uses sum(w) == 1.
  function(object1, object2, w = numeric(0)) {
    nrows1 <- nrow(object1@M)
    ncols1 <- ncol(object1@M)
    nrows2 <- nrow(object2@M)
    ncols2 <- ncol(object2@M)
    if (nrows1 != nrows2) {
      stop("The two matrices have a different number of rows")
    }
    # default: equal weight for each row
    if (missing(w)) {
      w <- rep(1, nrows1)
    }
    else {
      if (nrows1 != length(w)) {
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    # normalize the weights so that they sum to one
    w <- matrix(w, nrows1, 1)
    w <- w / sum(w)
    VAR_MAT <- matrix(0, ncols1, ncols2)
    rownames(VAR_MAT) <- colnames(object1@M)
    colnames(VAR_MAT) <- colnames(object2@M)
    # weighted mean distribution of each column of the two matrices
    MEANS1 <- new("MatH", 1, ncols1)
    for (v1 in 1:ncols1) {
      MEANS1@M[1, v1][[1]] <- WH.vec.mean(object1[, v1], w)
    }
    MEANS2 <- new("MatH", 1, ncols2)
    for (v2 in 1:ncols2) {
      MEANS2@M[1, v2][[1]] <- WH.vec.mean(object2[, v2], w)
    }
    # cov(v1, v2) = sum_i w_i <X_i,v1, X_i,v2>_W - <mean_v1, mean_v2>_W
    for (v1 in 1:ncols1) {
      for (v2 in 1:ncols2) {
        for (indiv in 1:nrows1) {
          VAR_MAT[v1, v2] <- VAR_MAT[v1, v2] + w[indiv, 1] * dotpW(object1@M[indiv, v1][[1]], object2@M[indiv, v2][[1]])
        }
        VAR_MAT[v1, v2] <- VAR_MAT[v1, v2] - sum(w) * dotpW(MEANS1@M[1, v1][[1]], MEANS2@M[1, v2][[1]])
      }
    }
    # a 1 x 1 result is returned as a plain number for convenience
    if (ncols1 == 1 && ncols2 == 1) {
      return(as.vector(VAR_MAT))
    }
    else {
      return(VAR_MAT)
    }
  }
)
#' @rdname WH.correlation2-methods
#' @aliases WH.correlation2,MatH-method
#' @description Compute the correlation matrix using two \code{MatH} objects having the same number of rows,
#' It returns a rectangular matrix of numbers, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a rectangular \code{matrix} with the correlations
#' @examples
#' M1 <- BLOOD[, 1]
#' M2 <- BLOOD[, 2:3]
#' WH.correlation2(M1, M2)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.correlation2(M1, M2, w = RN)
setMethod(
  f = "WH.correlation2", signature = c(object1 = "MatH", object2 = "MatH"),
  # Correlation matrix between the columns of two matrices of distributions
  # under the L2 Wasserstein metric: covariance divided by the product of the
  # column standard deviations.
  function(object1, object2, w = numeric(0)) {
    nrows1 <- nrow(object1@M)
    ncols1 <- ncol(object1@M)
    nrows2 <- nrow(object2@M)
    ncols2 <- ncol(object2@M)
    if (nrows1 != nrows2) {
      stop("The two matrices have a different number of rows")
    }
    # default: equal weight for each row
    if (missing(w)) {
      w <- rep(1, nrows1)
    }
    else {
      if (nrows1 != length(w)) {
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    # normalize the weights so that they sum to one
    w <- matrix(w, nrows1, 1)
    w <- w / sum(w)
    COV_MAT <- WH.var.covar2(object1, object2, w)
    CORR_MAT <- as.matrix(COV_MAT)
    # the column variances are loop-invariant: compute them once per column
    # instead of inside the double loop (this was the slow spot)
    var1 <- numeric(ncols1)
    for (v1 in 1:ncols1) {
      var1[v1] <- WH.var.covar(object1[, v1], w)
    }
    var2 <- numeric(ncols2)
    for (v2 in 1:ncols2) {
      var2[v2] <- WH.var.covar(object2[, v2], w)
    }
    for (v1 in 1:ncols1) {
      for (v2 in 1:ncols2) {
        CORR_MAT[v1, v2] <- COV_MAT[v1, v2] / sqrt(var1[v1] * var2[v2])
      }
    }
    # a 1 x 1 result is returned as a plain number for convenience
    if (length(CORR_MAT) == 1) {
      return(as.vector(CORR_MAT))
    } else {
      return(CORR_MAT)
    }
  }
)
# Utility methods for registration of distributions ----
#' Method is.registeredMH
#' @name is.registeredMH
#' @rdname is.registeredMH-methods
#' @exportMethod is.registeredMH
setGeneric("is.registeredMH", function(object) standardGeneric("is.registeredMH")) # generic only: the MatH method is defined below
#' @rdname is.registeredMH-methods
#' @aliases is.registeredMH,MatH-method
#' @description Checks if a \code{MatH} contains histograms described by the same number of
#' bins and the same cdf.
#'
#' @param object A \code{MatH} object
#' @return a \code{logical} value \code{TRUE} if the distributions share the
#' same cdf, \code{FALSE} otherwise.
#' @author Antonio Irpino
#' @references Irpino, A., Lechevallier, Y. and Verde, R. (2006): \emph{Dynamic
#' clustering of histograms using Wasserstein metric} In: Rizzi, A., Vichi, M.
#' (eds.) COMPSTAT 2006. Physica-Verlag, Berlin, 869-876.\cr Irpino, A.,Verde,
#' R. (2006): \emph{A new Wasserstein based distance for the hierarchical
#' clustering of histogram symbolic data} In: Batanjeli, V., Bock, H.H.,
#' Ferligoj, A., Ziberna, A. (eds.) Data Science and Classification, IFCS 2006.
#' Springer, Berlin, 185-192.
#' @keywords distribution
#' @examples
#'
#' ## ---- initialize three distributionH objects mydist1 and mydist2
#' mydist1 <- new("distributionH", c(1, 2, 3), c(0, 0.4, 1))
#' mydist2 <- new("distributionH", c(7, 8, 10, 15), c(0, 0.2, 0.7, 1))
#' mydist3 <- new("distributionH", c(9, 11, 20), c(0, 0.8, 1))
#' ## create a MatH object
#' MyMAT <- new("MatH", nrows = 1, ncols = 3, ListOfDist = c(mydist1, mydist2, mydist3), 1, 3)
#' is.registeredMH(MyMAT)
#' ## [1] FALSE #the distributions do not share the same cdf
#' ## Hint: check with str(MyMAT)
#'
#' ## register the two distributions
#' MATregistered <- registerMH(MyMAT)
#' is.registeredMH(MATregistered)
#' ## TRUE #the distributions share the same cdf
#' ## Hint: check with str(MATregistered)
setMethod(
  f = "is.registeredMH", signature = c(object = "MatH"),
  # Returns TRUE when every distribution stored in the matrix shares the
  # same cdf (identical @p slot), FALSE otherwise. A matrix with a single
  # distribution is trivially registered.
  function(object) {
    nrows <- nrow(object@M)
    ncols <- ncol(object@M)
    ndis <- nrows * ncols
    # reference cdf: the one of the first cell; linear indexing M[count]
    # scans the matrix of distributions column-wise
    tmpcdf <- object@M[1, 1][[1]]@p
    if (ndis >= 2) {
      for (count in 2:ndis) {
        if (!identical(tmpcdf, object@M[count][[1]]@p)) {
          return(FALSE)
        }
      }
    }
    return(TRUE)
  }
)
#' Method registerMH
#' @name registerMH
#' @rdname registerMH-methods
#' @exportMethod registerMH
setGeneric("registerMH", function(object) standardGeneric("registerMH")) # generic only: the MatH method is defined below
#' @rdname registerMH-methods
#' @aliases registerMH,MatH-method
#' @description \code{registerMH} method registers a set of distributions of a \code{MatH} object
#' All the
#' distribution are recomputed to obtain distributions sharing the same
#' \code{p} slot. This method is useful for the fast computation of all
#' methods based on the L2 Wasserstein metric. The distributions will have the same
#' number of elements in the \code{x} slot without modifying their density
#' function.
#'
#'
#' @param object A \code{MatH} object (a matrix of distributions)
#' @return A \code{MatH} object, a matrix of distributions sharing the same
#' \code{p} slot (i.e. the same cdf).
#' @author Antonio Irpino
#' @references Irpino, A., Lechevallier, Y. and Verde, R. (2006): \emph{Dynamic
#' clustering of histograms using Wasserstein metric} In: Rizzi, A., Vichi, M.
#' (eds.) COMPSTAT 2006. Physica-Verlag, Berlin, 869-876.\cr Irpino, A.,Verde,
#' R. (2006): \emph{A new Wasserstein based distance for the hierarchical
#' clustering of histogram symbolic data} In: Batanjeli, V., Bock, H.H.,
#' Ferligoj, A., Ziberna, A. (eds.) Data Science and Classification, IFCS 2006.
#' Springer, Berlin, 185-192.
#' @keywords distribution
#' @examples
#' # initialize three distributionH objects mydist1 and mydist2
#' mydist1 <- new("distributionH", c(1, 2, 3), c(0, 0.4, 1))
#' mydist2 <- new("distributionH", c(7, 8, 10, 15), c(0, 0.2, 0.7, 1))
#' mydist3 <- new("distributionH", c(9, 11, 20), c(0, 0.8, 1))
#' # create a MatH object
#'
#' MyMAT <- new("MatH", nrows = 1, ncols = 3, ListOfDist = c(mydist1, mydist2, mydist3), 1, 3)
#' # register the two distributions
#' MATregistered <- registerMH(MyMAT)
#' #
#' # OUTPUT the structure of MATregstered
#' str(MATregistered)
#' # Formal class 'MatH' [package "HistDAWass"] with 1 slots
#' # .. @@ M:List of 3
#' # .. ..$ :Formal class 'distributionH' [package "HistDAWass"] with 4 slots
#' # .. .. .. ..@@ x: num [1:6] 1 1.5 2 2.5 2.67 ...
#' # .. .. .. ..@@ p: num [1:6] 0 0.2 0.4 0.7 0.8 1
#' # ...
#' # .. ..$ :Formal class 'distributionH' [package "HistDAWass"] with 4 slots
#' # .. .. .. ..@@ x: num [1:6] 7 8 8.8 10 11.7 ...
#' # .. .. .. ..@@ p: num [1:6] 0 0.2 0.4 0.7 0.8 1
#' # ...
#' # .. ..$ :Formal class 'distributionH' [package "HistDAWass"] with 4 slots
#' # .. .. .. ..@@ x: num [1:6] 9 9.5 10 10.8 11 ...
#' # .. .. .. ..@@ p: num [1:6] 0 0.2 0.4 0.7 0.8 1
#' # ...
#' # .. ..- attr(*, "dim")= int [1:2] 1 3
#' # .. ..- attr(*, "dimnames")=List of 2
#' # .. .. ..$ : chr "I1"
#' # .. .. ..$ : chr [1:3] "X1" "X2" "X3"
#' #
setMethod(
  f = "registerMH", signature = c(object = "MatH"),
  # Registers all the distributions of a MatH object: every histogram is
  # recomputed (via quantiles) on a common vector of cdf levels, so that
  # all cells share the same @p slot. If the matrix is already registered
  # it is returned unchanged.
  function(object) {
    nrows <- nrow(object@M)
    ncols <- ncol(object@M)
    # nothing to do when all the distributions already share the same cdf
    if (is.registeredMH(object)) {
      return(object)
    }
    # pool the cdf levels of every distribution in one pass
    # (object@M is a matrix of lists: lapply visits each distributionH)
    commoncdf <- sort(unique(round(unlist(lapply(object@M, function(d) d@p)), digits = 10)))
    # force exact boundary probabilities after the rounding step
    commoncdf[1] <- 0
    commoncdf[length(commoncdf)] <- 1
    NEWMAT <- new("MatH", nrows, ncols)
    for (r in 1:nrows) {
      for (c in 1:ncols) {
        # quantiles of the original distribution at the common cdf levels
        x <- compQ_vect(object@M[r, c][[1]], vp = commoncdf)
        NEWMAT@M[r, c][[1]] <- new("distributionH", x, commoncdf)
      }
    }
    return(NEWMAT)
  }
)
#' Method Center.cell.MatH Centers all the cells of a matrix of distributions
#' @name Center.cell.MatH
#' @rdname Center.cell.MatH-methods
#' @exportMethod Center.cell.MatH
setGeneric("Center.cell.MatH", function(object) standardGeneric("Center.cell.MatH")) # generic only: the MatH method is defined below
#' @rdname Center.cell.MatH-methods
#' @aliases Center.cell.MatH,MatH-method
#' @description The function transform a MatH object (i.e. a matrix of distributions),
#' such that each distribution is shifted and has a mean equal to zero
#' @param object a MatH object, a matrix of distributions.
#' @return A \code{MatH} object, having each distribution with a zero mean.
#' @examples
#' CEN_BLOOD <- Center.cell.MatH(BLOOD)
#' get.MatH.stats(BLOOD, stat = "mean")
setMethod(
  f = "Center.cell.MatH", signature = c(object = "MatH"),
  # Shifts every histogram of the matrix so that its mean becomes zero:
  # the quantiles are translated by the mean and the @m slot is reset.
  function(object) {
    centered <- object
    for (row in seq_len(get.MatH.nrows(object))) {
      for (col in seq_len(get.MatH.ncols(object))) {
        dist <- centered@M[row, col][[1]]
        dist@x <- dist@x - dist@m
        dist@m <- 0
        centered@M[row, col][[1]] <- dist
      }
    }
    return(centered)
  }
)
## Show overridding ----
#' Method show for MatH
#' @name show-MatH
#' @rdname show-MatH-methods
#' @docType methods
# @aliases show,distributionH-method
# @name show
# @rdname show-MatH
#' @aliases show,MatH-method
#' @description An overriding show method for a \code{MatH} object. The method returns a representation
#' of the matrix using the mean and the standard deviation for each histogram.
#' @param object a \code{MatH} object
#' @examples
#' show(BLOOD)
#' print(BLOOD)
#' BLOOD
setMethod("show",
  signature(object = "MatH"),
  # Compact textual rendering of a matrix of distributions: each cell is
  # printed as "[m= mean ,s= std]". Scientific notation is forced for a
  # value only when its magnitude falls outside [1e-5, 1e5].
  # NOTE: the original four-branch cascade contained a `||` where `&&` was
  # intended, making one branch always fire and its result be overwritten
  # by a later branch; the net effect is two independent per-value choices,
  # which is what this version computes directly.
  definition = function(object) {
    cat("a matrix of distributions \n", paste(
      ncol(object@M), " variables ",
      nrow(object@M), " rows \n"
    ), "each distribution in the cell is represented by the mean and the standard deviation \n ")
    # first row of the display matrix holds the column (variable) names
    mymat <- matrix(0, nrow(object@M) + 1, ncol(object@M))
    for (i in 1:ncol(object@M)) {
      mymat[1, i] <- colnames(object@M)[i]
    }
    for (i in 1:nrow(object@M)) {
      for (j in 1:ncol(object@M)) {
        d <- object@M[i, j][[1]]
        if (length(d@x) == 0) {
          mymat[i + 1, j] <- paste("Empty distribution")
        }
        else {
          # choose the notation of the mean and of the std independently
          if (abs(d@m) > 1e5 || abs(d@m) < 1e-5) {
            m_str <- format(d@m, digits = 5, scientific = TRUE)
          } else {
            m_str <- format(d@m, digits = 5)
          }
          if (d@s > 1e5 || d@s < 1e-5) {
            s_str <- format(d@s, digits = 5, scientific = TRUE)
          } else {
            s_str <- format(d@s, digits = 5)
          }
          mymat[i + 1, j] <- paste("[m=", m_str, " ,s=", s_str, "]")
        }
      }
    }
    # blank label for the header row, then the original row names
    rownames(mymat) <- c(
      paste(rep(" ", nchar(rownames(object@M)[1])), collapse = ""),
      row.names(object@M)
    )
    write.table(format(mymat, justify = "centre"), row.names = TRUE, col.names = FALSE, quote = FALSE)
  }
)
# Define a "plot" S4 generic only when none exists yet (base graphics'
# plot is not an S4 generic by default), so the MatH method below can be
# registered without clobbering an existing generic.
if (!isGeneric("plot")) {
  setGeneric(
    "plot",
    function(x, y, ...) standardGeneric("plot")
  )
}
# Plot overloading ----
#' Method plot for a matrix of histograms
#' @name plot-MatH
#' @docType methods
#' @rdname plot-MatH
#' @aliases plot,MatH-method
#' @description An overloading plot function for a \code{MatH} object. The method returns a graphical representation
#' of the matrix of histograms.
#' @param x a \code{distributionH} object
#' @param y not used in this implementation
#' @param type (optional) a string describing the type of plot, default="HISTO".\cr
#' Other allowed types are \cr
#' "DENS"=a density approximation, \cr
#' "BOXPLOT"=a boxplot
#' @param border (optional) a string the color of the border of the plot, default="black".
#' @param angL (optional) angle of labels of rows (DEFAULT=330).
#' @examples
#' plot(BLOOD) # plots BLOOD dataset
#' \dontrun{
#' plot(BLOOD, type = "HISTO", border = "blue") # plots a matrix of histograms
#' plot(BLOOD, type = "DENS", border = "blue") # plots a matrix of densities
#' plot(BLOOD, type = "BOXPLOT") # plots a boxplots
#' }
#' @importFrom utils write.table
#' @export
setMethod(
  "plot",
  signature(x = "MatH"),
  # Thin wrapper: all plotting logic is delegated to plot.M (defined
  # elsewhere in the package); y is required by the generic but unused.
  function(x, y = "missing", type = "HISTO", border = "black", angL = 330) {
    plot.M(x, type = type, border = border, angL = angL)
  }
)
#' Method get.cell.MatH Returns the histogram in a cell of a matrix of distributions
#' @name get.cell.MatH
#' @rdname get.cell.MatH-methods
#' @exportMethod get.cell.MatH
setGeneric("get.cell.MatH", function(object, r, c) standardGeneric("get.cell.MatH")) # generic only: the MatH method is defined below
#' @rdname get.cell.MatH-methods
#' @aliases get.cell.MatH,MatH-method
#' @description Returns the histogram data in the r-th row and the c-th column.
#' @param object a MatH object, a matrix of distributions.
#' @param r an integer, the row index.
#' @param c an integer, the column index
#'
#' @return A \code{distributionH} object.
#' @examples
#' get.cell.MatH(BLOOD, r = 1, c = 1)
setMethod(
  f = "get.cell.MatH", signature = c(object = "MatH", r = "numeric", c = "numeric"),
  # Returns the distributionH stored in cell (r, c). On out-of-range
  # indices it prints a message and returns NULL (behavior kept for
  # backward compatibility with existing callers).
  function(object, r, c) {
    nr <- get.MatH.nrows(object)
    nc <- get.MatH.ncols(object)
    r <- as.integer(r)
    c <- as.integer(c)
    # scalar short-circuit comparisons (|| instead of the vectorized |)
    if (r > nr || r < 1 || c < 1 || c > nc) {
      print("Indices out of range")
      return(NULL)
    }
    return(object@M[r, c][[1]])
  }
)
#' Method set.cell.MatH assign a histogram to a cell of a matrix of histograms
#' @name set.cell.MatH
#' @rdname set.cell.MatH-methods
#' @exportMethod set.cell.MatH
setGeneric("set.cell.MatH", function(object, mat, r, c) standardGeneric("set.cell.MatH")) # generic only: the method is defined below
#' @rdname set.cell.MatH-methods
#' @aliases set.cell.MatH,MatH-method
#' @description Assign a histogram data to the r-th row and the c-th column of a matrix of histograms.
#' @param object a distributionH object, a matrix of distributions.
#' @param mat a MatH object, a matrix of distributions.
#' @param r an integer, the row index.
#' @param c an integer, the column index
#'
#' @return A \code{MatH} object.
#' @examples
#' mydist <- distributionH(x = c(0, 1, 2, 3, 4), p = c(0, 0.1, 0.6, 0.9, 1))
#' MAT <- set.cell.MatH(mydist, BLOOD, r = 1, c = 1)
setMethod(
  f = "set.cell.MatH", signature = c(object = "distributionH", mat = "MatH", r = "numeric", c = "numeric"),
  # Stores the distributionH `object` into cell (r, c) of `mat` and
  # returns the updated matrix. On out-of-range indices it prints a
  # message and returns NULL (behavior kept for backward compatibility).
  function(object, mat, r, c) {
    nr <- get.MatH.nrows(mat)
    nc <- get.MatH.ncols(mat)
    r <- as.integer(r)
    c <- as.integer(c)
    # scalar short-circuit comparisons (|| instead of the vectorized |)
    if (r > nr || r < 1 || c < 1 || c > nc) {
      print("Indices out of range")
      return(NULL)
    }
    mat@M[r, c][[1]] <- object
    return(mat)
  }
)
#' Wrapper function of \code{MatH} class
#'
#' This function create a matrix of histogram data, i.e. a \code{MatH}
#' object
#'
#' @name MatH
#' @rdname MatH-class
#' @export
#' @param x (optional, default= an empty \code{distributionH} object) a list of
#' \code{distributionH} objects
#' @param nrows (optional, default=1)an integer, the number of rows.
#' @param ncols (optional, default=1) an integer, the number of columns (aka
#' variables).
#' @param rownames (optional, default=NULL) a list of strings containing the
#' names of the rows.
#' @param varnames (optional, default=NULL) a list of strings containing the
#' names of the columns (aka variables).
#' @param by.row (optional, default=FALSE) a logical value, TRUE the matrix is
#' row wise filled, FALSE the matrix is filled column wise.
#' @return A \code{matH} object
#' @examples
#'
#' # bulding an empty 10 by 4 matrix of histograms
#' MAT <- MatH(nrows = 10, ncols = 4)
MatH <- function(x = NULL, nrows = 1, ncols = 1, rownames = NULL, varnames = NULL, by.row = FALSE) {
  # Convenience constructor: simply forwards its arguments to the
  # S4 "MatH" initializer and returns the resulting matrix of histograms.
  new("MatH",
    nrows = nrows,
    ncols = ncols,
    ListOfDist = x,
    names.rows = rownames,
    names.cols = varnames,
    by.row = by.row
  )
}
# overriding of "[" operator for MatH object ----
#' extract from a MatH Method [
#' @name [
#' @rdname extract-methods
#' @aliases [,MatH,ANY,ANY,ANY-method
#' [,MatH-method
#' @description This method overrides the "[" operator for a \code{matH} object.
#' @param x a \code{matH} object
#' @param i a set of integer values identifying the rows
#' @param j a set of integer values identifying the columns
#' @param ... not useful
#' @param drop a logical value inherited from the basic method "[" but not used (default=TRUE)
#' @return A \code{matH} object
#' @examples
#' D <- BLOOD # the BLOOD dataset
#' SUB_D <- BLOOD[c(1, 2, 5), c(1, 2)]
#' @importFrom stats variable.names
#' @export
setMethod(
  "[",
  signature(x = "MatH"),
  # Subsetting for MatH: x[i, j] returns a MatH with the selected rows and
  # columns. Negative (exclusion) indexes are now supported by resolving
  # them to positive positions (this closes the old TODO). Zero indexes
  # remain rejected. `drop` is accepted for compatibility with the generic
  # but ignored: the result is always a MatH.
  function(x, i, j, ..., drop = TRUE) {
    if (missing(i)) i <- seq_len(nrow(x@M))
    if (missing(j)) j <- seq_len(ncol(x@M))
    if (any(i == 0) || any(j == 0)) {
      stop("zero indexes are not allowed in subsetting [,] a MatH object")
    }
    # resolve negative/exclusion indexes to positive positions;
    # mixing positive and negative indexes errors, as in base R
    i <- seq_len(nrow(x@M))[i]
    j <- seq_len(ncol(x@M))[j]
    x@M <- matrix(x@M[i, j],
      nrow = length(i), ncol = length(j),
      dimnames = list(row.names(x@M)[i], colnames(x@M)[j])
    )
    return(x)
  }
)
# methods for getting information from a MatH ----
# generic only: the MatH method is defined below
setGeneric("get.MatH.nrows", function(object) standardGeneric("get.MatH.nrows"))
#' Method get.MatH.nrows
#' @name get.MatH.nrows
#' @description It returns the number of rows of a \code{MatH} object
#' @param object a \code{MatH} object
#' @return An integer, the number of rows.
#' @exportMethod get.MatH.nrows
#' @rdname get.MatH.nrows-methods
#' @aliases get.MatH.nrows,MatH-method
setMethod(
  f = "get.MatH.nrows", signature = c(object = "MatH"),
  # Number of rows of the underlying matrix of distributions.
  function(object) {
    nrow(object@M)
  }
)
#' Method get.MatH.ncols
#' @name get.MatH.ncols
#' @rdname get.MatH.ncols-methods
#' @exportMethod get.MatH.ncols
setGeneric("get.MatH.ncols", function(object) standardGeneric("get.MatH.ncols")) # generic only: the MatH method is defined below
#' @rdname get.MatH.ncols-methods
#' @aliases get.MatH.ncols,MatH-method
#' @description It returns the number of columns of a \code{MatH} object
#' @param object a \code{MatH} object
#' @return An integer, the number of columns.
setMethod(
  f = "get.MatH.ncols", signature = c(object = "MatH"),
  # Number of columns (variables) of the underlying matrix of distributions.
  function(object) {
    ncol(object@M)
  }
)
#' Method get.MatH.rownames
#' @name get.MatH.rownames
#' @rdname get.MatH.rownames-methods
#' @exportMethod get.MatH.rownames
setGeneric("get.MatH.rownames", function(object) standardGeneric("get.MatH.rownames")) # generic only: the MatH method is defined below
#' @rdname get.MatH.rownames-methods
#' @aliases get.MatH.rownames,MatH-method
#' @description It returns the labels of the rows of a \code{MatH} object
#' @param object a \code{MatH} object
#' @return A vector of char, the label of the rows.
setMethod(
  f = "get.MatH.rownames", signature = c(object = "MatH"),
  # Row labels of the underlying matrix of distributions.
  function(object) {
    rownames(object@M)
  }
)
#' Method get.MatH.varnames
#' @name get.MatH.varnames
#' @rdname get.MatH.varnames-methods
#' @exportMethod get.MatH.varnames
setGeneric("get.MatH.varnames", function(object) standardGeneric("get.MatH.varnames")) # generic only: the MatH method is defined below
#' @rdname get.MatH.varnames-methods
#' @aliases get.MatH.varnames,MatH-method
#' @description It returns the labels of the columns, or the names of the variables, of a \code{MatH} object
#' @param object a \code{MatH} object
#' @return A vector of char, the labels of the columns, or the names of the variables.
setMethod(
  f = "get.MatH.varnames", signature = c(object = "MatH"),
  # Column labels (variable names) of the underlying matrix of distributions.
  function(object) {
    colnames(object@M)
  }
)
#' Method get.MatH.main.info
#' @name get.MatH.main.info
#' @rdname get.MatH.main.info-methods
#' @exportMethod get.MatH.main.info
setGeneric("get.MatH.main.info", function(object) standardGeneric("get.MatH.main.info")) # generic only: the MatH method is defined below
#' @rdname get.MatH.main.info-methods
#' @aliases get.MatH.main.info,MatH-method
#' @description It returns the number of rows, of columns the labels of rows and columns of a \code{MatH} object.
#' @param object a \code{MatH} object
#' @return A list of char, the labels of the columns, or the names of the variables.
#' @slot nrows - the number of rows
#' @slot ncols - the number of columns
#' @slot rownames - a vector of char, the names of rows
#' @slot varnames - a vector of char, the names of columns
#'
setMethod(
  f = "get.MatH.main.info", signature = c(object = "MatH"),
  # Bundles the dimensions and the row/column labels of a MatH into a list
  # with elements nrows, ncols, rownames and varnames.
  function(object) {
    info <- list(
      nrows = get.MatH.nrows(object),
      ncols = get.MatH.ncols(object),
      rownames = get.MatH.rownames(object),
      varnames = get.MatH.varnames(object)
    )
    return(info)
  }
)
#' Method get.MatH.stats
#' @name get.MatH.stats
#' @rdname get.MatH.stats-methods
#' @exportMethod get.MatH.stats
setGeneric("get.MatH.stats", function(object, ...) standardGeneric("get.MatH.stats")) # generic only: the MatH method is defined below
#' @rdname get.MatH.stats-methods
#' @aliases get.MatH.stats,MatH-method
#' @description It returns statistics for each distribution contained in a \code{MatH} object.
#' @param object a \code{MatH} object
#' @param ... a set of other parameters
#' @param stat (optional) a string containing the required statistic. Default='mean'\cr
#' - \code{stat='mean'} - for computing the mean of each histogram\cr
#' - \code{stat='median'} - for computing the median of each histogram\cr
#' - \code{stat='min'} - for computing the minimum of each histogram\cr
#' - \code{stat='max'} - for computing the maximum of each histogram\cr
#' - \code{stat='std'} - for computing the standard deviatio of each histogram\cr
#' - \code{stat='skewness'} - for computing the skewness of each histogram\cr
#' - \code{stat='kurtosis'} - for computing the kurtosis of each histogram\cr
#' - \code{stat='quantile'} - for computing the quantile ot level \code{prob} of each histogram\cr
#' @param prob (optional)a number between 0 and 1 for computing the value once choosen the \code{'quantile'} option for \code{stat}.
#' @return A list
#' @slot stat - the chosen statistic
#' @slot prob - level of probability if stat='quantile'
#' @slot MAT - a matrix of values
#' @examples
#' get.MatH.stats(BLOOD) # the means of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "median") # the medians of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "quantile", prob = 0.5) # the same as median
#' get.MatH.stats(BLOOD, stat = "min") # minima of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "quantile", prob = 0) # the same as min
#' get.MatH.stats(BLOOD, stat = "max") # maxima of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "quantile", prob = 1) # the same as max
#' get.MatH.stats(BLOOD, stat = "std") # standard deviations of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "skewness") # skewness indices of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "kurtosis") # kurtosis indices of the distributions in BLOOD dataset
#' get.MatH.stats(BLOOD, stat = "quantile", prob = 0.05)
#' # the fifth percentiles of distributions in BLOOD dataset
setMethod(
  f = "get.MatH.stats", signature = c(object = "MatH"),
  # Computes, for every histogram in the matrix, the statistic requested
  # via `stat` (mean, std, skewness, kurtosis, median, min, max, or the
  # quantile at level `prob`). Cells holding empty distributions stay NA.
  function(object, stat = "mean", prob = 0.5) {
    n_rows <- get.MatH.nrows(object)
    n_cols <- get.MatH.ncols(object)
    MAT <- matrix(NA, n_rows, n_cols)
    rownames(MAT) <- get.MatH.rownames(object)
    colnames(MAT) <- get.MatH.varnames(object)
    for (i in seq_len(n_rows)) {
      for (j in seq_len(n_cols)) {
        d <- object@M[i, j][[1]]
        # skip empty distributions: the cell keeps its NA
        if (length(d@x) == 0) next
        if (stat == "mean") {
          MAT[i, j] <- d@m
        } else if (stat == "std") {
          MAT[i, j] <- d@s
        } else if (stat == "skewness") {
          MAT[i, j] <- skewH(d)
        } else if (stat == "kurtosis") {
          MAT[i, j] <- kurtH(d)
        } else if (stat == "median") {
          MAT[i, j] <- compQ(object = d, p = 0.5)
        } else if (stat == "quantile") {
          MAT[i, j] <- compQ(object = d, p = prob)
        } else if (stat == "min") {
          MAT[i, j] <- compQ(object = d, p = 0)
        } else if (stat == "max") {
          MAT[i, j] <- compQ(object = d, p = 1)
        }
      }
    }
    # the quantile case also reports the probability level
    if (stat == "quantile") {
      return(list(stat = stat, prob = prob, mat = MAT))
    }
    list(stat = stat, mat = MAT)
  }
)
# methods for collating by row or by column two MatHs ----
#' Method WH.bind.row
#' @name WH.bind.row
#' @rdname WH.bind.row-methods
#' @exportMethod WH.bind.row
setGeneric("WH.bind.row", function(object1, object2) standardGeneric("WH.bind.row")) # generic: row-wise binding
#' Method WH.bind.col
#' @name WH.bind.col
#' @rdname WH.bind.col-methods
#' @exportMethod WH.bind.col
setGeneric("WH.bind.col", function(object1, object2) standardGeneric("WH.bind.col")) # generic: column-wise binding
#' Method WH.bind
#' @name WH.bind
#' @rdname WH.bind-methods
#' @exportMethod WH.bind
setGeneric("WH.bind", function(object1, object2, byrow) standardGeneric("WH.bind")) # generic: dispatching binder
#' @rdname WH.bind.row-methods
#' @aliases WH.bind.row,MatH-method
#' @description It attaches two \code{MatH} objects with the same columns by row.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @return a \code{MatH} object,
#' @examples
#' M1 <- BLOOD[1:3, ]
#' M2 <- BLOOD[5:8, ]
#' MAT <- WH.bind.row(M1, M2)
setMethod(
  f = "WH.bind.row", signature = c(object1 = "MatH", object2 = "MatH"),
  # Stacks object2 below object1; both must have the same number of
  # columns (variables). Row and column names are preserved by rbind.
  function(object1, object2) {
    if (ncol(object1@M) != ncol(object2@M)) {
      stop("The two matrix must have the same number of columns")
    }
    object1@M <- rbind(object1@M, object2@M)
    return(object1)
  }
)
#' @rdname WH.bind.col-methods
#' @aliases WH.bind.col,MatH-method
#' @description It attaches two \code{MatH} objects with the same rows by columns.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @return a \code{MatH} object,
#' @examples
#' M1 <- BLOOD[1:10, 1]
#' M2 <- BLOOD[1:10, 3]
#' MAT <- WH.bind.col(M1, M2)
setMethod(
  f = "WH.bind.col", signature = c(object1 = "MatH", object2 = "MatH"),
  # Appends the columns of object2 to the right of object1; both must have
  # the same number of rows. Row and column names are preserved by cbind.
  function(object1, object2) {
    if (nrow(object1@M) != nrow(object2@M)) {
      stop("The two matrix must have the same number of rows")
    }
    object1@M <- cbind(object1@M, object2@M)
    return(object1)
  }
)
#' @rdname WH.bind-methods
#' @aliases WH.bind,MatH-method
#' @description It attaches two \code{MatH} objects with the same columns by row, or the same rows by column.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param byrow a logical value (default=TRUE) attaches the objects by row
#' @return a \code{MatH} object,
#' @examples
#' # binding by row
#' M1 <- BLOOD[1:10, 1]
#' M2 <- BLOOD[1:10, 3]
#' MAT <- WH.bind(M1, M2, byrow = TRUE)
#' # binding by col
#' M1 <- BLOOD[1:10, 1]
#' M2 <- BLOOD[1:10, 3]
#' MAT <- WH.bind(M1, M2, byrow = FALSE)
#' @seealso \code{\link{WH.bind.row}} for binding by row, \code{\link{WH.bind.col}} for binding by column
setMethod(
  f = "WH.bind", signature = c(object1 = "MatH", object2 = "MatH"),
  # Dispatches to WH.bind.row (byrow = TRUE, the default) or WH.bind.col
  # (byrow = FALSE). Dimension checks are performed by the called method.
  function(object1, object2, byrow = TRUE) {
    if (byrow == TRUE) {
      NewMat <- WH.bind.row(object1, object2)
    }
    else {
      NewMat <- WH.bind.col(object1, object2)
    }
    return(NewMat)
  }
)
# methods for MatH based on the L2 Wasserstein distance between distributions ----
#' Method WH.mat.sum
#' @name WH.mat.sum
#' @rdname WH.mat.sum-methods
#' @exportMethod WH.mat.sum
setGeneric("WH.mat.sum", function(object1, object2) standardGeneric("WH.mat.sum")) # generic: element-wise Wasserstein matrix sum
#' Method WH.mat.prod
#' @name WH.mat.prod
#' @rdname WH.mat.prod-methods
#' @exportMethod WH.mat.prod
setGeneric("WH.mat.prod", function(object1, object2, ...) standardGeneric("WH.mat.prod")) # generic: Wasserstein matrix product
#' @rdname WH.mat.sum-methods
#' @aliases WH.mat.sum,MatH-method
#' @description It sums two \code{MatH} objects, i.e. two matrices of distributions,
#' by summing the quantile functions of histograms. This sum is consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @return a \code{MatH} object,
#' @examples
#' # binding by row
#' M1 <- BLOOD[1:5, ]
#' M2 <- BLOOD[6:10, ]
#' MAT <- WH.mat.sum(M1, M2)
setMethod(
  f = "WH.mat.sum", signature = c(object1 = "MatH", object2 = "MatH"),
  # Cell-wise L2-Wasserstein sum of two matrices of distributions: in
  # each cell the two histograms are registered on a common cdf and
  # their quantile functions (and means) are added.
  # INPUT:  object1, object2 - MatH objects with identical dimensions
  # OUTPUT: a MatH object holding the cell-wise sums
  function(object1, object2) {
    # Fixed: nrows2/ncols2 were previously (and wrongly) computed from
    # object1; since they were never used, they are dropped and the
    # identical() check below is the single dimension guard.
    if (!identical(dim(object1@M), dim(object2@M))) {
      stop("the two matrices must be of the same dimension")
    }
    nrows <- nrow(object1@M)
    ncols <- ncol(object1@M)
    MATS <- object1
    TMP <- new("MatH", 1, 2)
    for (r in seq_len(nrows)) {
      for (c in seq_len(ncols)) {
        # register the pair of histograms on a common cdf ...
        TMP@M[1, 1][[1]] <- object1@M[r, c][[1]]
        TMP@M[1, 2][[1]] <- object2@M[r, c][[1]]
        TMP <- registerMH(TMP)
        # ... then sum quantiles and means on the shared p levels
        MATS@M[r, c][[1]] <- new(
          "distributionH",
          (TMP@M[1, 1][[1]]@x + TMP@M[1, 2][[1]]@x),
          TMP@M[1, 1][[1]]@p,
          (TMP@M[1, 1][[1]]@m + TMP@M[1, 2][[1]]@m)
        )
      }
    }
    return(MATS)
  }
)
#' @rdname WH.mat.prod-methods
#' @aliases WH.mat.prod,MatH-method
#' @description It is the matrix product of two \code{MatH} objects, i.e. two matrices of distributions,
#' by using the dot product of two histograms that is consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param ... other optional parameters
#' @param traspose1 a logical value, default=FALSE. If TRUE transposes object1
#' @param traspose2 a logical value, default=FALSE. If TRUE transposes object2
#' @return a matrix of numbers
#' @examples
#'
#' M1 <- BLOOD[1:5, ]
#' M2 <- BLOOD[6:10, ]
#' MAT <- WH.mat.prod(M1, M2, traspose1 = TRUE, traspose2 = FALSE)
setMethod(
  f = "WH.mat.prod", signature = c(object1 = "MatH", object2 = "MatH"),
  # Matrix product of two MatH objects, using the L2-Wasserstein dot
  # product (dotpW) between histograms as the scalar product.
  # (Fixed header comment: this is a product, not a sum.)
  # INPUT:  object1, object2 - MatH objects; traspose1/traspose2 flags
  # OUTPUT: a numeric matrix of pairwise dot products
  function(object1, object2, traspose1 = FALSE, traspose2 = FALSE) {
    if (traspose1 == TRUE) {
      # transpose the first matrix
      object1@M <- t(object1@M)
    }
    if (traspose2 == TRUE) {
      # transpose the second matrix
      object2@M <- t(object2@M)
    }
    nrows1 <- nrow(object1@M)
    ncols1 <- ncol(object1@M)
    nrows2 <- nrow(object2@M)
    ncols2 <- ncol(object2@M)
    if (ncols1 != nrows2) {
      # Fixed typo in the user-facing message: "Fisrt" -> "First"
      cat(
        "First matrix dimensions ", nrows1, "x", ncols1, "\n",
        "Second matrix dimensions ", nrows2, "x", ncols2, "\n"
      )
      stop("Dimensions of matrices are not compatible")
    }
    MAT <- matrix(0, nrows1, ncols2)
    for (r in seq_len(nrows1)) {
      for (c in seq_len(ncols2)) {
        for (els in seq_len(ncols1)) {
          MAT[r, c] <- MAT[r, c] + dotpW(object1@M[r, els][[1]], object2@M[els, c][[1]])
        }
      }
    }
    return(MAT)
  }
)
# L2 Wasserstein basic operations and basic statistics for matrices of distributionH ----
#' Method WH.vec.sum
#' @name WH.vec.sum
#' @rdname WH.vec.sum-methods
#' @exportMethod WH.vec.sum
setGeneric("WH.vec.sum", function(object, ...) standardGeneric("WH.vec.sum")) # weighted sum of all the distributionH cells of a MatH
#' Method WH.vec.mean
#' @name WH.vec.mean
#' @rdname WH.vec.mean-methods
#' @exportMethod WH.vec.mean
setGeneric("WH.vec.mean", function(object, ...) standardGeneric("WH.vec.mean")) # weighted mean of all the distributionH cells of a MatH
#' Method WH.SSQ
#' @name WH.SSQ
#' @rdname WH.SSQ-methods
#' @exportMethod WH.SSQ
setGeneric("WH.SSQ", function(object, ...) standardGeneric("WH.SSQ")) # weighted sum-of-squares (deviance/codeviance) matrix
#' Method WH.var.covar
#' @name WH.var.covar
#' @rdname WH.var.covar-methods
#' @exportMethod WH.var.covar
setGeneric("WH.var.covar", function(object, ...) standardGeneric("WH.var.covar")) # weighted variance-covariance matrix
#' Method WH.correlation
#' @name WH.correlation
#' @rdname WH.correlation-methods
#' @exportMethod WH.correlation
setGeneric("WH.correlation", function(object, ...) standardGeneric("WH.correlation")) # weighted correlation matrix
#' Method WH.SSQ2
#' @name WH.SSQ2
#' @rdname WH.SSQ2-methods
#' @exportMethod WH.SSQ2
setGeneric("WH.SSQ2", function(object1, object2, ...) standardGeneric("WH.SSQ2")) # weighted cross sum-of-squares (codeviance) matrix of two MatH
#' Method WH.var.covar2
#' @name WH.var.covar2
#' @rdname WH.var.covar2-methods
#' @exportMethod WH.var.covar2
setGeneric("WH.var.covar2", function(object1, object2, ...) standardGeneric("WH.var.covar2")) # weighted cross-covariance matrix of two MatH
#' Method WH.correlation2
#' @name WH.correlation2
#' @rdname WH.correlation2-methods
#' @exportMethod WH.correlation2
setGeneric("WH.correlation2", function(object1, object2, ...) standardGeneric("WH.correlation2")) # weighted cross-correlation matrix of two MatH
#' @rdname WH.vec.sum-methods
#' @aliases WH.vec.sum,MatH-method
#' @description Compute a histogram that is the weighted sum of the set of histograms contained
#' in a \code{MatH} object, i.e. a matrix of histograms, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... optional arguments
#' @param w it is possible to add a vector of weights (positive numbers) having the same size of the \code{MatH object},
#' default = equal weights for all cells
#' @return a \code{distributionH} object, i.e. a histogram
#' @examples
#' hsum <- WH.vec.sum(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD) * get.MatH.ncols(BLOOD))
#' hsum <- WH.vec.sum(BLOOD, w = RN)
#' ### SUM of distributions ----
setMethod(
  f = "WH.vec.sum", signature = c(object = "MatH"),
  # Weighted sum of all histograms of a MatH object, consistent with the
  # L2 Wasserstein metric (sums are taken on quantile functions via the
  # `+` method of distributionH).
  # INPUT:  object - a MatH; w - optional positive weights (one per cell)
  # OUTPUT: a single distributionH object
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    ncols <- ncol(object@M)
    nelem <- nrows * ncols
    if (missing(w)) {
      # default: unit weight for every cell
      w <- rep(1, nelem)
    } else {
      if (length(object@M) != length(w)) {
        # Fixed typo in the message: "Wheights" -> "Weights"
        stop("Weights must have the same dimensions of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows, ncols)
    # start from the degenerate null distribution and accumulate
    SUM <- new("distributionH", c(0, 0), c(0, 1))
    for (c in seq_len(ncols)) {
      for (r in seq_len(nrows)) {
        SUM <- SUM + w[r, c] * object@M[r, c][[1]]
      }
    }
    return(SUM)
  }
)
#' @rdname WH.vec.mean-methods
#' @aliases WH.vec.mean,MatH-method
#' @description Compute a histogram that is the weighted mean of the set of histograms contained
#' in a \code{MatH} object, i.e. a matrix of histograms, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... optional arguments
#' @param w it is possible to add a vector of weights (positive numbers) having the same size of
#' the \code{MatH object}, default = equal weights for all
#' @return a \code{distributionH} object, i.e. a histogram
#' @examples
#' hmean <- WH.vec.mean(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD) * get.MatH.ncols(BLOOD))
#' hmean <- WH.vec.mean(BLOOD, w = RN)
setMethod(
  f = "WH.vec.mean", signature = c(object = "MatH"),
  # Weighted mean of all histograms of a MatH object under the L2
  # Wasserstein metric; computed column-wise through the internal
  # MEAN_VA helper and then combined with the column weight totals.
  # INPUT:  object - a MatH; w - optional positive weights (one per cell)
  # OUTPUT: a single distributionH object
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    ncols <- ncol(object@M)
    nelem <- nrows * ncols
    if (missing(w)) {
      w <- rep(1 / nelem, nelem)
    } else {
      if (length(object@M) != length(w)) {
        # Fixed typo in the message: "Wheights" -> "Weights"
        stop("Weights must have the same dimensions of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows, ncols)
    w <- w / sum(w)
    if (ncols == 1) {
      MEAN <- MEAN_VA(object, w)
    } else {
      # combine per-column means, weighted by the total column weights
      w2 <- colSums(w)
      w2 <- w2 / sum(w2)
      MEAN <- w2[1] * MEAN_VA(object[, 1], w[, 1])
      for (c in 2:ncols) {
        MEAN <- MEAN + w2[c] * MEAN_VA(object[, c], w[, c])
      }
    }
    return(MEAN)
  }
)
#' @rdname WH.SSQ-methods
#' @aliases WH.SSQ,MatH-method
#' @description Compute the sum-of-squares-deviations (from the mean) matrix of a \code{MatH} object, i.e.
#' a matrix of numbers, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a squared \code{matrix} with the weighted sum of squares
#' @examples
#' WH.SSQ(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.SSQ(BLOOD, w = RN)
setMethod(
  f = "WH.SSQ", signature = c(object = "MatH"),
  # Weighted sum-of-squares (deviance/codeviance) matrix of a MatH,
  # delegated to the compiled routine SSQ_RCPP. A dead, commented-out
  # pure-R implementation that SSQ_RCPP superseded has been removed.
  # INPUT:  object - a MatH; w - optional positive weights (one per row)
  # OUTPUT: a ncols x ncols numeric matrix with variable names on both
  #         dimensions
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    if (missing(w)) {
      # default: unit weight for every row (individual)
      w <- rep(1, nrows)
    } else {
      if (nrows != length(w)) {
        # Fixed typo in the message: "Wheights" -> "Weights"
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows, 1)
    DEV_MAT <- SSQ_RCPP(object, w)
    # label both dimensions with the (distributional) variable names
    colnames(DEV_MAT) <- colnames(object@M)
    rownames(DEV_MAT) <- colnames(object@M)
    return(DEV_MAT)
  }
)
#' @rdname WH.var.covar-methods
#' @aliases WH.var.covar,MatH-method
#' @description Compute the variance-covariance matrix of a \code{MatH} object, i.e.
#' a matrix of values consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a squared \code{matrix} with the (weighted) variance-covariance values
#' @references Irpino, A., Verde, R. (2015) \emph{Basic
#' statistics for distributional symbolic variables: a new metric-based
#' approach} Advances in Data Analysis and Classification, DOI
#' 10.1007/s11634-014-0176-4
#' @examples
#' WH.var.covar(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.var.covar(BLOOD, w = RN)
setMethod(
  f = "WH.var.covar", signature = c(object = "MatH"),
  # Weighted variance-covariance matrix of a MatH object: it is the
  # weighted SSQ matrix evaluated with weights normalized to sum to one.
  function(object, w = numeric(0)) {
    n_ind <- nrow(object@M)
    if (missing(w)) {
      # default: every row (individual) weighs the same
      w <- rep(1, n_ind)
    } else {
      if (length(w) != n_ind) {
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    # normalizing the weights turns the SSQ matrix into a covariance matrix
    norm_w <- matrix(w, n_ind, 1)
    norm_w <- norm_w / sum(norm_w)
    return(WH.SSQ(object, norm_w))
  }
)
#' @rdname WH.correlation-methods
#' @aliases WH.correlation,MatH-method
#' @description Compute the correlation matrix of a \code{MatH} object, i.e.
#' a matrix of values consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a squared \code{matrix} with the (weighted) correlations indices
#' @references Irpino, A., Verde, R. (2015) \emph{Basic
#' statistics for distributional symbolic variables: a new metric-based
#' approach} Advances in Data Analysis and Classification, DOI
#' 10.1007/s11634-014-0176-4
#' @examples
#' WH.correlation(BLOOD)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.correlation(BLOOD, w = RN)
setMethod(
  f = "WH.correlation", signature = c(object = "MatH"),
  # Weighted correlation matrix of a MatH object: the covariance matrix
  # scaled by the outer product of the variables' standard deviations.
  # INPUT:  object - a MatH; w - optional positive weights (one per row)
  # OUTPUT: a ncols x ncols numeric matrix of correlation indices
  function(object, w = numeric(0)) {
    nrows <- nrow(object@M)
    if (missing(w)) {
      w <- rep(1, nrows)
    } else {
      if (nrows != length(w)) {
        # Fixed typo in the message: "Wheights" -> "Weights"
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows, 1)
    w <- w / sum(w)
    COV_MAT <- WH.var.covar(object, w)
    # Removed a dead `CORR_MAT <- as.matrix(COV_MAT)` assignment that was
    # immediately overwritten, plus leftover commented-out timing code.
    sds <- sqrt(diag(COV_MAT))
    CORR_MAT <- COV_MAT / (t(t(sds)) %*% sds)
    return(CORR_MAT)
  }
)
#' @rdname WH.SSQ2-methods
#' @aliases WH.SSQ2,MatH-method
#' @description Compute the sum-of-squares-deviations (from the mean) matrix using two \code{MatH} objects having the same number of rows,
#' It returns a rectangular matrix of numbers, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a rectangular \code{matrix} with the weighted sum of squares
#' @examples
#' M1 <- BLOOD[, 1]
#' M2 <- BLOOD[, 2:3]
#' WH.SSQ2(M1, M2)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.SSQ2(M1, M2, w = RN)
setMethod(
  f = "WH.SSQ2", signature = c(object1 = "MatH", object2 = "MatH"),
  # Weighted cross sum-of-squares (codeviance) matrix between the
  # columns of two MatH objects sharing the same rows (individuals),
  # based on the L2-Wasserstein dot product dotpW.
  # INPUT:  object1, object2 - MatH objects with equal row counts;
  #         w - optional positive weights (one per row)
  # OUTPUT: a ncols1 x ncols2 numeric matrix (a plain scalar when both
  #         inputs have a single column)
  function(object1, object2, w = numeric(0)) {
    nrows1 <- nrow(object1@M)
    ncols1 <- ncol(object1@M)
    nrows2 <- nrow(object2@M)
    ncols2 <- ncol(object2@M)
    if (nrows1 != nrows2) {
      stop("The two matrices have a different number of rows")
    }
    if (missing(w)) {
      w <- rep(1, nrows1)
    } else {
      if (nrows1 != length(w)) {
        # Fixed typo in the message: "Wheights" -> "Weights"
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows1, 1)
    DEV_MAT <- matrix(0, ncols1, ncols2)
    rownames(DEV_MAT) <- colnames(object1@M)
    colnames(DEV_MAT) <- colnames(object2@M)
    # weighted means of every (distributional) column of each input
    MEANS1 <- new("MatH", 1, ncols1)
    for (v1 in seq_len(ncols1)) {
      MEANS1@M[1, v1][[1]] <- WH.vec.mean(object1[, v1], w)
    }
    MEANS2 <- new("MatH", 1, ncols2)
    for (v2 in seq_len(ncols2)) {
      MEANS2@M[1, v2][[1]] <- WH.vec.mean(object2[, v2], w)
    }
    sumw <- sum(w) # hoisted out of the double loop (loop invariant)
    for (v1 in seq_len(ncols1)) {
      for (v2 in seq_len(ncols2)) {
        for (indiv in seq_len(nrows1)) {
          DEV_MAT[v1, v2] <- DEV_MAT[v1, v2] + w[indiv, 1] * dotpW(object1@M[indiv, v1][[1]], object2@M[indiv, v2][[1]])
        }
        DEV_MAT[v1, v2] <- DEV_MAT[v1, v2] - sumw * dotpW(MEANS1@M[1, v1][[1]], MEANS2@M[1, v2][[1]])
      }
    }
    if (ncols1 == 1 && ncols2 == 1) {
      return(as.vector(DEV_MAT))
    }
    return(DEV_MAT)
  }
)
#' @rdname WH.var.covar2-methods
#' @aliases WH.var.covar2,MatH-method
#' @description Compute the covariance matrix using two \code{MatH} objects having the same number of rows,
#' It returns a rectangular matrix of numbers, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a rectangular \code{matrix} with the weighted sum of squares
#' @examples
#' M1 <- BLOOD[, 1]
#' M2 <- BLOOD[, 2:3]
#' WH.var.covar2(M1, M2)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.var.covar2(M1, M2, w = RN)
setMethod(
  f = "WH.var.covar2", signature = c(object1 = "MatH", object2 = "MatH"),
  # Weighted cross-covariance matrix between the columns of two MatH
  # objects sharing the same rows; identical to WH.SSQ2 except that the
  # weights are normalized to sum to one.
  # INPUT:  object1, object2 - MatH objects with equal row counts;
  #         w - optional positive weights (one per row)
  # OUTPUT: a ncols1 x ncols2 numeric matrix (a plain scalar when both
  #         inputs have a single column)
  function(object1, object2, w = numeric(0)) {
    nrows1 <- nrow(object1@M)
    ncols1 <- ncol(object1@M)
    nrows2 <- nrow(object2@M)
    ncols2 <- ncol(object2@M)
    if (nrows1 != nrows2) {
      stop("The two matrices have a different number of rows")
    }
    if (missing(w)) {
      w <- rep(1, nrows1)
    } else {
      if (nrows1 != length(w)) {
        # Fixed typo in the message: "Wheights" -> "Weights"
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows1, 1)
    w <- w / sum(w)
    VAR_MAT <- matrix(0, ncols1, ncols2)
    rownames(VAR_MAT) <- colnames(object1@M)
    colnames(VAR_MAT) <- colnames(object2@M)
    # weighted means of every (distributional) column of each input
    MEANS1 <- new("MatH", 1, ncols1)
    for (v1 in seq_len(ncols1)) {
      MEANS1@M[1, v1][[1]] <- WH.vec.mean(object1[, v1], w)
    }
    MEANS2 <- new("MatH", 1, ncols2)
    for (v2 in seq_len(ncols2)) {
      MEANS2@M[1, v2][[1]] <- WH.vec.mean(object2[, v2], w)
    }
    sumw <- sum(w) # equals 1 after normalization; hoisted for clarity
    for (v1 in seq_len(ncols1)) {
      for (v2 in seq_len(ncols2)) {
        for (indiv in seq_len(nrows1)) {
          VAR_MAT[v1, v2] <- VAR_MAT[v1, v2] + w[indiv, 1] * dotpW(object1@M[indiv, v1][[1]], object2@M[indiv, v2][[1]])
        }
        VAR_MAT[v1, v2] <- VAR_MAT[v1, v2] - sumw * dotpW(MEANS1@M[1, v1][[1]], MEANS2@M[1, v2][[1]])
      }
    }
    if (ncols1 == 1 && ncols2 == 1) {
      return(as.vector(VAR_MAT))
    }
    return(VAR_MAT)
  }
)
#' @rdname WH.correlation2-methods
#' @aliases WH.correlation2,MatH-method
#' @description Compute the correlation matrix using two \code{MatH} objects having the same number of rows,
#' It returns a rectangular matrix of numbers, consistent with
#' a set of distributions equipped with a L2 wasserstein metric.
#' @param object1 a \code{MatH} object
#' @param object2 a \code{MatH} object
#' @param ... some optional parameters
#' @param w it is possible to add a vector of weights (positive numbers)
#' having the same size of the rows of the \code{MatH object},
#' default = equal weight for each row
#' @return a rectangular \code{matrix} with the weighted sum of squares
#' @examples
#' M1 <- BLOOD[, 1]
#' M2 <- BLOOD[, 2:3]
#' WH.correlation2(M1, M2)
#' # generate a set of random weights
#' RN <- runif(get.MatH.nrows(BLOOD))
#' WH.correlation2(M1, M2, w = RN)
setMethod(
  f = "WH.correlation2", signature = c(object1 = "MatH", object2 = "MatH"),
  # Weighted cross-correlation matrix between the columns of two MatH
  # objects: each cross-covariance is scaled by the square root of the
  # product of the two column variances.
  # INPUT:  object1, object2 - MatH objects with equal row counts;
  #         w - optional positive weights (one per row)
  # OUTPUT: a ncols1 x ncols2 numeric matrix (a plain scalar when the
  #         result has a single cell)
  function(object1, object2, w = numeric(0)) {
    nrows1 <- nrow(object1@M)
    ncols1 <- ncol(object1@M)
    nrows2 <- nrow(object2@M)
    ncols2 <- ncol(object2@M)
    if (nrows1 != nrows2) {
      stop("The two matrices have a different number of rows")
    }
    if (missing(w)) {
      w <- rep(1, nrows1)
    } else {
      if (nrows1 != length(w)) {
        # Fixed typo in the message: "Wheights" -> "Weights"
        stop("Weights must have the same length of rows of the input matrix of distributions")
      }
      if (min(w) < 0) {
        stop("Weights must be positive!!")
      }
    }
    w <- matrix(w, nrows1, 1)
    w <- w / sum(w)
    COV_MAT <- WH.var.covar2(object1, object2, w)
    # as.matrix is needed: COV_MAT degenerates to a scalar vector when
    # both inputs have a single column
    CORR_MAT <- as.matrix(COV_MAT)
    for (v1 in seq_len(ncols1)) {
      for (v2 in seq_len(ncols2)) {
        CORR_MAT[v1, v2] <- COV_MAT[v1, v2] / sqrt(WH.var.covar(object1[, v1], w) * WH.var.covar(object2[, v2], w))
      }
    }
    if (length(CORR_MAT) == 1) {
      return(as.vector(CORR_MAT))
    }
    return(CORR_MAT)
  }
)
# Utility methods for registration of distributions ----
#' Method is.registeredMH
#' @name is.registeredMH
#' @rdname is.registeredMH-methods
#' @exportMethod is.registeredMH
setGeneric("is.registeredMH", function(object) standardGeneric("is.registeredMH")) # TRUE when all histograms share the same cdf (p slot)
#' @rdname is.registeredMH-methods
#' @aliases is.registeredMH,MatH-method
#' @description Checks if a \code{MatH} contains histograms described by the same number of
#' bins and the same cdf.
#'
#' @param object A \code{MatH} object
#' @return a \code{logical} value \code{TRUE} if the distributions share the
#' same cdf, \code{FALSE} otherwise.
#' @author Antonio Irpino
#' @references Irpino, A., Lechevallier, Y. and Verde, R. (2006): \emph{Dynamic
#' clustering of histograms using Wasserstein metric} In: Rizzi, A., Vichi, M.
#' (eds.) COMPSTAT 2006. Physica-Verlag, Berlin, 869-876.\cr Irpino, A.,Verde,
#' R. (2006): \emph{A new Wasserstein based distance for the hierarchical
#' clustering of histogram symbolic data} In: Batanjeli, V., Bock, H.H.,
#' Ferligoj, A., Ziberna, A. (eds.) Data Science and Classification, IFCS 2006.
#' Springer, Berlin, 185-192.
#' @keywords distribution
#' @examples
#'
#' ## ---- initialize three distributionH objects mydist1 and mydist2
#' mydist1 <- new("distributionH", c(1, 2, 3), c(0, 0.4, 1))
#' mydist2 <- new("distributionH", c(7, 8, 10, 15), c(0, 0.2, 0.7, 1))
#' mydist3 <- new("distributionH", c(9, 11, 20), c(0, 0.8, 1))
#' ## create a MatH object
#' MyMAT <- new("MatH", nrows = 1, ncols = 3, ListOfDist = c(mydist1, mydist2, mydist3), 1, 3)
#' is.registeredMH(MyMAT)
#' ## [1] FALSE #the distributions do not share the same cdf
#' ## Hint: check with str(MyMAT)
#'
#' ## register the two distributions
#' MATregistered <- registerMH(MyMAT)
#' is.registeredMH(MATregistered)
#' ## TRUE #the distributions share the same cdf
#' ## Hint: check with str(MATregistered)
setMethod(
  f = "is.registeredMH", signature = c(object = "MatH"),
  # Check whether all distributions of the matrix share the same cdf
  # (the same `p` slot), comparing every cell against the first one.
  # Linear indexing of object@M is used, so scan order is irrelevant.
  # INPUT:  object - a MatH object
  # OUTPUT: TRUE if every cell shares the same cdf, FALSE otherwise
  function(object) {
    ndis <- nrow(object@M) * ncol(object@M)
    # reference cdf: the one of the first cell
    tmpcdf <- object@M[1, 1][[1]]@p
    # Rewritten: the previous while/OK-flag loop (with an unused `r`
    # counter) is replaced by an equivalent early-returning for loop.
    if (ndis > 1) {
      for (count in 2:ndis) {
        if (!identical(tmpcdf, object@M[count][[1]]@p)) {
          return(FALSE)
        }
      }
    }
    return(TRUE)
  }
)
#' Method registerMH
#' @name registerMH
#' @rdname registerMH-methods
#' @exportMethod registerMH
setGeneric("registerMH", function(object) standardGeneric("registerMH")) # recompute all histograms of a MatH on a common cdf
#' @rdname registerMH-methods
#' @aliases registerMH,MatH-method
#' @description \code{registerMH} method registers a set of distributions of a \code{MatH} object
#' All the
#' distribution are recomputed to obtain distributions sharing the same
#' \code{p} slot. This methods is useful for using fast computation of all
#' methods based on L2 Wasserstein metric. The distributions will have the same
#' number of element in the \code{x} slot without modifing their density
#' function.
#'
#'
#' @param object A \code{MatH} object (a matrix of distributions)
#' @return A \code{MatH} object, a matrix of distributions sharing the same
#' \code{p} slot (i.e. the same cdf).
#' @author Antonio Irpino
#' @references Irpino, A., Lechevallier, Y. and Verde, R. (2006): \emph{Dynamic
#' clustering of histograms using Wasserstein metric} In: Rizzi, A., Vichi, M.
#' (eds.) COMPSTAT 2006. Physica-Verlag, Berlin, 869-876.\cr Irpino, A.,Verde,
#' R. (2006): \emph{A new Wasserstein based distance for the hierarchical
#' clustering of histogram symbolic data} In: Batanjeli, V., Bock, H.H.,
#' Ferligoj, A., Ziberna, A. (eds.) Data Science and Classification, IFCS 2006.
#' Springer, Berlin, 185-192.
#' @keywords distribution
#' @examples
#' # initialize three distributionH objects mydist1 and mydist2
#' mydist1 <- new("distributionH", c(1, 2, 3), c(0, 0.4, 1))
#' mydist2 <- new("distributionH", c(7, 8, 10, 15), c(0, 0.2, 0.7, 1))
#' mydist3 <- new("distributionH", c(9, 11, 20), c(0, 0.8, 1))
#' # create a MatH object
#'
#' MyMAT <- new("MatH", nrows = 1, ncols = 3, ListOfDist = c(mydist1, mydist2, mydist3), 1, 3)
#' # register the two distributions
#' MATregistered <- registerMH(MyMAT)
#' #
#' # OUTPUT the structure of MATregstered
#' str(MATregistered)
#' # Formal class 'MatH' [package "HistDAWass"] with 1 slots
#' # .. @@ M:List of 3
#' # .. ..$ :Formal class 'distributionH' [package "HistDAWass"] with 4 slots
#' # .. .. .. ..@@ x: num [1:6] 1 1.5 2 2.5 2.67 ...
#' # .. .. .. ..@@ p: num [1:6] 0 0.2 0.4 0.7 0.8 1
#' # ...
#' # .. ..$ :Formal class 'distributionH' [package "HistDAWass"] with 4 slots
#' # .. .. .. ..@@ x: num [1:6] 7 8 8.8 10 11.7 ...
#' # .. .. .. ..@@ p: num [1:6] 0 0.2 0.4 0.7 0.8 1
#' # ...
#' # .. ..$ :Formal class 'distributionH' [package "HistDAWass"] with 4 slots
#' # .. .. .. ..@@ x: num [1:6] 9 9.5 10 10.8 11 ...
#' # .. .. .. ..@@ p: num [1:6] 0 0.2 0.4 0.7 0.8 1
#' # ...
#' # .. ..- attr(*, "dim")= int [1:2] 1 3
#' # .. ..- attr(*, "dimnames")=List of 2
#' # .. .. ..$ : chr "I1"
#' # .. .. ..$ : chr [1:3] "X1" "X2" "X3"
#' #
setMethod(
  f = "registerMH", signature = c(object = "MatH"),
  # Register all distributions of a MatH on a common cdf: the quantiles
  # of each histogram are recomputed at the union of all the p levels,
  # so that every cell ends up sharing the same `p` slot. Useful for the
  # fast L2-Wasserstein computations used throughout the package.
  # INPUT:  object - a MatH object
  # OUTPUT: a MatH object whose cells all share the same cdf
  function(object) {
    nrows <- nrow(object@M)
    ncols <- ncol(object@M)
    # nothing to do if the matrix is already registered
    if (is.registeredMH(object)) {
      return(object)
    }
    # Pool every cdf level across all cells. This replaces the previous
    # O(n^2) rbind-growth loop with a single unlist; order is irrelevant
    # because the levels are sorted and deduplicated right after.
    commoncdf <- unlist(lapply(object@M, function(d) d@p))
    # rounding guards against floating-point noise in the p levels
    commoncdf <- sort(unique(round(commoncdf, digits = 10)))
    # force exact endpoints after rounding
    commoncdf[1] <- 0
    commoncdf[length(commoncdf)] <- 1
    # Removed a dead `result` matrix that was allocated and partially
    # filled but never used afterwards.
    NEWMAT <- new("MatH", nrows, ncols)
    for (r in seq_len(nrows)) {
      for (c in seq_len(ncols)) {
        # quantiles of the original cell evaluated at the common levels
        x <- compQ_vect(object@M[r, c][[1]], vp = commoncdf)
        NEWMAT@M[r, c][[1]] <- new("distributionH", x, commoncdf)
      }
    }
    return(NEWMAT)
  }
)
#' Method Center.cell.MatH Centers all the cells of a matrix of distributions
#' @name Center.cell.MatH
#' @rdname Center.cell.MatH-methods
#' @exportMethod Center.cell.MatH
setGeneric("Center.cell.MatH", function(object) standardGeneric("Center.cell.MatH")) # shift each histogram so that its mean is zero
#' @rdname Center.cell.MatH-methods
#' @aliases Center.cell.MatH,MatH-method
#' @description The function transform a MatH object (i.e. a matrix of distributions),
#' such that each distribution is shifted and has a mean equal to zero
#' @param object a MatH object, a matrix of distributions.
#' @return A \code{MatH} object, having each distribution with a zero mean.
#' @examples
#' CEN_BLOOD <- Center.cell.MatH(BLOOD)
#' get.MatH.stats(BLOOD, stat = "mean")
setMethod(
  f = "Center.cell.MatH", signature = c(object = "MatH"),
  # Shift every histogram of the matrix so that its mean is zero: the
  # support (`x` slot) is translated by the cell mean and the `m` slot
  # reset; the standard deviation (`s` slot) is unaffected by a shift.
  function(object) {
    centered <- object
    n_rows <- get.MatH.nrows(object)
    n_cols <- get.MatH.ncols(object)
    for (row_idx in seq_len(n_rows)) {
      for (col_idx in seq_len(n_cols)) {
        cell <- centered@M[row_idx, col_idx][[1]]
        cell@x <- cell@x - cell@m
        cell@m <- 0
        centered@M[row_idx, col_idx][[1]] <- cell
      }
    }
    return(centered)
  }
)
## Show overridding ----
#' Method show for MatH
#' @name show-MatH
#' @rdname show-MatH-methods
#' @docType methods
# @aliases show,distributionH-method
# @name show
# @rdname show-MatH
#' @aliases show,MatH-method
#' @description An overriding show method for a \code{MatH} object. The method returns a representation
#' of the matrix using the mean and the standard deviation for each histogram.
#' @param object a \code{MatH} object
#' @examples
#' show(BLOOD)
#' print(BLOOD)
#' BLOOD
setMethod("show",
  signature(object = "MatH"),
  # Pretty-printer for a MatH: each cell is rendered as "[m=... ,s=...]"
  # using the mean and standard deviation of its histogram. Scientific
  # notation is chosen per-slot when the value lies outside [1e-5, 1e5].
  # NOTE(review): "distibution" typo in the printed header below is a
  # runtime string and is deliberately left untouched here.
  definition = function(object) {
    cat("a matrix of distributions \n", paste(
      ncol(object@M), " variables ",
      nrow(object@M), " rows \n"
    ), "each distibution in the cell is represented by the mean and the standard deviation \n ")
    # character matrix for display: first row holds the column names,
    # the remaining rows hold one formatted "[m=.. ,s=..]" cell each
    mymat <- matrix(0, nrow(object@M) + 1, ncol(object@M))
    for (i in 1:ncol(object@M)) {
      mymat[1, i] <- colnames(object@M)[i]
    }
    for (i in 1:nrow(object@M)) {
      for (j in 1:ncol(object@M)) {
        if (length(object@M[i, j][[1]]@x) == 0) {
          # a cell with no support points has never been filled
          mymat[i + 1, j] <- paste("Empty distribution")
        }
        else {
          # Branch 1: both mean and sd are extreme -> both scientific
          if ((abs(object@M[i, j][[1]]@m) > 1e5 || abs(object@M[i, j][[1]]@m) < 1e-5) &&
            (object@M[i, j][[1]]@s > 1e5 || object@M[i, j][[1]]@s < 1e-5)) {
            mymat[i + 1, j] <- paste(
              "[m=", format(object@M[i, j][[1]]@m, digits = 5, scientific = TRUE),
              " ,s=", format(object@M[i, j][[1]]@s, digits = 5, scientific = TRUE), "]"
            )
          }
          # Branch 2: mean in normal range -> both plain.
          # NOTE(review): the `||` in the s-condition below looks like it
          # was meant to be `&&` (it is almost always TRUE); the printed
          # output still ends up correct because branch 4 runs afterwards
          # and overwrites the cell when s is extreme — confirm intent.
          if ((abs(object@M[i, j][[1]]@m) <= 1e5 && abs(object@M[i, j][[1]]@m) >= 1e-5) &&
            (object@M[i, j][[1]]@s <= 1e5 || object@M[i, j][[1]]@s >= 1e-5)) {
            mymat[i + 1, j] <- paste(
              "[m=", format(object@M[i, j][[1]]@m, digits = 5),
              " ,s=", format(object@M[i, j][[1]]@s, digits = 5), "]"
            )
          }
          # Branch 3: mean extreme, sd normal -> scientific mean only
          if ((abs(object@M[i, j][[1]]@m) > 1e5 || abs(object@M[i, j][[1]]@m) < 1e-5) &&
            (object@M[i, j][[1]]@s <= 1e5 && object@M[i, j][[1]]@s >= 1e-5)) {
            mymat[i + 1, j] <- paste(
              "[m=", format(object@M[i, j][[1]]@m, digits = 5, scientific = TRUE),
              " ,s=", format(object@M[i, j][[1]]@s, digits = 5), "]"
            )
          }
          # Branch 4: mean normal, sd extreme -> scientific sd only
          if ((abs(object@M[i, j][[1]]@m) <= 1e5 && abs(object@M[i, j][[1]]@m) >= 1e-5) &&
            (object@M[i, j][[1]]@s > 1e5 || object@M[i, j][[1]]@s < 1e-5)) {
            mymat[i + 1, j] <- paste(
              "[m=", format(object@M[i, j][[1]]@m, digits = 5),
              " ,s=", format(object@M[i, j][[1]]@s, digits = 5, scientific = TRUE), "]"
            )
          }
        }
      }
    }
    # blank label for the header row, then the original row names
    rownames(mymat) <- c(
      paste(rep(" ", nchar(rownames(object@M)[1])), collapse = ""),
      row.names(object@M)
    )
    write.table(format(mymat, justify = "centre"), row.names = T, col.names = F, quote = F)
  }
)
# define a "plot" S4 generic only when one is not already registered
# (e.g. by another loaded package), so we do not clobber it
if (!isGeneric("plot")) {
  setGeneric(
    "plot",
    function(x, y, ...) standardGeneric("plot")
  )
}
# Plot overloading ----
#' Method plot for a matrix of histograms
#' @name plot-MatH
#' @docType methods
#' @rdname plot-MatH
#' @aliases plot,MatH-method
#' @description An overloading plot function for a \code{MatH} object. The method returns a graphical representation
#' of the matrix of histograms.
#' @param x a \code{distributionH} object
#' @param y not used in this implementation
#' @param type (optional) a string describing the type of plot, default="HISTO".\cr
#' Other allowed types are \cr
#' "DENS"=a density approximation, \cr
#' "BOXPLOT"=l boxplot
#' @param border (optional) a string the color of the border of the plot, default="black".
#' @param angL (optional) angle of labels of rows (DEFAULT=330).
#' @examples
#' plot(BLOOD) # plots BLOOD dataset
#' \dontrun{
#' plot(BLOOD, type = "HISTO", border = "blue") # plots a matrix of histograms
#' plot(BLOOD, type = "DENS", border = "blue") # plots a matrix of densities
#' plot(BLOOD, type = "BOXPLOT") # plots a boxplots
#' }
#' @importFrom utils write.table
#' @export
setMethod(
  "plot",
  signature(x = "MatH"),
  # Thin wrapper: all rendering is delegated to the internal plot.M()
  # helper; `y` is unused (kept only to satisfy the generic signature).
  function(x, y = "missing", type = "HISTO", border = "black", angL = 330) {
    plot.M(x, type = type, border = border, angL = angL)
  }
)
#' Method get.cell.MatH Returns the histogram in a cell of a matrix of distributions
#' @name get.cell.MatH
#' @rdname get.cell.MatH-methods
#' @exportMethod get.cell.MatH
setGeneric("get.cell.MatH", function(object, r, c) standardGeneric("get.cell.MatH")) # extract the distributionH stored in cell (r, c)
#' @rdname get.cell.MatH-methods
#' @aliases get.cell.MatH,MatH-method
#' @description Returns the histogram data in the r-th row and the c-th column.
#' @param object a MatH object, a matrix of distributions.
#' @param r an integer, the row index.
#' @param c an integer, the column index
#'
#' @return A \code{distributionH} object.
#' @examples
#' get.cell.MatH(BLOOD, r = 1, c = 1)
setMethod(
  f = "get.cell.MatH", signature = c(object = "MatH", r = "numeric", c = "numeric"),
  # Extract the distributionH stored in cell (r, c) of the matrix;
  # returns NULL (after printing a notice) when the indices are out of
  # range.
  # INPUT:  object - a MatH; r, c - 1-based row/column indices
  # OUTPUT: a distributionH object, or NULL on invalid indices
  function(object, r, c) {
    nr <- get.MatH.nrows(object)
    nc <- get.MatH.ncols(object)
    r <- as.integer(r)
    c <- as.integer(c)
    # Fixed: scalar short-circuit `||` instead of the vectorized `|`
    # in a scalar if() condition
    if (r > nr || r < 1 || c < 1 || c > nc) {
      print("Indices out of range")
      return(NULL)
    }
    Dist <- object@M[r, c][[1]]
    return(Dist)
  }
)
#' Method set.cell.MatH assign a histogram to a cell of a matrix of histograms
#' @name set.cell.MatH
#' @rdname set.cell.MatH-methods
#' @exportMethod set.cell.MatH
setGeneric("set.cell.MatH", function(object, mat, r, c) standardGeneric("set.cell.MatH")) # store a distributionH into cell (r, c) of a MatH
#' @rdname set.cell.MatH-methods
#' @aliases set.cell.MatH,MatH-method
#' @description Assign a histogram data to the r-th row and the c-th column of a matrix of histograms.
#' @param object a distributionH object, a matrix of distributions.
#' @param mat a MatH object, a matrix of distributions.
#' @param r an integer, the row index.
#' @param c an integer, the column index
#'
#' @return A \code{MatH} object.
#' @examples
#' mydist <- distributionH(x = c(0, 1, 2, 3, 4), p = c(0, 0.1, 0.6, 0.9, 1))
#' MAT <- set.cell.MatH(mydist, BLOOD, r = 1, c = 1)
setMethod(
  f = "set.cell.MatH", signature = c(object = "distributionH", mat = "MatH", r = "numeric", c = "numeric"),
  # Store a distributionH into cell (r, c) of a matrix of histograms and
  # return the updated matrix; returns NULL (after printing a notice)
  # when the indices are out of range.
  # INPUT:  object - the distributionH to store; mat - the target MatH;
  #         r, c - 1-based row/column indices
  # OUTPUT: the updated MatH object, or NULL on invalid indices
  function(object, mat, r, c) {
    nr <- get.MatH.nrows(mat)
    nc <- get.MatH.ncols(mat)
    r <- as.integer(r)
    c <- as.integer(c)
    # Fixed: scalar short-circuit `||` instead of the vectorized `|`
    # in a scalar if() condition
    if (r > nr || r < 1 || c < 1 || c > nc) {
      print("Indices out of range")
      return(NULL)
    }
    mat@M[r, c][[1]] <- object
    return(mat)
  }
)
|
## Fit the ebf/gsk model for each (L, cv) combination in turn.
## fitmodel.R reads its configuration (process, margin, L, cv, and the
## output file paths) from the global environment, so each run deliberately
## resets the workspace first with rm(list = ls()).
rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)  # use TRUE, not the reassignable T
# Number of bases: 5, 10, 15, 20
process <- "ebf"  # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"  # ebf: empirical basis functions, gsk: gaussian kernels
L  <- 35          # number of knots to use for the basis functions
cv <- 2           # which cross-validation set to use
results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
# fit the model and get predictions
source(file = "./fitmodel.R")

rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)
# Number of bases: 5, 10, 15, 20
process <- "ebf"  # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"  # ebf: empirical basis functions, gsk: gaussian kernels
L  <- 35          # number of knots to use for the basis functions
cv <- 7           # which cross-validation set to use
results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
# fit the model and get predictions
source(file = "./fitmodel.R")

rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)
# Number of bases: 5, 10, 15, 20
process <- "ebf"  # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"  # ebf: empirical basis functions, gsk: gaussian kernels
L  <- 40          # number of knots to use for the basis functions
cv <- 2           # which cross-validation set to use
results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
# fit the model and get predictions
source(file = "./fitmodel.R")

rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)
# Number of bases: 5, 10, 15, 20
process <- "ebf"  # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"  # ebf: empirical basis functions, gsk: gaussian kernels
L  <- 40          # number of knots to use for the basis functions
cv <- 7           # which cross-validation set to use
results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
# fit the model and get predictions
source(file = "./fitmodel.R")
rm(list=ls())
## Fit the ebf/gsk model for each (L, cv) combination in turn.
## fitmodel.R reads its configuration (process, margin, L, cv, and the
## output file paths) from the global environment; each run resets the
## workspace before setting up the next configuration.
source(file = "./package_load.R", chdir = TRUE)  # use TRUE, not the reassignable T
# Number of bases: 5, 10, 15, 20
process <- "ebf"  # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"  # ebf: empirical basis functions, gsk: gaussian kernels
L  <- 35          # number of knots to use for the basis functions
cv <- 2           # which cross-validation set to use
results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
# fit the model and get predictions
source(file = "./fitmodel.R")

rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)
# Number of bases: 5, 10, 15, 20
process <- "ebf"  # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"  # ebf: empirical basis functions, gsk: gaussian kernels
L  <- 35          # number of knots to use for the basis functions
cv <- 7           # which cross-validation set to use
results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
# fit the model and get predictions
source(file = "./fitmodel.R")

rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)
# Number of bases: 5, 10, 15, 20
process <- "ebf"  # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"  # ebf: empirical basis functions, gsk: gaussian kernels
L  <- 40          # number of knots to use for the basis functions
cv <- 2           # which cross-validation set to use
results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
# fit the model and get predictions
source(file = "./fitmodel.R")

rm(list = ls())
source(file = "./package_load.R", chdir = TRUE)
# Number of bases: 5, 10, 15, 20
process <- "ebf"  # ebf: empirical basis functions, gsk: gaussian kernels
margin  <- "gsk"  # ebf: empirical basis functions, gsk: gaussian kernels
L  <- 40          # number of knots to use for the basis functions
cv <- 7           # which cross-validation set to use
results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
# fit the model and get predictions
source(file = "./fitmodel.R")
# Test description fixed: this file exercises standardize_column_order,
# not get_predicted (the old name was a copy-paste error).
test_that("standardize_column_order sorts easystats and broom columns", {
  # easystats conventions
  df1 <- cbind.data.frame(
    CI_low = -2.873,
    t = 5.494,
    CI_high = -1.088,
    p = 0.00001,
    Parameter = -1.980,
    CI = 0.95,
    df = 29.234,
    Method = "Student's t-test"
  )
  expect_named(
    standardize_column_order(df1, style = "easystats"),
    c("Parameter", "CI", "CI_low", "CI_high", "Method", "t", "df", "p")
  )
  # broom conventions
  df2 <- cbind.data.frame(
    conf.low = -2.873,
    statistic = 5.494,
    conf.high = -1.088,
    p.value = 0.00001,
    estimate = -1.980,
    conf.level = 0.95,
    df = 29.234,
    method = "Student's t-test"
  )
  expect_named(
    standardize_column_order(df2, style = "broom"),
    c(
      "estimate", "conf.level", "conf.low", "conf.high", "method",
      "statistic", "df", "p.value"
    )
  )
  # deliberately misspecify column names
  # the misspecified columns should be pushed to the end
  df3 <- cbind.data.frame(
    CI_Low = -2.873,
    t = 5.494,
    CI_High = -1.088,
    p = 0.00001,
    Parameter = -1.980,
    CI = 0.95,
    df = 29.234,
    Method = "Student's t-test"
  )
  expect_named(
    standardize_column_order(df3, style = "easystats"),
    c("Parameter", "CI", "Method", "t", "df", "p", "CI_Low", "CI_High")
  )
})
test_that("reorder columns BF", {
  # brms_bf <- suppressWarnings(download_model("brms_bf_1"))
  # Bayesian posterior summary with diagnostic / Bayes-factor columns
  # deliberately out of canonical order.
  bayes_out <- data.frame(
    Parameter = c("b_Intercept", "b_wt", "sigma"),
    Component = c("conditional", "conditional", "sigma"),
    Median = c(32.22175, -3.755645, 3.461165),
    CI = c(0.95, 0.95, 0.95),
    CI_low = c(27.2244525, -4.9688055, 2.6517275),
    CI_high = c(35.75887, -2.21074025, 4.69652725),
    pd = c(1, 1, 1),
    ROPE_Percentage = c(0, 0, 0),
    log_BF = c(14.4924732349718, 5.79962753110103, 8.89383915455679),
    Rhat = c(1.00438747198895, 1.00100407213689, 0.992006699276081),
    ESS = c(88.3152312142069, 91.7932788446396, 167.822262320689),
    stringsAsFactors = FALSE
  )
  # Expected canonical easystats ordering.
  expected_order <- c(
    "Parameter", "Median", "Component", "CI", "CI_low", "CI_high",
    "pd", "ROPE_Percentage", "log_BF", "Rhat", "ESS"
  )
  expect_named(standardize_column_order(bayes_out), expected_order)
})
# Test description fixed: this file exercises standardize_column_order,
# not get_predicted (the old name was a copy-paste error).
test_that("standardize_column_order sorts easystats and broom columns", {
  # easystats conventions
  df1 <- cbind.data.frame(
    CI_low = -2.873,
    t = 5.494,
    CI_high = -1.088,
    p = 0.00001,
    Parameter = -1.980,
    CI = 0.95,
    df = 29.234,
    Method = "Student's t-test"
  )
  expect_named(
    standardize_column_order(df1, style = "easystats"),
    c("Parameter", "CI", "CI_low", "CI_high", "Method", "t", "df", "p")
  )
  # broom conventions
  df2 <- cbind.data.frame(
    conf.low = -2.873,
    statistic = 5.494,
    conf.high = -1.088,
    p.value = 0.00001,
    estimate = -1.980,
    conf.level = 0.95,
    df = 29.234,
    method = "Student's t-test"
  )
  expect_named(
    standardize_column_order(df2, style = "broom"),
    c(
      "estimate", "conf.level", "conf.low", "conf.high", "method",
      "statistic", "df", "p.value"
    )
  )
  # deliberately misspecify column names
  # the misspecified columns should be pushed to the end
  df3 <- cbind.data.frame(
    CI_Low = -2.873,
    t = 5.494,
    CI_High = -1.088,
    p = 0.00001,
    Parameter = -1.980,
    CI = 0.95,
    df = 29.234,
    Method = "Student's t-test"
  )
  expect_named(
    standardize_column_order(df3, style = "easystats"),
    c("Parameter", "CI", "Method", "t", "df", "p", "CI_Low", "CI_High")
  )
})
test_that("reorder columns BF", {
  # brms_bf <- suppressWarnings(download_model("brms_bf_1"))
  # Posterior summary whose columns are intentionally shuffled relative to
  # the canonical easystats order.
  posterior_summary <- data.frame(
    Parameter = c("b_Intercept", "b_wt", "sigma"),
    Component = c("conditional", "conditional", "sigma"),
    Median = c(32.22175, -3.755645, 3.461165),
    CI = c(0.95, 0.95, 0.95),
    CI_low = c(27.2244525, -4.9688055, 2.6517275),
    CI_high = c(35.75887, -2.21074025, 4.69652725),
    pd = c(1, 1, 1),
    ROPE_Percentage = c(0, 0, 0),
    log_BF = c(14.4924732349718, 5.79962753110103, 8.89383915455679),
    Rhat = c(1.00438747198895, 1.00100407213689, 0.992006699276081),
    ESS = c(88.3152312142069, 91.7932788446396, 167.822262320689),
    stringsAsFactors = FALSE
  )
  canonical <- c(
    "Parameter", "Median", "Component", "CI", "CI_low", "CI_high",
    "pd", "ROPE_Percentage", "log_BF", "Rhat", "ESS"
  )
  expect_named(standardize_column_order(posterior_summary), canonical)
})
|
## Generating Plot 1
## Histogram of Global Active Power for 2007-02-01/02, saved to plot1.png.
## define name of file to save plot to
png("plot1.png")
## setup to only have 1 plot on the output
par(mfrow = c(1, 1))
## read full data file; '?' marks missing values in this dataset.
## Use TRUE/FALSE rather than T/F (T and F are ordinary, reassignable objects).
hpower <- read.csv("household_power_consumption.txt", header = TRUE, sep = ';',
                   na.strings = "?", nrows = 2075259, check.names = FALSE,
                   stringsAsFactors = FALSE, comment.char = "", quote = '\"')
## format the date field to d,m,y
hpower$Date <- as.Date(hpower$Date, format = "%d/%m/%Y")
## Subsetting the data to only include date range required
hpowerdata <- subset(hpower, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(hpower)  # drop the full dataset; only the two-day window is needed
## Converting dates
datetime <- paste(as.Date(hpowerdata$Date), hpowerdata$Time)
hpowerdata$Datetime <- as.POSIXct(datetime)
## plot 1 - histogram
hist(hpowerdata$Global_active_power, col = "Red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
## close the PNG device
dev.off()
## define name of file to save plot to
png("plot1.png")
## setup to only have 1 plot on the output
par(mfrow = c(1, 1))
## read full data file; '?' marks missing values in this dataset.
## Use TRUE/FALSE rather than T/F (T and F are ordinary, reassignable objects).
hpower <- read.csv("household_power_consumption.txt", header = TRUE, sep = ';',
                   na.strings = "?", nrows = 2075259, check.names = FALSE,
                   stringsAsFactors = FALSE, comment.char = "", quote = '\"')
## format the date field to d,m,y
hpower$Date <- as.Date(hpower$Date, format = "%d/%m/%Y")
## Subsetting the data to only include date range required
hpowerdata <- subset(hpower, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(hpower)  # drop the full dataset; only the two-day window is needed
## Converting dates
datetime <- paste(as.Date(hpowerdata$Date), hpowerdata$Time)
hpowerdata$Datetime <- as.POSIXct(datetime)
## plot 1 - histogram
hist(hpowerdata$Global_active_power, col = "Red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
## close the PNG device
dev.off()
# This program caches the inverse of a matrix.
# makeCacheMatrix builds a cache-aware "matrix": a list of closures that
# share the matrix x and its lazily computed inverse inv.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      # Replacing the stored matrix invalidates any cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
# We get the inverse of the matrix with this function.
# cacheSolve returns the inverse of the special "matrix" created by
# makeCacheMatrix, computing it with solve() on the first call and reusing
# the cached value on subsequent calls.
#
# x   : a cache-aware matrix as returned by makeCacheMatrix
# ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    # Cache hit: skip the (potentially expensive) inversion.
    return(inv)
  }
  data <- x$get()
  # Bug fix: forward ... to solve() as the signature advertises.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
# This program caches the inverse of a matrix.
# For an input matrix, makeCacheMatrix creates
# a cache of the matrix: a list of accessor closures that share the
# matrix x and its lazily computed inverse inv.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      # Storing a new matrix invalidates any cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
# We get the inverse of the matrix with this function.
# cacheSolve returns the inverse of the special "matrix" created by
# makeCacheMatrix, computing it with solve() on the first call and reusing
# the cached value on subsequent calls.
#
# x   : a cache-aware matrix as returned by makeCacheMatrix
# ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    # Cache hit: skip the (potentially expensive) inversion.
    return(inv)
  }
  data <- x$get()
  # Bug fix: forward ... to solve() as the signature advertises.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cycleGAN_models.R
\name{combined_flat_anneal}
\alias{combined_flat_anneal}
\title{Combined_flat_anneal}
\usage{
combined_flat_anneal(pct, start_lr, end_lr = 0, curve_type = "linear")
}
\arguments{
\item{pct}{Proportion of training with a constant learning rate.}
\item{start_lr}{Desired starting learning rate, used for beginning pct of training.}
\item{end_lr}{Desired end learning rate, training will conclude at this learning rate.}
\item{curve_type}{Curve type for learning rate annealing. Options are 'linear', 'cosine', and 'exponential'.}
}
\description{
Create a schedule with constant learning rate `start_lr` for `pct`
proportion of the training, and a `curve_type` learning rate (till `end_lr`) for
remaining portion of training.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cycleGAN_models.R
\name{combined_flat_anneal}
\alias{combined_flat_anneal}
\title{Combined_flat_anneal}
\usage{
combined_flat_anneal(pct, start_lr, end_lr = 0, curve_type = "linear")
}
\arguments{
\item{pct}{Proportion of training with a constant learning rate.}
\item{start_lr}{Desired starting learning rate, used for beginning pct of training.}
\item{end_lr}{Desired end learning rate, training will conclude at this learning rate.}
\item{curve_type}{Curve type for learning rate annealing. Options are 'linear', 'cosine', and 'exponential'.}
}
\description{
Create a schedule with constant learning rate `start_lr` for `pct`
proportion of the training, and a `curve_type` learning rate (till `end_lr`) for
remaining portion of training.
}
|
##################################################################
# Read.Header: parse the watershed header file and return the model
# dimensions, simulation settings, and per-site input file names.
#
# Header.File : path to the header .csv. Defaults to the standard file
#               name, which was previously hard-coded INSIDE the function
#               and silently overrode the caller's argument -- now fixed
#               by making it the default value instead.
#
# Returns a named list with:
#   I, I5, G             - life-stage counts and relative-survival categories
#   K, N.input.files, Q, J, Tr, R
#                        - sites, input files, land-use categories per site,
#                          habitat types per site, time steps, replicates
#   MCsim1..MCsim4       - Monte Carlo simulation settings
#   T.step.change        - time step at which each input file takes effect
#   Input.file.names     - K x N.input.files matrix of input file names
#   Init.file.names      - per-site initialization file names
#   Cross.site.migration.file.names - one file name per input file
#   Site.Names           - per-site labels
##################################################################
Read.Header <- function(Header.File = "Watershed_Header_File.csv") {
  I <- 17   # number of life stages (TODO: should be read from the data file)
  I5 <- 10  # number of potential pre-smolt years (steelhead)
  G <- 11   # number of relative-survival categories
  # Watershed information (Table 2.3): one scalar per row, in column 3.
  WshedKJQ <- read.csv(Header.File, skip = 1, nrows = 10, header = FALSE)[, 3]
  # K, Q, and J: number of sites, land-use categories per site, and
  # habitat types per site.
  K <- as.numeric(WshedKJQ[1])
  N.input.files <- as.numeric(WshedKJQ[2])
  Q <- as.numeric(WshedKJQ[3])
  J <- as.numeric(WshedKJQ[4])
  Tr <- as.numeric(WshedKJQ[5])
  R <- as.numeric(WshedKJQ[6])
  MCsim1 <- as.numeric(WshedKJQ[7])
  MCsim2 <- as.numeric(WshedKJQ[8])
  MCsim3 <- as.numeric(WshedKJQ[9])
  MCsim4 <- as.numeric(WshedKJQ[10])
  rm(WshedKJQ)
  # Time step at which each successive input file takes effect
  # (one entry per input file, starting in column 4).
  T.step.change <- as.numeric(read.csv(Header.File,
    skip = 13, nrows = 1, header = FALSE)[, 4:(4 + N.input.files - 1)])
  # Per-site file names: one initialization file (first column read) plus
  # one input file per step change (one input file for each step change in
  # inputs, replacing the old one-worksheet-per-site .xlsx layout).
  file.names <-
    as.matrix((
      read.csv(Header.File,
        skip = 15, nrows = K,
        header = FALSE,
        colClasses = rep("character", (1 + N.input.files)))[1:K, 3:(N.input.files + 3)]
    ))
  Input.file.names <- array(file.names[, 2:(1 + N.input.files)], c(K, N.input.files))
  Init.file.names <- file.names[, 1]
  Cross.site.migration.file.names <-
    c(
      read.csv(Header.File,
        skip = 26, nrows = 1,
        header = FALSE,
        colClasses = rep("character", (1 + N.input.files)))[1, 4:(N.input.files + 3)]
    )
  # Site labels come straight from the header; each input sheet is its own
  # site, so the detailed names are read from the site profiles directly.
  Site.Names <- read.csv(Header.File, skip = 15, nrows = K, header = FALSE,
    colClasses = "character")[, 2]
  return(
    list(
      "I" = I, "I5" = I5, "G" = G,
      "K" = K, "N.input.files" = N.input.files, "Q" = Q, "J" = J, "Tr" = Tr, "R" = R,
      "MCsim1" = MCsim1, "MCsim2" = MCsim2, "MCsim3" = MCsim3, "MCsim4" = MCsim4,
      "T.step.change" = T.step.change,
      "Input.file.names" = Input.file.names,
      "Init.file.names" = Init.file.names,
      "Cross.site.migration.file.names" = Cross.site.migration.file.names,
      "Site.Names" = Site.Names
    )
  )
} # end of Read.Header
## Finished to here - updated Reading Header File from .csv.
## Need to continue, reading input file(s) from .csv as well.
## Will now have multiple input files names - will have to update
## input file name for each site.
##################################################################
##################################################################
Read.Input.File <- function(header) {
attach(header)
#attach(header)
######################
# Initialize Vectors
M.mu = array(rep(0,(K*J*Q*Tr)),c(K,Q,J,Tr))
M.target = M.mu
M.alphaR.N = array(rep(0,(K*Q*Tr)),c(K,Q,Tr))
M.alphaT.N = M.alphaR.N
M.alphaS.N = M.alphaR.N
M.alpha.N = M.alphaR.N
M.rate = M.alphaR.N
Ak_x_Lqk.mu=array(rep(0,K*Q*Tr),c(K,Q,Tr))
Ak_x_Lqk.sigmaR=Ak_x_Lqk.mu
Ak_x_Lqk.sigmaT=Ak_x_Lqk.mu
Ak_x_Lqk.sigmaS=Ak_x_Lqk.mu
Ak_x_Lqk.sigma=Ak_x_Lqk.mu
Ak_x_Lqk.target=Ak_x_Lqk.mu
Ak_x_Lqk.rate=Ak_x_Lqk.mu
D.mu=array(rep(0, K* 5*(J+1)*Tr), c(K, J, 5, Tr))
D.sigmaR=D.mu
D.sigmaT=D.mu
D.sigmaS=D.mu
D.sigma=D.mu
D.target=D.mu
D.rate = D.mu
Prod_Scalar.mu=array(rep(0, K*5*Q*Tr), c(K, Q, 5, Tr))
Prod_Scalar.sigmaR=Prod_Scalar.mu
Prod_Scalar.sigmaT=Prod_Scalar.mu
Prod_Scalar.sigmaS=Prod_Scalar.mu
Prod_Scalar.sigma=Prod_Scalar.mu
Prod_Scalar.target = Prod_Scalar.mu
Prod_Scalar.rate = Prod_Scalar.mu
Sr.mu = array(rep(0, K*I*Tr), c(K,I,Tr))
Sr.alphaR.N= Sr.mu
Sr.alphaT.N= Sr.mu
Sr.alphaS.N= Sr.mu
Sr.alpha.N = Sr.mu
Sr.target = Sr.mu
Sr.rate = Sr.mu
# Presmolt Stuff...
SR5.mu = array(rep(0, K*I5*Tr), c(K, I5, Tr))
SR5.alphaR=SR5.mu
SR5.alphaT=SR5.mu
SR5.alphaS=SR5.mu
SR5.alpha=SR5.mu
SR5.target = SR5.mu
SR5.rate = SR5.mu
N5.Psmolt_Female.mu = SR5.mu
N5.Pspawn_Female.mu = SR5.mu
N5.Pstay_Female.mu = SR5.mu
N5.P.alphaR_Female.N = SR5.mu
N5.P.alphaT_Female.N = SR5.mu
N5.P.alphaS_Female.N = SR5.mu
N5.P.alpha_Female.N = SR5.mu
N5.Psmolt_Female.target = SR5.mu
N5.Pspawn_Female.target = SR5.mu
N5.Pstay_Female.target = SR5.mu
N5.P_Female.rate = SR5.mu
N5.Psmolt_Male.mu = SR5.mu
N5.Pspawn_Male.mu = SR5.mu
N5.Pstay_Male.mu = SR5.mu
N5.P.alphaR_Male.N = SR5.mu
N5.P.alphaT_Male.N = SR5.mu
N5.P.alphaS_Male.N = SR5.mu
N5.P.alpha_Male.N = SR5.mu
N5.Psmolt_Male.target = SR5.mu
N5.Pspawn_Male.target = SR5.mu
N5.Pstay_Male.target = SR5.mu
N5.P_Male.rate = SR5.mu
N5.Rainbow.Fecundity = array(rep(0, K*I5*Tr), c(K, I5, Tr))
N5.cap.mu = SR5.mu
N5.cap.sigmaR = SR5.mu
N5.cap.sigmaT = SR5.mu
N5.cap.sigmaS = SR5.mu
N5.cap.sigma = SR5.mu
N5.cap.target = SR5.mu
N5.cap.rate = SR5.mu
# Adult (ocean) fish by ocean age parameters (track up to 10 ocean years)
Mat8Plus_Female.mu = array(rep(0,K*10*Tr), c(K, 10, Tr))
Mat8Plus_Female.alphaR.N = Mat8Plus_Female.mu
Mat8Plus_Female.alphaT.N = Mat8Plus_Female.mu
Mat8Plus_Female.alphaS.N = Mat8Plus_Female.mu
Mat8Plus_Female.alpha.N = Mat8Plus_Female.mu
Mat8Plus_Female.target = Mat8Plus_Female.mu
Mat8Plus_Female.rate = Mat8Plus_Female.mu
Mat8Plus_Male.mu = array(rep(0,K*10*Tr), c(K, 10, Tr))
Mat8Plus_Male.alphaR.N = Mat8Plus_Female.mu
Mat8Plus_Male.alphaT.N = Mat8Plus_Female.mu
Mat8Plus_Male.alphaS.N = Mat8Plus_Female.mu
Mat8Plus_Male.alpha.N = Mat8Plus_Female.mu
Mat8Plus_Male.target = Mat8Plus_Female.mu
Mat8Plus_Male.rate = Mat8Plus_Female.mu
# Fc.by.O.Age.mu = Mat8Plus.mu
# Fc.by.O.Age.sigmaR = Mat8Plus.mu
# Fc.by.O.Age.sigmaT = Mat8Plus.mu
# Fc.by.O.Age.sigmaS = Mat8Plus.mu
# Fc.by.O.Age.sigma = Mat8Plus.mu
# Fc.by.O.Age.target = Fc.by.O.Age.mu
# Fc.by.O.Age.rate = Fc.by.O.Age.mu
C_ocean.mu = Mat8Plus_Female.mu
C_ocean.sigmaR = Mat8Plus_Female.mu
C_ocean.sigmaT = Mat8Plus_Female.mu
C_ocean.sigmaS = Mat8Plus_Female.mu
C_ocean.sigma = Mat8Plus_Female.mu
C_ocean.target = C_ocean.mu
C_ocean.rate = C_ocean.mu
# frac
frac.mu = array(rep(0, K*5*(J)*Tr), c(K, 5, (J), Tr))
frac.sigmaR = frac.mu
frac.sigmaT = frac.mu
frac.sigmaS = frac.mu
frac.sigma = frac.mu
frac.target = frac.mu
frac.rate = frac.mu
harvest.wild.mu = array(rep(0,K*Tr), c(K, Tr))
harvest.wild.sigmaR = harvest.wild.mu
harvest.wild.sigmaT = harvest.wild.mu
harvest.wild.sigmaS = harvest.wild.mu
harvest.wild.sigma = harvest.wild.mu
harvest.hatch.mu = harvest.wild.mu
harvest.hatch.sigmaR = harvest.wild.mu
harvest.hatch.sigmaT = harvest.wild.mu
harvest.hatch.sigmaS = harvest.wild.mu
harvest.hatch.sigma = harvest.wild.mu
harvest.wild.target = harvest.wild.mu
harvest.wild.rate = harvest.wild.mu
harvest.hatch.target = harvest.wild.mu
harvest.hatch.rate = harvest.wild.mu
Hatch_Fish.mu = array(rep(0, K*I*Tr), c(K, I, Tr))
Hatch_Fish.sigmaR = Hatch_Fish.mu
Hatch_Fish.sigmaT = Hatch_Fish.mu
Hatch_Fish.sigmaS = Hatch_Fish.mu
Hatch_Fish.sigma = Hatch_Fish.mu
Hatch_Fish.target = Hatch_Fish.mu
Hatch_Fish.rate = Hatch_Fish.mu
# Rel_Surv (G categories)
Rel_Surv.mu = array(rep(0, K*I*Tr*G), c(K, I, Tr, G))
Rel_Surv.sigmaR = Rel_Surv.mu
Rel_Surv.sigmaT = Rel_Surv.mu
Rel_Surv.sigmaS = Rel_Surv.mu
Rel_Surv.sigma = Rel_Surv.mu
Rel_Surv.target= Rel_Surv.mu
Rel_Surv.rate= Rel_Surv.mu
#Male Female Ratio
Post_Spawn_Survival_Anadromous_M.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
Post_Spawn_Survival_Anadromous_F.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
Post_Spawn_Survival_Rainbow_M.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
Post_Spawn_Survival_Rainbow_F.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
#Female_Frac.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
#Fecundity of Female Spawners by Ocean Age)
Female_Fecundity = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
# Rel_Com (G categories)
Rel_Comp.mu = array(rep(0, K*I*Tr*G), c(K, I, Tr, G))
Rel_Comp.sigmaR = Rel_Comp.mu
Rel_Comp.sigmaT = Rel_Comp.mu
Rel_Comp.sigmaS = Rel_Comp.mu
Rel_Comp.sigma = Rel_Comp.mu
Rel_Comp.target= Rel_Comp.mu
Rel_Comp.rate= Rel_Comp.mu
# Rel_Fecund
Rel_Fecund.mu = array(rep(0,K*Tr*G), c(K, Tr, G))
Rel_Fecund.simgaR = Rel_Fecund.mu
Rel_Fecund.simgaT = Rel_Fecund.mu
Rel_Fecund.simgaS = Rel_Fecund.mu
Rel_Fecund.simga = Rel_Fecund.mu
Rel_Fecund.target = Rel_Fecund.mu
Rel_Fecund.rate = Rel_Fecund.mu
Fry.x.siteMigration.mu = array(rep(0, K*K*Tr), c(K,K,Tr))
Par.x.siteMigration.mu = array(rep(0, K*K*Tr), c(K,K,Tr))
Presmolt.x.siteMigration.mu = array(rep(0, K*K*Tr), c(K,K,Tr))
Spawner.x.siteMigration.mu = array(rep(0, K*K*Tr), c(K,K,Tr))
Fry.x.siteMigration.target = array(rep(0, K*K*Tr), c(K,K,Tr))
Par.x.siteMigration.target = array(rep(0, K*K*Tr), c(K,K,Tr))
Presmolt.x.siteMigration.target = array(rep(0, K*K*Tr), c(K,K,Tr))
Spawner.x.siteMigration.target = array(rep(0, K*K*Tr), c(K,K,Tr))
Fry.x.siteMigration.alphaR = array(rep(0, K*Tr), c(K, Tr))
Fry.x.siteMigration.alphaT = array(rep(0,K*Tr), c(K, Tr))
Fry.x.siteMigration.alphaS = array(rep(0,K*Tr), c(K, Tr))
Fry.x.siteMigration.alpha = array(rep(0,K*Tr), c(K, Tr))
Par.x.siteMigration.alphaR = array(rep(0,K*Tr), c(K, Tr))
Par.x.siteMigration.alphaT = array(rep(0,K*Tr), c(K, Tr))
Par.x.siteMigration.alphaS = array(rep(0,K*Tr), c(K, Tr))
Par.x.siteMigration.alpha = array(rep(0,K*Tr), c(K, Tr))
Presmolt.x.siteMigration.alphaR = array(rep(0,K*Tr), c(K, Tr))
Presmolt.x.siteMigration.alphaT = array(rep(0,K*Tr), c(K, Tr))
Presmolt.x.siteMigration.alphaS = array(rep(0,K*Tr), c(K, Tr))
Presmolt.x.siteMigration.alpha = array(rep(0,K*Tr), c(K, Tr))
Spawner.x.siteMigration.alphaR = array(rep(0,K*Tr), c(K, Tr))
Spawner.x.siteMigration.alphaT = array(rep(0,K*Tr), c(K, Tr))
Spawner.x.siteMigration.alphaS = array(rep(0,K*Tr), c(K, Tr))
Spawner.x.siteMigration.alpha = array(rep(0,K*Tr), c(K, Tr))
Fry.x.siteMigration.rate = array(rep(0, K*Tr), c(K, Tr))
Par.x.siteMigration.rate = array(rep(0, K*Tr), c(K, Tr))
Presmolt.x.siteMigration.rate = array(rep(0, K*Tr), c(K, Tr))
Spawner.x.siteMigration.rate = array(rep(0, K*Tr), c(K, Tr))
###########################
################################
################################
# loop through each site within input file
k=1
n.step=1
for (n.step in 1:N.input.files) {
T.lo= as.numeric(T.step.change[n.step])
if (n.step==N.input.files) {T.hi=Tr} else {T.hi= as.numeric(T.step.change[n.step+1])-1}
T.lo
T.hi
n.step
N.input.files
T.step.change
# loop through each input file
#Watershed.Input.File=Input.file.names[n.step]
for (k in 1:K) {
#print(k)
# Site=paste("Site",k,sep="")
Watershed.Input.File = as.character(Input.file.names[k, n.step])
# Read the M's
#T2.3 <-read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=26, endRow=(26+Q-1), colClasses=rep("numeric",J),
# rowIndex=26:(26+Q-1),
# colIndex=3:(3+J-1), header=F)
T2.3 <- as.matrix(read.csv(Watershed.Input.File, header=F,skip=27, nrows=Q)[,3:(3+J-1)])
T2.3
#T2.3Nalpha <- read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=26, endRow=(26+Q-1), colClasses=rep("numeric",4),
## rowIndex=26:(26+Q-1),
# colIndex=15:18, header=F)
T2.3Nalpha <- as.matrix(read.csv(Watershed.Input.File, header=F,
skip=27, nrows=Q)[,15:18])
T2.3Nalpha
#T2.3target <- read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=26, endRow=(26+Q-1), colClasses= rep("numeric",13),
## rowIndex=26:(26+Q-1),
# colIndex=19:31, header=F)
T2.3target <- as.matrix(read.csv(Watershed.Input.File, header=F,
skip=27, nrows=Q)[,19:(31)])
T2.3target
#T2.3rate <- read.xlsx(Watershed.Input.File, sheetName=Site,
# rowIndex=26:(26+Q-1), colIndex=19:3, header=F)
T2.3rate <- read.csv(Watershed.Input.File, header=F,
skip=27, nrows=Q)[,31]
T2.3rate
#as.numeric(T.lo):as.numeric(T.hi)
for (t in as.numeric(T.lo):as.numeric(T.hi)) {
M.alphaR.N[k,,t]=T2.3Nalpha[,1]
M.alphaT.N[k,,t]=T2.3Nalpha[,2]
M.alphaS.N[k,,t]=T2.3Nalpha[,3]
M.alpha.N[k,,t]= T2.3Nalpha[,4]
M.rate[k,,t]=T2.3target[,13]
for (j in 1:J) {
M.mu[k,,j,t]=as.numeric(T2.3[,j])
M.target[k,,j,t] = as.numeric(T2.3target[,j])
} #close j
}# close t
dim(M.mu)
M.mu[1,1,1,1:10]
M.target[1,1,1,1:10]
M.alphaR.N[k,,t]
#} # close site
#} # close time
##### OK to here... repeat for all other variables (7/8/2013)
#######################################################
# Read Ak_x_Lqk_vectors
#Ak <-read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=9, endRow=(9+Q-1), colClasses=rep("numeric",7),
## rowIndex=9:(9+Q-1),
#colIndex=3:9, header=F)
Ak <- read.csv(Watershed.Input.File, header=F,
skip=9, nrows=Q)[,3:9]
Ak
for (t in T.lo:T.hi) {
Ak_x_Lqk.mu[k, ,t] <-Ak[,1]
Ak_x_Lqk.sigmaR[k, ,t] <-Ak[,2]
Ak_x_Lqk.sigmaT[k, ,t] <-Ak[,3]
Ak_x_Lqk.sigmaS[k, ,t] <-Ak[,4]
Ak_x_Lqk.sigma[k, ,t] <-Ak[,5]
Ak_x_Lqk.target[k, ,t] <-Ak[,6]
Ak_x_Lqk.rate[k, ,t] <- Ak[,7]
} # end t
dim(Ak_x_Lqk.mu)
Ak_x_Lqk.mu[,,1:10]
Ak_x_Lqk.target[,,1:10]
rm(Ak)
#### OK to here 7/8/2013 3:03 pm #######
#########################################
# Read in Table 2_4 (to get to D matrix)
#Dtable= read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=43, endRow=55, colClasses=rep("numeric",35),
## rowIndex=43:55,
#colIndex=3:37, header=F)
Dtable = read.csv(Watershed.Input.File, header=F,
skip=44, nrows=12)[,3:37]
# Note - this has been updated so that capcity of spawning gravel is input directly in "spawner to egg" category
for (t in T.lo:T.hi) {
for (i in 1:5) {
D.mu[k,1:J,i,t] = Dtable[1:J,i]
#D.mu[k, (J+1),i ,t] = Dtable[13, i]
D.sigmaR[k,1:J,i,t] = Dtable[1:J,(i+5)]
#D.sigmaR[k, (J+1),i ,t] = Dtable[13, (i+5)]
D.sigmaT[k,1:J,i,t] = Dtable[1:J,(i+10)]
#D.sigmaT[k, (J+1),i ,t] = Dtable[13, (i+10)]
D.sigmaS[k,1:J,i,t] = Dtable[1:J,(i+15)]
#D.sigmaS[k, (J+1),i ,t] = Dtable[13, (i+15)]
D.sigma[k,1:J,i,t] = Dtable[1:J,(i+20)]
#D.sigma[k, (J+1),i ,t] = Dtable[13,(i+20)]
D.target[k,1:J,i,t] = Dtable[1:J,(i+25)]
#D.target[k,(J+1),i,t] = Dtable[13,(i+25)]
D.rate[k,1:J,i,t] = Dtable[1:J, (i+30)]
#D.rate[k,(J+1),i,t] = Dtable[13, (i+30)]
}
}
D.mu[1,1,1,1:10]
D.target[1,1,1,1:10]
rm(Dtable)
####### Productivity Scalars
#Etable= read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=61, endRow=(61+Q-1), colClasses=rep("numeric",36),
## rowIndex=61:(61+Q-1),
# colIndex=3:38, header=F)
Etable = read.csv(Watershed.Input.File, header=F,
skip=62, nrows=Q)[,3:37]
Etable
for (t in T.lo:T.hi) {
for (i in 1:5) {
Prod_Scalar.mu[k,1:Q,i,t] = Etable[1:Q,i]
Prod_Scalar.sigmaR[k,1:Q,i,t] = Etable[1:Q,(i+5)]
Prod_Scalar.sigmaT[k,1:Q,i,t] = Etable[1:Q,(i+10)]
Prod_Scalar.sigmaS[k,1:Q,i,t] = Etable[1:Q,(i+15)]
Prod_Scalar.sigma[k,1:Q,i,t] = Etable[1:Q,(i+20)]
Prod_Scalar.target[k,1:Q,i,t] = Etable[1:Q,(i+25)]
Prod_Scalar.rate[k,1:Q,i,t] = Etable[1:Q,(i+30)]
} #close i
} # close t
rm(Etable)
Prod_Scalar.mu[1,1,1,1:10]
Prod_Scalar.target[1,1,1,1:10]
#?read.xlsx
#SrTable = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=78, endRow=(78+I-1-1), colClasses=rep("numeric",7),
## rowIndex=78:(78+I-1-1),
# colIndex=4:10, header=F)
SrTable = read.csv(Watershed.Input.File, header=F,
skip= 79, nrows = I-1)[,4:10]
SrTable
for (t in T.lo:T.hi) {
Sr.mu[k,2:I ,t] = (SrTable[,1])
Sr.alphaR.N[k,2:I ,t]= SrTable[,2]
Sr.alphaT.N[k,2:I ,t]= SrTable[,3]
Sr.alphaS.N[k,2:I ,t]= SrTable[,4]
Sr.alpha.N[k,2:I ,t]= SrTable[,5]
Sr.target[k, 2:I, t] = SrTable[,6]
Sr.rate[k, 2:I, t] = SrTable[,7]
}
rm(SrTable)
dim(Sr.mu)
Sr.mu[1,1:5,1:10]
Sr.target[1,1:5, 1:10]
### Presmolt Inputs
#PSinputs = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=99, endRow=(99+I5-1),colClasses=rep("numeric", 26),
## rowIndex=99:(99+I5-1),
# colIndex=3:28, header=F)
PSinputs = read.csv(Watershed.Input.File, header=F,
skip=100, nrows = I5)[, 3:39]
PSinputs
for (t in T.lo:T.hi) {
SR5.mu[k, ,t] = PSinputs[,1]
SR5.alphaR[k, ,t] = PSinputs[,2]
SR5.alphaT[k, ,t] = PSinputs[,3]
SR5.alphaS[k, ,t]= PSinputs[,4]
SR5.alpha[k, ,t]= PSinputs[,5]
SR5.target[k, ,t] = PSinputs[,25]
SR5.rate[k, ,t] = PSinputs[,26]
N5.Psmolt_Female.mu[k, ,t]= PSinputs[,6]
N5.Pspawn_Female.mu[k, ,t] = PSinputs[,7]
N5.Pstay_Female.mu[k, ,t] = PSinputs[,8]
N5.P.alphaR_Female.N[k, ,t]= PSinputs[,9]
N5.P.alphaT_Female.N[k, ,t]= PSinputs[,10]
N5.P.alphaS_Female.N[k, ,t]= PSinputs[,11]
N5.P.alpha_Female.N[k, ,t] = PSinputs[,12]
N5.Psmolt_Male.mu[k, ,t]= PSinputs[,13]
N5.Pspawn_Male.mu[k, ,t] = PSinputs[,14]
N5.Pstay_Male.mu[k, ,t] = PSinputs[,15]
N5.P.alphaR_Male.N[k, ,t]= PSinputs[,16]
N5.P.alphaT_Male.N[k, ,t]= PSinputs[,17]
N5.P.alphaS_Male.N[k, ,t]= PSinputs[,18]
N5.P.alpha_Male.N[k, ,t] = PSinputs[,19]
N5.Psmolt_Female.target[k, ,t]=PSinputs[,27]
N5.Pspawn_Female.target[k, ,t]=PSinputs[,28]
N5.Pstay_Female.target[k, ,t]=PSinputs[,29]
N5.P_Female.rate[k, ,t] = PSinputs[,33]
N5.Psmolt_Male.target[k, ,t]=PSinputs[,30]
N5.Pspawn_Male.target[k, ,t]=PSinputs[,31]
N5.Pstay_Male.target[k, ,t]=PSinputs[,32]
N5.P_Male.rate[k, ,t] = PSinputs[,34]
PSinputs
N5.P_Male.rate
N5.cap.mu[k, ,t] = PSinputs[,20]
N5.cap.sigmaR[k, ,t]= PSinputs[,21]
N5.cap.sigmaT[k, ,t]= PSinputs[,22]
N5.cap.sigmaS[k, ,t]= PSinputs[,23]
N5.cap.sigma[k, ,t] = PSinputs[,24]
N5.cap.target[k, ,t] = PSinputs[,35]
N5.cap.rate[k, ,t] = PSinputs[,36]
N5.Rainbow.Fecundity[k, ,t] = PSinputs[,37]
}
N5.Rainbow.Fecundity
N5.cap.mu
rm(PSinputs)
#o.inputs = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=113, endRow=(113+10-1), colClasses=rep("numeric",21),
## rowIndex=113:(113+10-1),
# colIndex=4:24, header=F)
o.inputs = read.csv(Watershed.Input.File, header=F,
skip=115, nrow=10)[, 4:24]
o.inputs
for (t in T.lo:T.hi) {
Mat8Plus_Female.mu[k, ,t] = o.inputs[,1]
Mat8Plus_Female.alphaR.N[k, ,t] = o.inputs[,2]
Mat8Plus_Female.alphaT.N[k, ,t] = o.inputs[,3]
Mat8Plus_Female.alphaS.N[k, ,t] = o.inputs[,4]
Mat8Plus_Female.alpha.N[k, ,t] = o.inputs[,5]
Mat8Plus_Female.target[k, ,t] = o.inputs[,16]
Mat8Plus_Female.rate[k, ,t] = o.inputs[,17]
Mat8Plus_Male.mu[k, ,t] = o.inputs[,6]
Mat8Plus_Male.alphaR.N[k, ,t] = o.inputs[,7]
Mat8Plus_Male.alphaT.N[k, ,t] = o.inputs[,8]
Mat8Plus_Male.alphaS.N[k, ,t] = o.inputs[,9]
Mat8Plus_Male.alpha.N[k, ,t] = o.inputs[,10]
Mat8Plus_Male.target[k, ,t] = o.inputs[,18]
Mat8Plus_Male.rate[k, ,t] = o.inputs[,19]
C_ocean.mu[k, ,t] = o.inputs[,11]
C_ocean.sigmaR[k, ,t] = o.inputs[,12]
C_ocean.sigmaT[k, ,t] = o.inputs[,13]
C_ocean.sigmaS[k, ,t] = o.inputs[,14]
C_ocean.sigma[k, ,t] = o.inputs[,15]
C_ocean.target[k, ,t] = o.inputs[, 20]
C_ocean.rate[k, ,t] = o.inputs[,21]
}
rm(o.inputs)
### read "frac"
#fractions = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=128, endRow=(128+5-1),colClasses=rep("numeric", 84),
## rowIndex=128:(128+5-1),
# colIndex=4:87, header=F)
fractions = read.csv(Watershed.Input.File, header=F,
skip=130, nrows = 5)[,4:87]
fractions
#dim(frac.mu)
for (t in T.lo:T.hi) {
for (j in 1:J) {
frac.mu[k, ,j,t] = fractions[,j]
frac.sigmaR[k, ,j,t] = fractions[,j+12]
frac.sigmaT[k, ,j,t] = fractions[,j+24]
frac.sigmaS[k, ,j,t] = fractions[,j+36]
frac.sigma[k, ,j,t] = fractions[,j+48]
frac.target[k, ,j,t] = fractions[,j+60]
frac.rate[k, , j,t] = fractions[,j+72]
} # close j
} #close t
frac.rate[k, , ,t]
rm(fractions)
dim(frac.mu)
frac.mu[1,,,1:10]
frac.target[1,,,1:10]
###################
#harvest = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow = 137, endRow=138, colClasses=rep("numeric", 7),
## rowIndex=137:138,
# colIndex=3:9, header=F)
harvest = read.csv(Watershed.Input.File, header=F,
skip = 139, nrows = 2)[, 3:9]
#harvest
for (t in T.lo:T.hi) {
harvest.wild.mu[k,t] = harvest[1,1]
harvest.wild.sigmaR[k,t] = harvest[1,2]
harvest.wild.sigmaT[k,t] = harvest[1,3]
harvest.wild.sigmaS[k,t] = harvest[1,4]
harvest.wild.sigma[k,t] = harvest[1,5]
harvest.wild.target[k,t] = harvest[1,6]
harvest.wild.rate[k,t] = harvest[1,7]
harvest.hatch.mu[k,t] = harvest[2,1]
harvest.hatch.sigmaR[k,t] = harvest[2,2]
harvest.hatch.sigmaT[k,t] = harvest[2,3]
harvest.hatch.sigmaS[k,t] = harvest[2,4]
harvest.hatch.sigma[k,t] = harvest[2,5]
harvest.hatch.target[k,t] = harvest[2,6]
harvest.hatch.rate[k,t] = harvest[2,7]
} # close t
rm(harvest)
################################################
#Hatch_Fish_Inputs = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=145, endRow=145, colClasses= rep("numeric",4),
## rowIndex=145:145,
# colIndex=4:7, header=F)
Hatch_Fish_Inputs = read.csv(Watershed.Input.File, header=F,
skip=147, nrows=1)[, 2:4]
#dim(Hatch_Fish_Inputs)
for (t in T.lo:T.hi) {
Hatch_Fish.mu[k, 1:2, t]=0
Hatch_Fish.mu[k, 6:I, t] = 0
for (i in 3:5) {
Hatch_Fish.mu[k,i,t]= Hatch_Fish_Inputs[1, i-2]
}
}
#Rel_Surv_Inputs = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=149, endRow=160, colClasses=rep("numeric",8),
## rowIndex=149:160,
# colIndex=4:10, header=F)
Rel_Surv_Inputs = read.csv(Watershed.Input.File, header=F,
skip=152, nrow=11)[, 4:9] #Pete Feb 2016, this be nrow = 11 not 12, right?
# skip=152, nrow=12)[, 4:9] #Pete Feb 2016, this be nrow = 11 not 12, right?
#Rel_Surv_Inputs
# Will add variability at a later time ---M@
for (t in T.lo:T.hi) {
for (g in 1:G) {
for (i in 1:I) {
# the "min" is used below to assign all adult stages the same Rel_Surv
# and Rel_Comp values
Rel_Surv.mu[k,i,t,g]<-(Rel_Surv_Inputs[g, min(i,6)])
#Rel_Comp.mu[k,i,t,g]<-Rel_Comp_Inputs[g, min(i,6)]
#Rel_Comp.mu[k,i,t,g]
#Rel_Comp_Inputs[g, min(i,6)]
}
}
}
Rel_Surv_Inputs
Rel_Surv_Inputs[g, min(i,6)]
Rel_Surv.mu[k,,t,]
rm(Rel_Surv_Inputs)
#rm(Rel_Comp_Inputs)
Fecund_Inputs = read.csv(Watershed.Input.File, header=F,
skip=168, nrow=11)[, 4:13]
for (t in T.lo:T.hi) {
Female_Fecundity[k,,t,] = t(Fecund_Inputs)
}
rm(Fecund_Inputs)
#Post_Spawn_Survival_Anadromous = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
#Post_Spawn_Survival_Rainbow = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
Post_Spawn_Survival_Anadromous_Inputs =
read.csv(Watershed.Input.File, header=F,
skip=184, nrow=11)[, 4:23]
for (t in T.lo:T.hi) {
Post_Spawn_Survival_Anadromous_M.mu[k,,t,] = t(Post_Spawn_Survival_Anadromous_Inputs[,1:10])
Post_Spawn_Survival_Anadromous_F.mu[k,,t,] = t(Post_Spawn_Survival_Anadromous_Inputs[,11:20])
}
rm(Post_Spawn_Survival_Anadromous_Inputs)
Post_Spawn_Survival_Rainbow_Inputs =
read.csv(Watershed.Input.File, header=F,
skip=199, nrow=11)[, 4:23] #Pete October 2015 Fix--was previously referencing the wrong row...
for (t in T.lo:T.hi) {
Post_Spawn_Survival_Rainbow_M.mu[k,,t,] = t(Post_Spawn_Survival_Rainbow_Inputs[,1:10])
Post_Spawn_Survival_Rainbow_F.mu[k,,t,] = t(Post_Spawn_Survival_Rainbow_Inputs[,11:20])
}
rm(Post_Spawn_Survival_Rainbow_Inputs)
} # close site
# Cross Site Migration Matrix
Cross.Site.Mig = read.csv(as.character(Cross.site.migration.file.names[n.step]), header=F,
skip= 6, nrows=43)[, 3:27]
Cross.Site.Mig
#Cross.Site.Mig = read.xlsx2("Cross_Site_Migration.csv",
# startRow = 8, endRow=50, colClasses = rep("numeric", 25),
## rowIndex=8:50,
# colIndex=4:28, header=F,)
Cross.Site.Mig
for (t in T.lo:T.hi) {
for (k1 in 1:K) {
for (k2 in 1:K) {
Fry.x.siteMigration.mu[k1, k2,t] = Cross.Site.Mig[k1,k2]
Par.x.siteMigration.mu[k1, k2,t] = Cross.Site.Mig[k1+11,k2]
Presmolt.x.siteMigration.mu[k1, k2,t] = Cross.Site.Mig[k1+22,k2]
Spawner.x.siteMigration.mu[k1, k2,t] = Cross.Site.Mig[k1+33,k2]
Fry.x.siteMigration.target[k1, k2,t] = Cross.Site.Mig[k1,k2+14]
Par.x.siteMigration.target[k1, k2,t] = Cross.Site.Mig[k1+11,k2+14]
Presmolt.x.siteMigration.target[k1, k2,t] = Cross.Site.Mig[k1+22,k2+14]
Spawner.x.siteMigration.target[k1, k2,t] = Cross.Site.Mig[k1+33,k2+14]
}
Fry.x.siteMigration.alphaR[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 11]))
Fry.x.siteMigration.alphaT[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 12]))
Fry.x.siteMigration.alphaS[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 13]))
Fry.x.siteMigration.alpha[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 14]))
Fry.x.siteMigration.rate[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 25]))
Par.x.siteMigration.alphaR[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+11,11]))
Par.x.siteMigration.alphaT[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+11,12]))
Par.x.siteMigration.alphaS[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+11,13]))
Par.x.siteMigration.alpha[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+11,14]))
Par.x.siteMigration.rate[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1+11, 25]))
Presmolt.x.siteMigration.alphaR[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+22,11]))
Presmolt.x.siteMigration.alphaT[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+22,12]))
Presmolt.x.siteMigration.alphaS[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+22,13]))
Presmolt.x.siteMigration.alpha[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+22,14]))
Presmolt.x.siteMigration.rate[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1+22, 25]))
Spawner.x.siteMigration.alphaR[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+33,11]))
Spawner.x.siteMigration.alphaT[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+33,12]))
Spawner.x.siteMigration.alphaS[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+33,13]))
Spawner.x.siteMigration.alpha[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+33,14]))
Spawner.x.siteMigration.rate[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1+33, 25]))
}}
Fry.x.siteMigration.target
Fry.x.siteMigration.alphaT
} # close cycling through number of input files
# Need to return EVERYTHING!!!
Inputs =
list(
"frac.mu"=frac.mu, "frac.sigmaR"=frac.sigmaR, "frac.sigmaT"=frac.sigmaT,
"frac.sigmaS"=frac.sigmaS, "frac.sigma"=frac.sigma,
"frac.target" = frac.target, "frac.rate" = frac.rate,
"harvest.wild.mu"= harvest.wild.mu,
"harvest.wild.sigmaR"= harvest.wild.sigmaR,
"harvest.wild.sigmaT"= harvest.wild.sigmaT,
"harvest.wild.sigmaS"= harvest.wild.sigmaS,
"harvest.wild.sigma"= harvest.wild.sigma,
"harvest.wild.target" = harvest.wild.target,
"harvest.wild.rate" = harvest.wild.rate,
"harvest.hatch.mu"= harvest.hatch.mu,
"harvest.hatch.sigmaR"= harvest.hatch.sigmaR,
"harvest.hatch.sigmaT"= harvest.hatch.sigmaT,
"harvest.hatch.sigmaS"= harvest.hatch.sigmaS,
"harvest.hatch.sigma"= harvest.hatch.sigma,
"harvest.hatch.target" = harvest.hatch.target,
"harvest.hatch.rate" = harvest.hatch.rate,
"Prod_Scalar.mu"=Prod_Scalar.mu, "Prod_Scalar.sigmaR"=Prod_Scalar.sigmaR,
"Prod_Scalar.sigmaT"=Prod_Scalar.sigmaT,
"Prod_Scalar.sigmaS"=Prod_Scalar.sigmaS,
"Prod_Scalar.sigma"=Prod_Scalar.sigma,
"Prod_Scalar.target" = Prod_Scalar.target,
"Prod_Scalar.rate" = Prod_Scalar.rate,
"M.mu"= M.mu, "M.alphaR.N" = M.alphaR.N,
"M.alphaT.N" = M.alphaT.N, "M.alphaS.N" = M.alphaS.N,
"M.alpha.N" = M.alpha.N,
"M.target"=M.target, "M.rate" = M.rate,
"Ak_x_Lqk.mu"=Ak_x_Lqk.mu, "Ak_x_Lqk.sigmaR"=Ak_x_Lqk.sigmaR,
"Ak_x_Lqk.sigmaT"=Ak_x_Lqk.sigmaT,"Ak_x_Lqk.sigmaS"=Ak_x_Lqk.sigmaS,
"Ak_x_Lqk.sigma"=Ak_x_Lqk.sigma,
"Ak_x_Lqk.target"=Ak_x_Lqk.target,
"Ak_x_Lqk.rate"= Ak_x_Lqk.rate,
"D.mu"= D.mu, "D.sigmaR" = D.sigmaR, "D.sigmaT" = D.sigmaT,
"D.sigmaS" = D.sigmaS, "D.sigma" = D.sigma,
"D.target" = D.target, "D.rate" = D.rate,
"Sr.mu" = Sr.mu, "Sr.alphaR.N" = Sr.alphaR.N, "Sr.alphaT.N" = Sr.alphaT.N,
"Sr.alphaS.N" = Sr.alphaS.N, "Sr.alpha.N" = Sr.alpha.N,
"Sr.target" = Sr.target, "Sr.rate"=Sr.rate,
"C_ocean.mu" = C_ocean.mu, "C_ocean.sigmaR" = C_ocean.sigmaR,
"C_ocean.sigmaT" = C_ocean.sigmaT,
"C_ocean.sigmaS" = C_ocean.sigmaS, "C_ocean.sigma" = C_ocean.sigma,
"C_ocean.target" = C_ocean.target, "C_ocean.rate" = C_ocean.rate,
"SR5.mu" = SR5.mu, "SR5.alphaR.N" = SR5.alphaR, "SR5.alphaT.N" = SR5.alphaT,
"SR5.alphaS.N" = SR5.alphaS, "SR5.alpha.N" = SR5.alpha,
"SR5.target" = SR5.target, "SR5.rate" = SR5.rate,
"N5.Psmolt_Female.mu" = N5.Psmolt_Female.mu,
"N5.Pspawn_Female.mu" = N5.Pspawn_Female.mu,
"N5.Pstay_Female.mu" = N5.Pstay_Female.mu,
"N5.Psmolt_Female.target" = N5.Psmolt_Female.target,
"N5.Pspawn_Female.target" = N5.Pspawn_Female.target,
"N5.Pstay_Female.target" = N5.Pstay_Female.target,
"N5.P_Female.rate" = N5.P_Female.rate,
"N5.P.alphaR_Female.N" = N5.P.alphaR_Female.N,
"N5.P.alphaT_Female.N" = N5.P.alphaT_Female.N,
"N5.P.alphaS_Female.N" = N5.P.alphaS_Female.N,
"N5.P.alpha_Female.N" = N5.P.alpha_Female.N,
"N5.Psmolt_Male.mu" = N5.Psmolt_Male.mu,
"N5.Pspawn_Male.mu" = N5.Pspawn_Male.mu,
"N5.Pstay_Male.mu" = N5.Pstay_Male.mu,
"N5.Psmolt_Male.target" = N5.Psmolt_Male.target,
"N5.Pspawn_Male.target" = N5.Pspawn_Male.target,
"N5.Pstay_Male.target" = N5.Pstay_Male.target,
"N5.P_Male.rate" = N5.P_Male.rate,
"N5.P.alphaR_Male.N" = N5.P.alphaR_Male.N,
"N5.P.alphaT_Male.N" = N5.P.alphaT_Male.N,
"N5.P.alphaS_Male.N" = N5.P.alphaS_Male.N,
"N5.P.alpha_Male.N" = N5.P.alpha_Male.N,
"N5.cap.mu" = N5.cap.mu, "N5.cap.sigmaR" = N5.cap.sigmaR,
"N5.cap.sigmaT" = N5.cap.sigmaT,"N5.cap.sigmaS" = N5.cap.sigmaS,
"N5.cap.sigma" = N5.cap.sigma,
"N5.cap.target" = N5.cap.target, "N5.cap.rate" = N5.cap.rate,
"Mat8Plus_Female.mu" = Mat8Plus_Female.mu,
"Mat8Plus_Female.alphaR.N" = Mat8Plus_Female.alphaR.N,
"Mat8Plus_Female.alphaT.N" = Mat8Plus_Female.alphaT.N,
"Mat8Plus_Female.alphaS.N" = Mat8Plus_Female.alphaS.N,
"Mat8Plus_Female.alpha.N" = Mat8Plus_Female.alpha.N,
"Mat8Plus_Female.target" = Mat8Plus_Female.target,
"Mat8Plus_Female.rate" = Mat8Plus_Female.rate,
"Mat8Plus_Male.mu" = Mat8Plus_Male.mu,
"Mat8Plus_Male.alphaR.N" = Mat8Plus_Male.alphaR.N,
"Mat8Plus_Male.alphaT.N" = Mat8Plus_Male.alphaT.N,
"Mat8Plus_Male.alphaS.N" = Mat8Plus_Male.alphaS.N,
"Mat8Plus_Male.alpha.N" = Mat8Plus_Male.alpha.N,
"Mat8Plus_Male.target" = Mat8Plus_Male.target,
"Mat8Plus_Male.rate" = Mat8Plus_Male.rate,
### will add variabilities here later for below, if needed/wanted....
"Hatch_Fish.mu"=Hatch_Fish.mu,
"Rel_Surv.mu"=Rel_Surv.mu,
"Rel_Comp.mu"=Rel_Comp.mu,
"Rel_Fecund.mu"=Rel_Fecund.mu,
"Female_Fecundity.mu"=Female_Fecundity,
"Post_Spawn_Survival_Anadromous_M.mu" = Post_Spawn_Survival_Anadromous_M.mu,
"Post_Spawn_Survival_Anadromous_F.mu" = Post_Spawn_Survival_Anadromous_F.mu,
"Post_Spawn_Survival_Rainbow_M.mu" = Post_Spawn_Survival_Rainbow_M.mu ,
"Post_Spawn_Survival_Rainbow_F.mu" = Post_Spawn_Survival_Rainbow_F.mu ,
#"Female_Frac.mu"= Female_Frac.mu,
"Fry.x.siteMigration.mu"=Fry.x.siteMigration.mu,
"Par.x.siteMigration.mu"=Par.x.siteMigration.mu,
"Presmolt.x.siteMigration.mu"=Presmolt.x.siteMigration.mu,
"Spawner.x.siteMigration.mu"=Spawner.x.siteMigration.mu,
"Fry.x.siteMigration.target"=Fry.x.siteMigration.target,
"Par.x.siteMigration.target"=Par.x.siteMigration.target,
"Presmolt.x.siteMigration.target"=Presmolt.x.siteMigration.target,
"Spawner.x.siteMigration.target"=Spawner.x.siteMigration.target,
"Fry.x.siteMigration.alphaR.N" = Fry.x.siteMigration.alphaR,
"Fry.x.siteMigration.alphaT.N" =Fry.x.siteMigration.alphaT,
"Fry.x.siteMigration.alphaS.N" = Fry.x.siteMigration.alphaS,
"Fry.x.siteMigration.alpha.N" = Fry.x.siteMigration.alpha,
"Fry.x.siteMigration.rate" = Fry.x.siteMigration.rate,
"Par.x.siteMigration.alphaR.N" = Par.x.siteMigration.alphaR,
"Par.x.siteMigration.alphaT.N" = Par.x.siteMigration.alphaT,
"Par.x.siteMigration.alphaS.N" = Par.x.siteMigration.alphaS,
"Par.x.siteMigration.alpha.N" = Par.x.siteMigration.alpha,
"Par.x.siteMigration.rate" = Par.x.siteMigration.rate,
"Presmolt.x.siteMigration.alphaR.N" = Presmolt.x.siteMigration.alphaR,
"Presmolt.x.siteMigration.alphaT.N" = Presmolt.x.siteMigration.alphaT,
"Presmolt.x.siteMigration.alphaS.N" = Presmolt.x.siteMigration.alphaS,
"Presmolt.x.siteMigration.alpha.N" = Presmolt.x.siteMigration.alpha,
"Presmolt.x.siteMigration.rate" = Presmolt.x.siteMigration.rate,
"Spawner.x.siteMigration.alphaR.N" = Spawner.x.siteMigration.alphaR,
"Spawner.x.siteMigration.alphaT.N" = Spawner.x.siteMigration.alphaT,
"Spawner.x.siteMigration.alphaS.N" = Spawner.x.siteMigration.alphaS,
"Spawner.x.siteMigration.alpha.N" = Spawner.x.siteMigration.alpha,
"Spawner.x.siteMigration.rate" = Spawner.x.siteMigration.rate,
"N5.Rainbow.Fecundity" = N5.Rainbow.Fecundity
)
Inputs
detach(header)
return(Inputs)
}
# End of Read Data Function
#### End of Function #################
######################################
######################################
#######
#header<- Read.Header("Watershed_Header_File.xlsx")
#Inputs<-Read.Input.File(header)
| /MFJD Steelhead/Watershed_ReadData.R | permissive | petemchugh/ISEMP_WatMod | R | false | false | 34,193 | r |
##################################################################
Read.Header <- function(Header.File = "Watershed_Header_File.csv") {
  ##################################################################
  # Reads the watershed "header" CSV and returns the run configuration:
  # the fixed model dimensions, the scalar settings table (number of
  # sites, land-use categories, habitat types, time steps, ...), and
  # the per-site input / initialization / cross-site-migration file
  # names used by Read.Input.File().
  #
  # Args:
  #   Header.File: path to the header CSV. Defaults to
  #     "Watershed_Header_File.csv". (Bug fix: the previous version
  #     hard-coded this name inside the body, silently discarding the
  #     caller's argument; it is now an overridable default.)
  #
  # Returns:
  #   A named list; see the list() at the bottom for the full set of
  #   components.

  # Fixed model dimensions (not read from the file).
  I <- 17   # number of life stages  ### TODO: read this from the data file
  I5 <- 10  # number of potential pre-smolt years (steelhead)
  G <- 11   # number of hatchery/relative-survival categories

  # Watershed Information Table 2.3: ten scalar settings stored in
  # column 3 of file rows 2-11.
  WshedKJQ <- read.csv(Header.File, skip = 1, nrows = 10, header = FALSE)[, 3]

  # K, Q, J: number of sites, land-use categories per site, and habitat
  # types per site.  Tr = number of time steps, R = replicates,
  # MCsim1-4 = Monte Carlo simulation settings.
  K <- as.numeric(WshedKJQ[1])
  N.input.files <- as.numeric(WshedKJQ[2])
  Q <- as.numeric(WshedKJQ[3])
  J <- as.numeric(WshedKJQ[4])
  Tr <- as.numeric(WshedKJQ[5])
  R <- as.numeric(WshedKJQ[6])
  MCsim1 <- as.numeric(WshedKJQ[7])
  MCsim2 <- as.numeric(WshedKJQ[8])
  MCsim3 <- as.numeric(WshedKJQ[9])
  MCsim4 <- as.numeric(WshedKJQ[10])
  rm(WshedKJQ)

  # Time step at which each successive input file takes effect:
  # file row 14, columns 4 .. (4 + N.input.files - 1), one entry per
  # step change.
  T.step.change <- as.numeric(
    read.csv(Header.File, skip = 13, nrows = 1,
             header = FALSE)[, 4:(4 + N.input.files - 1)]
  )

  # One row per site (file rows 16 .. 16 + K - 1): column 3 holds the
  # initialization file name and columns 4.. hold one input file name
  # per step change (the model moved from one xlsx sheet per site to
  # one csv file per site, so file names replace sheet names here).
  file.names <-
    as.matrix((
      read.csv(Header.File,
               skip = 15, nrows = K,
               header = FALSE,
               colClasses = rep("character", (1 + N.input.files)))[1:K, 3:(N.input.files + 3)]
    ))
  Input.file.names <- array(file.names[, 2:(1 + N.input.files)], c(K, N.input.files))
  Init.file.names <- file.names[, 1]

  # Cross-site migration file names: file row 27, columns 4..,
  # one per step change.
  Cross.site.migration.file.names <-
    c(
      read.csv(Header.File,
               skip = 26, nrows = 1,
               header = FALSE,
               colClasses = rep("character", (1 + N.input.files)))[1, 4:(N.input.files + 3)]
    )

  # Site names live in column 2 of the same per-site rows.
  Site.Names <- read.csv(Header.File, skip = 15, nrows = K, header = FALSE,
                         colClasses = "character")[, 2]

  # NOTE: each input file is now its own site profile, so the legacy
  # watershed/river/stream/reach/site index columns are no longer read
  # here (the old xlsx-based code is retained in version control).

  return(
    list(
      "I" = I, "I5" = I5, "G" = G,
      "K" = K, "N.input.files" = N.input.files, "Q" = Q, "J" = J, "Tr" = Tr, "R" = R,
      "MCsim1" = MCsim1, "MCsim2" = MCsim2, "MCsim3" = MCsim3, "MCsim4" = MCsim4,
      "T.step.change" = T.step.change,
      "Input.file.names" = Input.file.names,
      "Init.file.names" = Init.file.names,
      "Cross.site.migration.file.names" = Cross.site.migration.file.names,
      "Site.Names" = Site.Names
    )
  )
}# end of function
## Finished to here - updated Reading Header File from .csv.
## Need to continue, reading input file(s) from .csv as well.
## Will now have multiple input file names - will have to update
## input file name for each site.
##################################################################
##################################################################
Read.Input.File <- function(header) {
attach(header)
#attach(header)
######################
# Initialize Vectors
M.mu = array(rep(0,(K*J*Q*Tr)),c(K,Q,J,Tr))
M.target = M.mu
M.alphaR.N = array(rep(0,(K*Q*Tr)),c(K,Q,Tr))
M.alphaT.N = M.alphaR.N
M.alphaS.N = M.alphaR.N
M.alpha.N = M.alphaR.N
M.rate = M.alphaR.N
Ak_x_Lqk.mu=array(rep(0,K*Q*Tr),c(K,Q,Tr))
Ak_x_Lqk.sigmaR=Ak_x_Lqk.mu
Ak_x_Lqk.sigmaT=Ak_x_Lqk.mu
Ak_x_Lqk.sigmaS=Ak_x_Lqk.mu
Ak_x_Lqk.sigma=Ak_x_Lqk.mu
Ak_x_Lqk.target=Ak_x_Lqk.mu
Ak_x_Lqk.rate=Ak_x_Lqk.mu
D.mu=array(rep(0, K* 5*(J+1)*Tr), c(K, J, 5, Tr))
D.sigmaR=D.mu
D.sigmaT=D.mu
D.sigmaS=D.mu
D.sigma=D.mu
D.target=D.mu
D.rate = D.mu
Prod_Scalar.mu=array(rep(0, K*5*Q*Tr), c(K, Q, 5, Tr))
Prod_Scalar.sigmaR=Prod_Scalar.mu
Prod_Scalar.sigmaT=Prod_Scalar.mu
Prod_Scalar.sigmaS=Prod_Scalar.mu
Prod_Scalar.sigma=Prod_Scalar.mu
Prod_Scalar.target = Prod_Scalar.mu
Prod_Scalar.rate = Prod_Scalar.mu
Sr.mu = array(rep(0, K*I*Tr), c(K,I,Tr))
Sr.alphaR.N= Sr.mu
Sr.alphaT.N= Sr.mu
Sr.alphaS.N= Sr.mu
Sr.alpha.N = Sr.mu
Sr.target = Sr.mu
Sr.rate = Sr.mu
# Presmolt Stuff...
SR5.mu = array(rep(0, K*I5*Tr), c(K, I5, Tr))
SR5.alphaR=SR5.mu
SR5.alphaT=SR5.mu
SR5.alphaS=SR5.mu
SR5.alpha=SR5.mu
SR5.target = SR5.mu
SR5.rate = SR5.mu
N5.Psmolt_Female.mu = SR5.mu
N5.Pspawn_Female.mu = SR5.mu
N5.Pstay_Female.mu = SR5.mu
N5.P.alphaR_Female.N = SR5.mu
N5.P.alphaT_Female.N = SR5.mu
N5.P.alphaS_Female.N = SR5.mu
N5.P.alpha_Female.N = SR5.mu
N5.Psmolt_Female.target = SR5.mu
N5.Pspawn_Female.target = SR5.mu
N5.Pstay_Female.target = SR5.mu
N5.P_Female.rate = SR5.mu
N5.Psmolt_Male.mu = SR5.mu
N5.Pspawn_Male.mu = SR5.mu
N5.Pstay_Male.mu = SR5.mu
N5.P.alphaR_Male.N = SR5.mu
N5.P.alphaT_Male.N = SR5.mu
N5.P.alphaS_Male.N = SR5.mu
N5.P.alpha_Male.N = SR5.mu
N5.Psmolt_Male.target = SR5.mu
N5.Pspawn_Male.target = SR5.mu
N5.Pstay_Male.target = SR5.mu
N5.P_Male.rate = SR5.mu
N5.Rainbow.Fecundity = array(rep(0, K*I5*Tr), c(K, I5, Tr))
N5.cap.mu = SR5.mu
N5.cap.sigmaR = SR5.mu
N5.cap.sigmaT = SR5.mu
N5.cap.sigmaS = SR5.mu
N5.cap.sigma = SR5.mu
N5.cap.target = SR5.mu
N5.cap.rate = SR5.mu
# Adult (ocean) fish by ocean age parameters (track up to 10 ocean years)
Mat8Plus_Female.mu = array(rep(0,K*10*Tr), c(K, 10, Tr))
Mat8Plus_Female.alphaR.N = Mat8Plus_Female.mu
Mat8Plus_Female.alphaT.N = Mat8Plus_Female.mu
Mat8Plus_Female.alphaS.N = Mat8Plus_Female.mu
Mat8Plus_Female.alpha.N = Mat8Plus_Female.mu
Mat8Plus_Female.target = Mat8Plus_Female.mu
Mat8Plus_Female.rate = Mat8Plus_Female.mu
Mat8Plus_Male.mu = array(rep(0,K*10*Tr), c(K, 10, Tr))
Mat8Plus_Male.alphaR.N = Mat8Plus_Female.mu
Mat8Plus_Male.alphaT.N = Mat8Plus_Female.mu
Mat8Plus_Male.alphaS.N = Mat8Plus_Female.mu
Mat8Plus_Male.alpha.N = Mat8Plus_Female.mu
Mat8Plus_Male.target = Mat8Plus_Female.mu
Mat8Plus_Male.rate = Mat8Plus_Female.mu
# Fc.by.O.Age.mu = Mat8Plus.mu
# Fc.by.O.Age.sigmaR = Mat8Plus.mu
# Fc.by.O.Age.sigmaT = Mat8Plus.mu
# Fc.by.O.Age.sigmaS = Mat8Plus.mu
# Fc.by.O.Age.sigma = Mat8Plus.mu
# Fc.by.O.Age.target = Fc.by.O.Age.mu
# Fc.by.O.Age.rate = Fc.by.O.Age.mu
C_ocean.mu = Mat8Plus_Female.mu
C_ocean.sigmaR = Mat8Plus_Female.mu
C_ocean.sigmaT = Mat8Plus_Female.mu
C_ocean.sigmaS = Mat8Plus_Female.mu
C_ocean.sigma = Mat8Plus_Female.mu
C_ocean.target = C_ocean.mu
C_ocean.rate = C_ocean.mu
# frac
frac.mu = array(rep(0, K*5*(J)*Tr), c(K, 5, (J), Tr))
frac.sigmaR = frac.mu
frac.sigmaT = frac.mu
frac.sigmaS = frac.mu
frac.sigma = frac.mu
frac.target = frac.mu
frac.rate = frac.mu
harvest.wild.mu = array(rep(0,K*Tr), c(K, Tr))
harvest.wild.sigmaR = harvest.wild.mu
harvest.wild.sigmaT = harvest.wild.mu
harvest.wild.sigmaS = harvest.wild.mu
harvest.wild.sigma = harvest.wild.mu
harvest.hatch.mu = harvest.wild.mu
harvest.hatch.sigmaR = harvest.wild.mu
harvest.hatch.sigmaT = harvest.wild.mu
harvest.hatch.sigmaS = harvest.wild.mu
harvest.hatch.sigma = harvest.wild.mu
harvest.wild.target = harvest.wild.mu
harvest.wild.rate = harvest.wild.mu
harvest.hatch.target = harvest.wild.mu
harvest.hatch.rate = harvest.wild.mu
Hatch_Fish.mu = array(rep(0, K*I*Tr), c(K, I, Tr))
Hatch_Fish.sigmaR = Hatch_Fish.mu
Hatch_Fish.sigmaT = Hatch_Fish.mu
Hatch_Fish.sigmaS = Hatch_Fish.mu
Hatch_Fish.sigma = Hatch_Fish.mu
Hatch_Fish.target = Hatch_Fish.mu
Hatch_Fish.rate = Hatch_Fish.mu
# Rel_Surv (G categories)
Rel_Surv.mu = array(rep(0, K*I*Tr*G), c(K, I, Tr, G))
Rel_Surv.sigmaR = Rel_Surv.mu
Rel_Surv.sigmaT = Rel_Surv.mu
Rel_Surv.sigmaS = Rel_Surv.mu
Rel_Surv.sigma = Rel_Surv.mu
Rel_Surv.target= Rel_Surv.mu
Rel_Surv.rate= Rel_Surv.mu
#Male Female Ratio
Post_Spawn_Survival_Anadromous_M.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
Post_Spawn_Survival_Anadromous_F.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
Post_Spawn_Survival_Rainbow_M.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
Post_Spawn_Survival_Rainbow_F.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
#Female_Frac.mu = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
#Fecundity of Female Spawners by Ocean Age)
Female_Fecundity = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
# Rel_Com (G categories)
Rel_Comp.mu = array(rep(0, K*I*Tr*G), c(K, I, Tr, G))
Rel_Comp.sigmaR = Rel_Comp.mu
Rel_Comp.sigmaT = Rel_Comp.mu
Rel_Comp.sigmaS = Rel_Comp.mu
Rel_Comp.sigma = Rel_Comp.mu
Rel_Comp.target= Rel_Comp.mu
Rel_Comp.rate= Rel_Comp.mu
# Rel_Fecund
Rel_Fecund.mu = array(rep(0,K*Tr*G), c(K, Tr, G))
Rel_Fecund.simgaR = Rel_Fecund.mu
Rel_Fecund.simgaT = Rel_Fecund.mu
Rel_Fecund.simgaS = Rel_Fecund.mu
Rel_Fecund.simga = Rel_Fecund.mu
Rel_Fecund.target = Rel_Fecund.mu
Rel_Fecund.rate = Rel_Fecund.mu
Fry.x.siteMigration.mu = array(rep(0, K*K*Tr), c(K,K,Tr))
Par.x.siteMigration.mu = array(rep(0, K*K*Tr), c(K,K,Tr))
Presmolt.x.siteMigration.mu = array(rep(0, K*K*Tr), c(K,K,Tr))
Spawner.x.siteMigration.mu = array(rep(0, K*K*Tr), c(K,K,Tr))
Fry.x.siteMigration.target = array(rep(0, K*K*Tr), c(K,K,Tr))
Par.x.siteMigration.target = array(rep(0, K*K*Tr), c(K,K,Tr))
Presmolt.x.siteMigration.target = array(rep(0, K*K*Tr), c(K,K,Tr))
Spawner.x.siteMigration.target = array(rep(0, K*K*Tr), c(K,K,Tr))
Fry.x.siteMigration.alphaR = array(rep(0, K*Tr), c(K, Tr))
Fry.x.siteMigration.alphaT = array(rep(0,K*Tr), c(K, Tr))
Fry.x.siteMigration.alphaS = array(rep(0,K*Tr), c(K, Tr))
Fry.x.siteMigration.alpha = array(rep(0,K*Tr), c(K, Tr))
Par.x.siteMigration.alphaR = array(rep(0,K*Tr), c(K, Tr))
Par.x.siteMigration.alphaT = array(rep(0,K*Tr), c(K, Tr))
Par.x.siteMigration.alphaS = array(rep(0,K*Tr), c(K, Tr))
Par.x.siteMigration.alpha = array(rep(0,K*Tr), c(K, Tr))
Presmolt.x.siteMigration.alphaR = array(rep(0,K*Tr), c(K, Tr))
Presmolt.x.siteMigration.alphaT = array(rep(0,K*Tr), c(K, Tr))
Presmolt.x.siteMigration.alphaS = array(rep(0,K*Tr), c(K, Tr))
Presmolt.x.siteMigration.alpha = array(rep(0,K*Tr), c(K, Tr))
Spawner.x.siteMigration.alphaR = array(rep(0,K*Tr), c(K, Tr))
Spawner.x.siteMigration.alphaT = array(rep(0,K*Tr), c(K, Tr))
Spawner.x.siteMigration.alphaS = array(rep(0,K*Tr), c(K, Tr))
Spawner.x.siteMigration.alpha = array(rep(0,K*Tr), c(K, Tr))
Fry.x.siteMigration.rate = array(rep(0, K*Tr), c(K, Tr))
Par.x.siteMigration.rate = array(rep(0, K*Tr), c(K, Tr))
Presmolt.x.siteMigration.rate = array(rep(0, K*Tr), c(K, Tr))
Spawner.x.siteMigration.rate = array(rep(0, K*Tr), c(K, Tr))
###########################
################################
################################
# loop through each site within input file
k=1
n.step=1
for (n.step in 1:N.input.files) {
T.lo= as.numeric(T.step.change[n.step])
if (n.step==N.input.files) {T.hi=Tr} else {T.hi= as.numeric(T.step.change[n.step+1])-1}
T.lo
T.hi
n.step
N.input.files
T.step.change
# loop through each input file
#Watershed.Input.File=Input.file.names[n.step]
for (k in 1:K) {
#print(k)
# Site=paste("Site",k,sep="")
Watershed.Input.File = as.character(Input.file.names[k, n.step])
# Read the M's
#T2.3 <-read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=26, endRow=(26+Q-1), colClasses=rep("numeric",J),
# rowIndex=26:(26+Q-1),
# colIndex=3:(3+J-1), header=F)
T2.3 <- as.matrix(read.csv(Watershed.Input.File, header=F,skip=27, nrows=Q)[,3:(3+J-1)])
T2.3
#T2.3Nalpha <- read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=26, endRow=(26+Q-1), colClasses=rep("numeric",4),
## rowIndex=26:(26+Q-1),
# colIndex=15:18, header=F)
T2.3Nalpha <- as.matrix(read.csv(Watershed.Input.File, header=F,
skip=27, nrows=Q)[,15:18])
T2.3Nalpha
#T2.3target <- read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=26, endRow=(26+Q-1), colClasses= rep("numeric",13),
## rowIndex=26:(26+Q-1),
# colIndex=19:31, header=F)
T2.3target <- as.matrix(read.csv(Watershed.Input.File, header=F,
skip=27, nrows=Q)[,19:(31)])
T2.3target
#T2.3rate <- read.xlsx(Watershed.Input.File, sheetName=Site,
# rowIndex=26:(26+Q-1), colIndex=19:3, header=F)
T2.3rate <- read.csv(Watershed.Input.File, header=F,
skip=27, nrows=Q)[,31]
T2.3rate
#as.numeric(T.lo):as.numeric(T.hi)
for (t in as.numeric(T.lo):as.numeric(T.hi)) {
M.alphaR.N[k,,t]=T2.3Nalpha[,1]
M.alphaT.N[k,,t]=T2.3Nalpha[,2]
M.alphaS.N[k,,t]=T2.3Nalpha[,3]
M.alpha.N[k,,t]= T2.3Nalpha[,4]
M.rate[k,,t]=T2.3target[,13]
for (j in 1:J) {
M.mu[k,,j,t]=as.numeric(T2.3[,j])
M.target[k,,j,t] = as.numeric(T2.3target[,j])
} #close j
}# close t
dim(M.mu)
M.mu[1,1,1,1:10]
M.target[1,1,1,1:10]
M.alphaR.N[k,,t]
#} # close site
#} # close time
##### OK to here... repeat for all other variables (7/8/2013)
#######################################################
# Read Ak_x_Lqk_vectors
#Ak <-read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=9, endRow=(9+Q-1), colClasses=rep("numeric",7),
## rowIndex=9:(9+Q-1),
#colIndex=3:9, header=F)
Ak <- read.csv(Watershed.Input.File, header=F,
skip=9, nrows=Q)[,3:9]
Ak
for (t in T.lo:T.hi) {
Ak_x_Lqk.mu[k, ,t] <-Ak[,1]
Ak_x_Lqk.sigmaR[k, ,t] <-Ak[,2]
Ak_x_Lqk.sigmaT[k, ,t] <-Ak[,3]
Ak_x_Lqk.sigmaS[k, ,t] <-Ak[,4]
Ak_x_Lqk.sigma[k, ,t] <-Ak[,5]
Ak_x_Lqk.target[k, ,t] <-Ak[,6]
Ak_x_Lqk.rate[k, ,t] <- Ak[,7]
} # end t
dim(Ak_x_Lqk.mu)
Ak_x_Lqk.mu[,,1:10]
Ak_x_Lqk.target[,,1:10]
rm(Ak)
#### OK to here 7/8/2013 3:03 pm #######
#########################################
# Read in Table 2_4 (to get to D matrix)
#Dtable= read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=43, endRow=55, colClasses=rep("numeric",35),
## rowIndex=43:55,
#colIndex=3:37, header=F)
Dtable = read.csv(Watershed.Input.File, header=F,
skip=44, nrows=12)[,3:37]
# Note - this has been updated so that capacity of spawning gravel is input directly in "spawner to egg" category
for (t in T.lo:T.hi) {
for (i in 1:5) {
D.mu[k,1:J,i,t] = Dtable[1:J,i]
#D.mu[k, (J+1),i ,t] = Dtable[13, i]
D.sigmaR[k,1:J,i,t] = Dtable[1:J,(i+5)]
#D.sigmaR[k, (J+1),i ,t] = Dtable[13, (i+5)]
D.sigmaT[k,1:J,i,t] = Dtable[1:J,(i+10)]
#D.sigmaT[k, (J+1),i ,t] = Dtable[13, (i+10)]
D.sigmaS[k,1:J,i,t] = Dtable[1:J,(i+15)]
#D.sigmaS[k, (J+1),i ,t] = Dtable[13, (i+15)]
D.sigma[k,1:J,i,t] = Dtable[1:J,(i+20)]
#D.sigma[k, (J+1),i ,t] = Dtable[13,(i+20)]
D.target[k,1:J,i,t] = Dtable[1:J,(i+25)]
#D.target[k,(J+1),i,t] = Dtable[13,(i+25)]
D.rate[k,1:J,i,t] = Dtable[1:J, (i+30)]
#D.rate[k,(J+1),i,t] = Dtable[13, (i+30)]
}
}
D.mu[1,1,1,1:10]
D.target[1,1,1,1:10]
rm(Dtable)
####### Productivity Scalars
#Etable= read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=61, endRow=(61+Q-1), colClasses=rep("numeric",36),
## rowIndex=61:(61+Q-1),
# colIndex=3:38, header=F)
Etable = read.csv(Watershed.Input.File, header=F,
skip=62, nrows=Q)[,3:37]
Etable
for (t in T.lo:T.hi) {
for (i in 1:5) {
Prod_Scalar.mu[k,1:Q,i,t] = Etable[1:Q,i]
Prod_Scalar.sigmaR[k,1:Q,i,t] = Etable[1:Q,(i+5)]
Prod_Scalar.sigmaT[k,1:Q,i,t] = Etable[1:Q,(i+10)]
Prod_Scalar.sigmaS[k,1:Q,i,t] = Etable[1:Q,(i+15)]
Prod_Scalar.sigma[k,1:Q,i,t] = Etable[1:Q,(i+20)]
Prod_Scalar.target[k,1:Q,i,t] = Etable[1:Q,(i+25)]
Prod_Scalar.rate[k,1:Q,i,t] = Etable[1:Q,(i+30)]
} #close i
} # close t
rm(Etable)
Prod_Scalar.mu[1,1,1,1:10]
Prod_Scalar.target[1,1,1,1:10]
#?read.xlsx
#SrTable = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=78, endRow=(78+I-1-1), colClasses=rep("numeric",7),
## rowIndex=78:(78+I-1-1),
# colIndex=4:10, header=F)
SrTable = read.csv(Watershed.Input.File, header=F,
skip= 79, nrows = I-1)[,4:10]
SrTable
for (t in T.lo:T.hi) {
Sr.mu[k,2:I ,t] = (SrTable[,1])
Sr.alphaR.N[k,2:I ,t]= SrTable[,2]
Sr.alphaT.N[k,2:I ,t]= SrTable[,3]
Sr.alphaS.N[k,2:I ,t]= SrTable[,4]
Sr.alpha.N[k,2:I ,t]= SrTable[,5]
Sr.target[k, 2:I, t] = SrTable[,6]
Sr.rate[k, 2:I, t] = SrTable[,7]
}
rm(SrTable)
dim(Sr.mu)
Sr.mu[1,1:5,1:10]
Sr.target[1,1:5, 1:10]
### Presmolt Inputs
#PSinputs = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=99, endRow=(99+I5-1),colClasses=rep("numeric", 26),
## rowIndex=99:(99+I5-1),
# colIndex=3:28, header=F)
PSinputs = read.csv(Watershed.Input.File, header=F,
skip=100, nrows = I5)[, 3:39]
PSinputs
for (t in T.lo:T.hi) {
SR5.mu[k, ,t] = PSinputs[,1]
SR5.alphaR[k, ,t] = PSinputs[,2]
SR5.alphaT[k, ,t] = PSinputs[,3]
SR5.alphaS[k, ,t]= PSinputs[,4]
SR5.alpha[k, ,t]= PSinputs[,5]
SR5.target[k, ,t] = PSinputs[,25]
SR5.rate[k, ,t] = PSinputs[,26]
N5.Psmolt_Female.mu[k, ,t]= PSinputs[,6]
N5.Pspawn_Female.mu[k, ,t] = PSinputs[,7]
N5.Pstay_Female.mu[k, ,t] = PSinputs[,8]
N5.P.alphaR_Female.N[k, ,t]= PSinputs[,9]
N5.P.alphaT_Female.N[k, ,t]= PSinputs[,10]
N5.P.alphaS_Female.N[k, ,t]= PSinputs[,11]
N5.P.alpha_Female.N[k, ,t] = PSinputs[,12]
N5.Psmolt_Male.mu[k, ,t]= PSinputs[,13]
N5.Pspawn_Male.mu[k, ,t] = PSinputs[,14]
N5.Pstay_Male.mu[k, ,t] = PSinputs[,15]
N5.P.alphaR_Male.N[k, ,t]= PSinputs[,16]
N5.P.alphaT_Male.N[k, ,t]= PSinputs[,17]
N5.P.alphaS_Male.N[k, ,t]= PSinputs[,18]
N5.P.alpha_Male.N[k, ,t] = PSinputs[,19]
N5.Psmolt_Female.target[k, ,t]=PSinputs[,27]
N5.Pspawn_Female.target[k, ,t]=PSinputs[,28]
N5.Pstay_Female.target[k, ,t]=PSinputs[,29]
N5.P_Female.rate[k, ,t] = PSinputs[,33]
N5.Psmolt_Male.target[k, ,t]=PSinputs[,30]
N5.Pspawn_Male.target[k, ,t]=PSinputs[,31]
N5.Pstay_Male.target[k, ,t]=PSinputs[,32]
N5.P_Male.rate[k, ,t] = PSinputs[,34]
PSinputs
N5.P_Male.rate
N5.cap.mu[k, ,t] = PSinputs[,20]
N5.cap.sigmaR[k, ,t]= PSinputs[,21]
N5.cap.sigmaT[k, ,t]= PSinputs[,22]
N5.cap.sigmaS[k, ,t]= PSinputs[,23]
N5.cap.sigma[k, ,t] = PSinputs[,24]
N5.cap.target[k, ,t] = PSinputs[,35]
N5.cap.rate[k, ,t] = PSinputs[,36]
N5.Rainbow.Fecundity[k, ,t] = PSinputs[,37]
}
N5.Rainbow.Fecundity
N5.cap.mu
rm(PSinputs)
#o.inputs = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=113, endRow=(113+10-1), colClasses=rep("numeric",21),
## rowIndex=113:(113+10-1),
# colIndex=4:24, header=F)
o.inputs = read.csv(Watershed.Input.File, header=F,
skip=115, nrow=10)[, 4:24]
o.inputs
for (t in T.lo:T.hi) {
Mat8Plus_Female.mu[k, ,t] = o.inputs[,1]
Mat8Plus_Female.alphaR.N[k, ,t] = o.inputs[,2]
Mat8Plus_Female.alphaT.N[k, ,t] = o.inputs[,3]
Mat8Plus_Female.alphaS.N[k, ,t] = o.inputs[,4]
Mat8Plus_Female.alpha.N[k, ,t] = o.inputs[,5]
Mat8Plus_Female.target[k, ,t] = o.inputs[,16]
Mat8Plus_Female.rate[k, ,t] = o.inputs[,17]
Mat8Plus_Male.mu[k, ,t] = o.inputs[,6]
Mat8Plus_Male.alphaR.N[k, ,t] = o.inputs[,7]
Mat8Plus_Male.alphaT.N[k, ,t] = o.inputs[,8]
Mat8Plus_Male.alphaS.N[k, ,t] = o.inputs[,9]
Mat8Plus_Male.alpha.N[k, ,t] = o.inputs[,10]
Mat8Plus_Male.target[k, ,t] = o.inputs[,18]
Mat8Plus_Male.rate[k, ,t] = o.inputs[,19]
C_ocean.mu[k, ,t] = o.inputs[,11]
C_ocean.sigmaR[k, ,t] = o.inputs[,12]
C_ocean.sigmaT[k, ,t] = o.inputs[,13]
C_ocean.sigmaS[k, ,t] = o.inputs[,14]
C_ocean.sigma[k, ,t] = o.inputs[,15]
C_ocean.target[k, ,t] = o.inputs[, 20]
C_ocean.rate[k, ,t] = o.inputs[,21]
}
rm(o.inputs)
### read "frac"
#fractions = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=128, endRow=(128+5-1),colClasses=rep("numeric", 84),
## rowIndex=128:(128+5-1),
# colIndex=4:87, header=F)
fractions = read.csv(Watershed.Input.File, header=F,
skip=130, nrows = 5)[,4:87]
fractions
#dim(frac.mu)
for (t in T.lo:T.hi) {
for (j in 1:J) {
frac.mu[k, ,j,t] = fractions[,j]
frac.sigmaR[k, ,j,t] = fractions[,j+12]
frac.sigmaT[k, ,j,t] = fractions[,j+24]
frac.sigmaS[k, ,j,t] = fractions[,j+36]
frac.sigma[k, ,j,t] = fractions[,j+48]
frac.target[k, ,j,t] = fractions[,j+60]
frac.rate[k, , j,t] = fractions[,j+72]
} # close j
} #close t
frac.rate[k, , ,t]
rm(fractions)
dim(frac.mu)
frac.mu[1,,,1:10]
frac.target[1,,,1:10]
###################
#harvest = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow = 137, endRow=138, colClasses=rep("numeric", 7),
## rowIndex=137:138,
# colIndex=3:9, header=F)
harvest = read.csv(Watershed.Input.File, header=F,
skip = 139, nrows = 2)[, 3:9]
#harvest
for (t in T.lo:T.hi) {
harvest.wild.mu[k,t] = harvest[1,1]
harvest.wild.sigmaR[k,t] = harvest[1,2]
harvest.wild.sigmaT[k,t] = harvest[1,3]
harvest.wild.sigmaS[k,t] = harvest[1,4]
harvest.wild.sigma[k,t] = harvest[1,5]
harvest.wild.target[k,t] = harvest[1,6]
harvest.wild.rate[k,t] = harvest[1,7]
harvest.hatch.mu[k,t] = harvest[2,1]
harvest.hatch.sigmaR[k,t] = harvest[2,2]
harvest.hatch.sigmaT[k,t] = harvest[2,3]
harvest.hatch.sigmaS[k,t] = harvest[2,4]
harvest.hatch.sigma[k,t] = harvest[2,5]
harvest.hatch.target[k,t] = harvest[2,6]
harvest.hatch.rate[k,t] = harvest[2,7]
} # close t
rm(harvest)
################################################
#Hatch_Fish_Inputs = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=145, endRow=145, colClasses= rep("numeric",4),
## rowIndex=145:145,
# colIndex=4:7, header=F)
Hatch_Fish_Inputs = read.csv(Watershed.Input.File, header=F,
skip=147, nrows=1)[, 2:4]
#dim(Hatch_Fish_Inputs)
for (t in T.lo:T.hi) {
Hatch_Fish.mu[k, 1:2, t]=0
Hatch_Fish.mu[k, 6:I, t] = 0
for (i in 3:5) {
Hatch_Fish.mu[k,i,t]= Hatch_Fish_Inputs[1, i-2]
}
}
#Rel_Surv_Inputs = read.xlsx2(Watershed.Input.File, sheetName=Site,
# startRow=149, endRow=160, colClasses=rep("numeric",8),
## rowIndex=149:160,
# colIndex=4:10, header=F)
Rel_Surv_Inputs = read.csv(Watershed.Input.File, header=F,
skip=152, nrow=11)[, 4:9] #Pete Feb 2016, this be nrow = 11 not 12, right?
# skip=152, nrow=12)[, 4:9] #Pete Feb 2016, this be nrow = 11 not 12, right?
#Rel_Surv_Inputs
# Will add variability at a later time ---M@
for (t in T.lo:T.hi) {
for (g in 1:G) {
for (i in 1:I) {
# the "min" is used below to assign all adult stages the same Rel_Surv
# and Rel_Comp values
Rel_Surv.mu[k,i,t,g]<-(Rel_Surv_Inputs[g, min(i,6)])
#Rel_Comp.mu[k,i,t,g]<-Rel_Comp_Inputs[g, min(i,6)]
#Rel_Comp.mu[k,i,t,g]
#Rel_Comp_Inputs[g, min(i,6)]
}
}
}
Rel_Surv_Inputs
Rel_Surv_Inputs[g, min(i,6)]
Rel_Surv.mu[k,,t,]
rm(Rel_Surv_Inputs)
#rm(Rel_Comp_Inputs)
Fecund_Inputs = read.csv(Watershed.Input.File, header=F,
skip=168, nrow=11)[, 4:13]
for (t in T.lo:T.hi) {
Female_Fecundity[k,,t,] = t(Fecund_Inputs)
}
rm(Fecund_Inputs)
#Post_Spawn_Survival_Anadromous = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
#Post_Spawn_Survival_Rainbow = array(rep(0, K*10*Tr*G), c(K,10,Tr,G))
Post_Spawn_Survival_Anadromous_Inputs =
read.csv(Watershed.Input.File, header=F,
skip=184, nrow=11)[, 4:23]
for (t in T.lo:T.hi) {
Post_Spawn_Survival_Anadromous_M.mu[k,,t,] = t(Post_Spawn_Survival_Anadromous_Inputs[,1:10])
Post_Spawn_Survival_Anadromous_F.mu[k,,t,] = t(Post_Spawn_Survival_Anadromous_Inputs[,11:20])
}
rm(Post_Spawn_Survival_Anadromous_Inputs)
Post_Spawn_Survival_Rainbow_Inputs =
read.csv(Watershed.Input.File, header=F,
skip=199, nrow=11)[, 4:23] #Pete October 2015 Fix--was previously referencing the wrong row...
for (t in T.lo:T.hi) {
Post_Spawn_Survival_Rainbow_M.mu[k,,t,] = t(Post_Spawn_Survival_Rainbow_Inputs[,1:10])
Post_Spawn_Survival_Rainbow_F.mu[k,,t,] = t(Post_Spawn_Survival_Rainbow_Inputs[,11:20])
}
rm(Post_Spawn_Survival_Rainbow_Inputs)
} # close site
# Cross Site Migration Matrix
Cross.Site.Mig = read.csv(as.character(Cross.site.migration.file.names[n.step]), header=F,
skip= 6, nrows=43)[, 3:27]
Cross.Site.Mig
#Cross.Site.Mig = read.xlsx2("Cross_Site_Migration.csv",
# startRow = 8, endRow=50, colClasses = rep("numeric", 25),
## rowIndex=8:50,
# colIndex=4:28, header=F,)
Cross.Site.Mig
for (t in T.lo:T.hi) {
for (k1 in 1:K) {
for (k2 in 1:K) {
Fry.x.siteMigration.mu[k1, k2,t] = Cross.Site.Mig[k1,k2]
Par.x.siteMigration.mu[k1, k2,t] = Cross.Site.Mig[k1+11,k2]
Presmolt.x.siteMigration.mu[k1, k2,t] = Cross.Site.Mig[k1+22,k2]
Spawner.x.siteMigration.mu[k1, k2,t] = Cross.Site.Mig[k1+33,k2]
Fry.x.siteMigration.target[k1, k2,t] = Cross.Site.Mig[k1,k2+14]
Par.x.siteMigration.target[k1, k2,t] = Cross.Site.Mig[k1+11,k2+14]
Presmolt.x.siteMigration.target[k1, k2,t] = Cross.Site.Mig[k1+22,k2+14]
Spawner.x.siteMigration.target[k1, k2,t] = Cross.Site.Mig[k1+33,k2+14]
}
Fry.x.siteMigration.alphaR[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 11]))
Fry.x.siteMigration.alphaT[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 12]))
Fry.x.siteMigration.alphaS[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 13]))
Fry.x.siteMigration.alpha[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 14]))
Fry.x.siteMigration.rate[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1, 25]))
Par.x.siteMigration.alphaR[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+11,11]))
Par.x.siteMigration.alphaT[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+11,12]))
Par.x.siteMigration.alphaS[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+11,13]))
Par.x.siteMigration.alpha[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+11,14]))
Par.x.siteMigration.rate[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1+11, 25]))
Presmolt.x.siteMigration.alphaR[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+22,11]))
Presmolt.x.siteMigration.alphaT[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+22,12]))
Presmolt.x.siteMigration.alphaS[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+22,13]))
Presmolt.x.siteMigration.alpha[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+22,14]))
Presmolt.x.siteMigration.rate[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1+22, 25]))
Spawner.x.siteMigration.alphaR[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+33,11]))
Spawner.x.siteMigration.alphaT[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+33,12]))
Spawner.x.siteMigration.alphaS[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+33,13]))
Spawner.x.siteMigration.alpha[k1,t] = as.numeric(as.character(Cross.Site.Mig[k1+33,14]))
Spawner.x.siteMigration.rate[k1, t] = as.numeric(as.character(Cross.Site.Mig[k1+33, 25]))
}}
Fry.x.siteMigration.target
Fry.x.siteMigration.alphaT
} # close cycling through number of input files
# Need to return EVERYTHING!!!
Inputs =
list(
"frac.mu"=frac.mu, "frac.sigmaR"=frac.sigmaR, "frac.sigmaT"=frac.sigmaT,
"frac.sigmaS"=frac.sigmaS, "frac.sigma"=frac.sigma,
"frac.target" = frac.target, "frac.rate" = frac.rate,
"harvest.wild.mu"= harvest.wild.mu,
"harvest.wild.sigmaR"= harvest.wild.sigmaR,
"harvest.wild.sigmaT"= harvest.wild.sigmaT,
"harvest.wild.sigmaS"= harvest.wild.sigmaS,
"harvest.wild.sigma"= harvest.wild.sigma,
"harvest.wild.target" = harvest.wild.target,
"harvest.wild.rate" = harvest.wild.rate,
"harvest.hatch.mu"= harvest.hatch.mu,
"harvest.hatch.sigmaR"= harvest.hatch.sigmaR,
"harvest.hatch.sigmaT"= harvest.hatch.sigmaT,
"harvest.hatch.sigmaS"= harvest.hatch.sigmaS,
"harvest.hatch.sigma"= harvest.hatch.sigma,
"harvest.hatch.target" = harvest.hatch.target,
"harvest.hatch.rate" = harvest.hatch.rate,
"Prod_Scalar.mu"=Prod_Scalar.mu, "Prod_Scalar.sigmaR"=Prod_Scalar.sigmaR,
"Prod_Scalar.sigmaT"=Prod_Scalar.sigmaT,
"Prod_Scalar.sigmaS"=Prod_Scalar.sigmaS,
"Prod_Scalar.sigma"=Prod_Scalar.sigma,
"Prod_Scalar.target" = Prod_Scalar.target,
"Prod_Scalar.rate" = Prod_Scalar.rate,
"M.mu"= M.mu, "M.alphaR.N" = M.alphaR.N,
"M.alphaT.N" = M.alphaT.N, "M.alphaS.N" = M.alphaS.N,
"M.alpha.N" = M.alpha.N,
"M.target"=M.target, "M.rate" = M.rate,
"Ak_x_Lqk.mu"=Ak_x_Lqk.mu, "Ak_x_Lqk.sigmaR"=Ak_x_Lqk.sigmaR,
"Ak_x_Lqk.sigmaT"=Ak_x_Lqk.sigmaT,"Ak_x_Lqk.sigmaS"=Ak_x_Lqk.sigmaS,
"Ak_x_Lqk.sigma"=Ak_x_Lqk.sigma,
"Ak_x_Lqk.target"=Ak_x_Lqk.target,
"Ak_x_Lqk.rate"= Ak_x_Lqk.rate,
"D.mu"= D.mu, "D.sigmaR" = D.sigmaR, "D.sigmaT" = D.sigmaT,
"D.sigmaS" = D.sigmaS, "D.sigma" = D.sigma,
"D.target" = D.target, "D.rate" = D.rate,
"Sr.mu" = Sr.mu, "Sr.alphaR.N" = Sr.alphaR.N, "Sr.alphaT.N" = Sr.alphaT.N,
"Sr.alphaS.N" = Sr.alphaS.N, "Sr.alpha.N" = Sr.alpha.N,
"Sr.target" = Sr.target, "Sr.rate"=Sr.rate,
"C_ocean.mu" = C_ocean.mu, "C_ocean.sigmaR" = C_ocean.sigmaR,
"C_ocean.sigmaT" = C_ocean.sigmaT,
"C_ocean.sigmaS" = C_ocean.sigmaS, "C_ocean.sigma" = C_ocean.sigma,
"C_ocean.target" = C_ocean.target, "C_ocean.rate" = C_ocean.rate,
"SR5.mu" = SR5.mu, "SR5.alphaR.N" = SR5.alphaR, "SR5.alphaT.N" = SR5.alphaT,
"SR5.alphaS.N" = SR5.alphaS, "SR5.alpha.N" = SR5.alpha,
"SR5.target" = SR5.target, "SR5.rate" = SR5.rate,
"N5.Psmolt_Female.mu" = N5.Psmolt_Female.mu,
"N5.Pspawn_Female.mu" = N5.Pspawn_Female.mu,
"N5.Pstay_Female.mu" = N5.Pstay_Female.mu,
"N5.Psmolt_Female.target" = N5.Psmolt_Female.target,
"N5.Pspawn_Female.target" = N5.Pspawn_Female.target,
"N5.Pstay_Female.target" = N5.Pstay_Female.target,
"N5.P_Female.rate" = N5.P_Female.rate,
"N5.P.alphaR_Female.N" = N5.P.alphaR_Female.N,
"N5.P.alphaT_Female.N" = N5.P.alphaT_Female.N,
"N5.P.alphaS_Female.N" = N5.P.alphaS_Female.N,
"N5.P.alpha_Female.N" = N5.P.alpha_Female.N,
"N5.Psmolt_Male.mu" = N5.Psmolt_Male.mu,
"N5.Pspawn_Male.mu" = N5.Pspawn_Male.mu,
"N5.Pstay_Male.mu" = N5.Pstay_Male.mu,
"N5.Psmolt_Male.target" = N5.Psmolt_Male.target,
"N5.Pspawn_Male.target" = N5.Pspawn_Male.target,
"N5.Pstay_Male.target" = N5.Pstay_Male.target,
"N5.P_Male.rate" = N5.P_Male.rate,
"N5.P.alphaR_Male.N" = N5.P.alphaR_Male.N,
"N5.P.alphaT_Male.N" = N5.P.alphaT_Male.N,
"N5.P.alphaS_Male.N" = N5.P.alphaS_Male.N,
"N5.P.alpha_Male.N" = N5.P.alpha_Male.N,
"N5.cap.mu" = N5.cap.mu, "N5.cap.sigmaR" = N5.cap.sigmaR,
"N5.cap.sigmaT" = N5.cap.sigmaT,"N5.cap.sigmaS" = N5.cap.sigmaS,
"N5.cap.sigma" = N5.cap.sigma,
"N5.cap.target" = N5.cap.target, "N5.cap.rate" = N5.cap.rate,
"Mat8Plus_Female.mu" = Mat8Plus_Female.mu,
"Mat8Plus_Female.alphaR.N" = Mat8Plus_Female.alphaR.N,
"Mat8Plus_Female.alphaT.N" = Mat8Plus_Female.alphaT.N,
"Mat8Plus_Female.alphaS.N" = Mat8Plus_Female.alphaS.N,
"Mat8Plus_Female.alpha.N" = Mat8Plus_Female.alpha.N,
"Mat8Plus_Female.target" = Mat8Plus_Female.target,
"Mat8Plus_Female.rate" = Mat8Plus_Female.rate,
"Mat8Plus_Male.mu" = Mat8Plus_Male.mu,
"Mat8Plus_Male.alphaR.N" = Mat8Plus_Male.alphaR.N,
"Mat8Plus_Male.alphaT.N" = Mat8Plus_Male.alphaT.N,
"Mat8Plus_Male.alphaS.N" = Mat8Plus_Male.alphaS.N,
"Mat8Plus_Male.alpha.N" = Mat8Plus_Male.alpha.N,
"Mat8Plus_Male.target" = Mat8Plus_Male.target,
"Mat8Plus_Male.rate" = Mat8Plus_Male.rate,
### will add variabilities here later for below, if needed/wanted....
"Hatch_Fish.mu"=Hatch_Fish.mu,
"Rel_Surv.mu"=Rel_Surv.mu,
"Rel_Comp.mu"=Rel_Comp.mu,
"Rel_Fecund.mu"=Rel_Fecund.mu,
"Female_Fecundity.mu"=Female_Fecundity,
"Post_Spawn_Survival_Anadromous_M.mu" = Post_Spawn_Survival_Anadromous_M.mu,
"Post_Spawn_Survival_Anadromous_F.mu" = Post_Spawn_Survival_Anadromous_F.mu,
"Post_Spawn_Survival_Rainbow_M.mu" = Post_Spawn_Survival_Rainbow_M.mu ,
"Post_Spawn_Survival_Rainbow_F.mu" = Post_Spawn_Survival_Rainbow_F.mu ,
#"Female_Frac.mu"= Female_Frac.mu,
"Fry.x.siteMigration.mu"=Fry.x.siteMigration.mu,
"Par.x.siteMigration.mu"=Par.x.siteMigration.mu,
"Presmolt.x.siteMigration.mu"=Presmolt.x.siteMigration.mu,
"Spawner.x.siteMigration.mu"=Spawner.x.siteMigration.mu,
"Fry.x.siteMigration.target"=Fry.x.siteMigration.target,
"Par.x.siteMigration.target"=Par.x.siteMigration.target,
"Presmolt.x.siteMigration.target"=Presmolt.x.siteMigration.target,
"Spawner.x.siteMigration.target"=Spawner.x.siteMigration.target,
"Fry.x.siteMigration.alphaR.N" = Fry.x.siteMigration.alphaR,
"Fry.x.siteMigration.alphaT.N" =Fry.x.siteMigration.alphaT,
"Fry.x.siteMigration.alphaS.N" = Fry.x.siteMigration.alphaS,
"Fry.x.siteMigration.alpha.N" = Fry.x.siteMigration.alpha,
"Fry.x.siteMigration.rate" = Fry.x.siteMigration.rate,
"Par.x.siteMigration.alphaR.N" = Par.x.siteMigration.alphaR,
"Par.x.siteMigration.alphaT.N" = Par.x.siteMigration.alphaT,
"Par.x.siteMigration.alphaS.N" = Par.x.siteMigration.alphaS,
"Par.x.siteMigration.alpha.N" = Par.x.siteMigration.alpha,
"Par.x.siteMigration.rate" = Par.x.siteMigration.rate,
"Presmolt.x.siteMigration.alphaR.N" = Presmolt.x.siteMigration.alphaR,
"Presmolt.x.siteMigration.alphaT.N" = Presmolt.x.siteMigration.alphaT,
"Presmolt.x.siteMigration.alphaS.N" = Presmolt.x.siteMigration.alphaS,
"Presmolt.x.siteMigration.alpha.N" = Presmolt.x.siteMigration.alpha,
"Presmolt.x.siteMigration.rate" = Presmolt.x.siteMigration.rate,
"Spawner.x.siteMigration.alphaR.N" = Spawner.x.siteMigration.alphaR,
"Spawner.x.siteMigration.alphaT.N" = Spawner.x.siteMigration.alphaT,
"Spawner.x.siteMigration.alphaS.N" = Spawner.x.siteMigration.alphaS,
"Spawner.x.siteMigration.alpha.N" = Spawner.x.siteMigration.alpha,
"Spawner.x.siteMigration.rate" = Spawner.x.siteMigration.rate,
"N5.Rainbow.Fecundity" = N5.Rainbow.Fecundity
)
Inputs
detach(header)
return(Inputs)
}
# End of Read Data Function
#### End of Function #################
######################################
######################################
#######
#header<- Read.Header("Watershed_Header_File.xlsx")
#Inputs<-Read.Input.File(header)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/plotNovelty-methods.R
\docType{methods}
\name{plotNovelty}
\alias{plotNovelty}
\alias{plotNovelty,SingleCellExperiment-method}
\title{Plot Novelty Score}
\usage{
plotNovelty(object, ...)
\S4method{plotNovelty}{SingleCellExperiment}(object, geom = c("violin",
"ridgeline", "ecdf", "histogram", "boxplot"), interestingGroups,
min = 0L, fill = getOption("bcbio.discrete.fill", NULL),
trans = "identity", title = "genes per UMI (novelty)")
}
\arguments{
\item{object}{Object.}
\item{...}{Additional arguments.}
\item{geom}{\code{string}. Plot type. Uses \code{\link[=match.arg]{match.arg()}} and defaults to the first
argument in the \code{character} vector.}
\item{interestingGroups}{\code{character} or \code{NULL}. Character vector of
interesting groups. Must be formatted in camel case and intersect with
\code{\link[=sampleData]{sampleData()}} colnames.}
\item{min}{\code{scalar numeric}. Recommended minimum value cutoff.}
\item{fill}{\code{ggproto}/\code{ScaleDiscrete} or \code{NULL}. Desired ggplot2 fill scale.
Must supply discrete values. When set to \code{NULL}, the default ggplot2 color
palette will be used. If manual color definitions are desired, we recommend
using \code{\link[ggplot2:scale_fill_manual]{ggplot2::scale_fill_manual()}}.
To set the discrete fill palette globally, use
\code{options(bcbio.discrete.fill = scale_fill_viridis_d())}.}
\item{trans}{\code{string}. Name of the axis scale transformation to apply. See
\code{help("scale_x_continuous", "ggplot2")} for more information.}
\item{title}{\code{string} or \code{NULL}. Plot title.}
}
\value{
\code{ggplot}.
}
\description{
"Novelty" refers to log10 genes detected per count.
}
\examples{
plotNovelty(indrops_small)
}
\seealso{
Other Quality Control Functions: \code{\link{barcodeRanksPerSample}},
\code{\link{filterCells}}, \code{\link{metrics}},
\code{\link{plotCellCounts}},
\code{\link{plotGenesPerCell}},
\code{\link{plotMitoRatio}},
\code{\link{plotMitoVsCoding}}, \code{\link{plotQC}},
\code{\link{plotReadsPerCell}},
\code{\link{plotUMIsPerCell}},
\code{\link{plotZerosVsDepth}}
}
\author{
Michael Steinbaugh
}
\concept{Quality Control Functions}
| /man/plotNovelty.Rd | permissive | chitrita/bcbioSingleCell | R | false | true | 2,261 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/plotNovelty-methods.R
\docType{methods}
\name{plotNovelty}
\alias{plotNovelty}
\alias{plotNovelty,SingleCellExperiment-method}
\title{Plot Novelty Score}
\usage{
plotNovelty(object, ...)
\S4method{plotNovelty}{SingleCellExperiment}(object, geom = c("violin",
"ridgeline", "ecdf", "histogram", "boxplot"), interestingGroups,
min = 0L, fill = getOption("bcbio.discrete.fill", NULL),
trans = "identity", title = "genes per UMI (novelty)")
}
\arguments{
\item{object}{Object.}
\item{...}{Additional arguments.}
\item{geom}{\code{string}. Plot type. Uses \code{\link[=match.arg]{match.arg()}} and defaults to the first
argument in the \code{character} vector.}
\item{interestingGroups}{\code{character} or \code{NULL}. Character vector of
interesting groups. Must be formatted in camel case and intersect with
\code{\link[=sampleData]{sampleData()}} colnames.}
\item{min}{\code{scalar numeric}. Recommended minimum value cutoff.}
\item{fill}{\code{ggproto}/\code{ScaleDiscrete} or \code{NULL}. Desired ggplot2 fill scale.
Must supply discrete values. When set to \code{NULL}, the default ggplot2 color
palette will be used. If manual color definitions are desired, we recommend
using \code{\link[ggplot2:scale_fill_manual]{ggplot2::scale_fill_manual()}}.
To set the discrete fill palette globally, use
\code{options(bcbio.discrete.fill = scale_fill_viridis_d())}.}
\item{trans}{\code{string}. Name of the axis scale transformation to apply. See
\code{help("scale_x_continuous", "ggplot2")} for more information.}
\item{title}{\code{string} or \code{NULL}. Plot title.}
}
\value{
\code{ggplot}.
}
\description{
"Novelty" refers to log10 genes detected per count.
}
\examples{
plotNovelty(indrops_small)
}
\seealso{
Other Quality Control Functions: \code{\link{barcodeRanksPerSample}},
\code{\link{filterCells}}, \code{\link{metrics}},
\code{\link{plotCellCounts}},
\code{\link{plotGenesPerCell}},
\code{\link{plotMitoRatio}},
\code{\link{plotMitoVsCoding}}, \code{\link{plotQC}},
\code{\link{plotReadsPerCell}},
\code{\link{plotUMIsPerCell}},
\code{\link{plotZerosVsDepth}}
}
\author{
Michael Steinbaugh
}
\concept{Quality Control Functions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare_peaks.r
\name{subsetByRegion}
\alias{subsetByRegion}
\title{Subset by region}
\usage{
subsetByRegion(ranges, chrom, start, end)
}
\arguments{
\item{ranges}{GRanges object}
\item{chrom}{selected chromosome}
\item{start}{selected starting position}
\item{end}{selected ending position}
}
\value{
GRanges object of track in selected coordinates
}
\description{
Subset by region
}
| /man/subsetByRegion.Rd | no_license | emdann/hexamerModel | R | false | true | 466 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare_peaks.r
\name{subsetByRegion}
\alias{subsetByRegion}
\title{Subset by region}
\usage{
subsetByRegion(ranges, chrom, start, end)
}
\arguments{
\item{ranges}{GRanges object}
\item{chrom}{selected chromosome}
\item{start}{selected starting position}
\item{end}{selected ending position}
}
\value{
GRanges object of track in selected coordinates
}
\description{
Subset by region
}
|
# Forecasting class exercise, session 1: Belgian road fatalities.
# Fits two random-walk benchmarks (naive and drift), compares their accuracy
# on a train/test split, checks residual whiteness, then forecasts 2 years
# ahead on the full series.
library(readxl)
library(fpp2)
library(portes)

# NOTE(review): hard-coded working directory; adjust to your machine.
setwd("C:/Users/cxie/Desktop/Forecasting")
data <- read_excel("DataSets2020.xlsx", sheet = "Fatalities")

# Annual series (frequency = 1) starting in 1965.
Fat <- ts(data[, 2], frequency = 1, start = 1965)
plot(Fat)
tsdisplay(Fat)

# Train on 1965-2008, hold out 2009 onward for testing.
fat_train <- window(Fat, end = 2008)
fat_test <- window(Fat, start = 2009)
h <- length(fat_test)

# naive (random walk) forecast
f1 <- rwf(fat_train, h = h)
# random walk with drift
f2 <- rwf(fat_train, drift = TRUE, h = h)

# Overlay both benchmark forecasts on the observed series.
plot(Fat, main = "Fatalities", ylab = "", xlab = "Day")
lines(f1$mean, col = 4)
lines(f2$mean, col = 2)

a1 <- accuracy(f1, fat_test)[, c(2, 3, 5, 6)]
#                  RMSE      MAE      MAPE     MASE
# Training set 140.3844 107.2326  5.573924 1.000000
# Test set     239.7130 212.9000 30.502956 1.985404
a2 <- accuracy(f2, fat_test)[, c(2, 3, 5, 6)]
#                   RMSE       MAE     MAPE      MASE
# Training set 136.42816 106.71282 5.347486 0.9951531
# Test set      42.53407  36.09302 5.085152 0.3365864

# Residual diagnostics for the naive forecast: Ljung-Box at several lags.
checkresiduals(f1)
res <- residuals(f1)
res <- na.omit(res)
LjungBox(res, lags = seq(1, 12, 3), order = 0)
# lags statistic df   p-value
#    1 0.6465914  1 0.4213340
#    4 2.0700046  4 0.7228848
#    7 6.5571802  7 0.4763926
#   10 9.4440062 10 0.4905466

# Refit the drift model on the whole data and forecast 2 years ahead.
# BUG FIX: plot(f3) / lines(fat_test, ...) previously appeared earlier in
# the script, before f3 was defined, which raised an error.
f3 <- rwf(Fat, drift = TRUE, h = 2)
plot(f3)
lines(fat_test, col = "red")
| /class-execerise_session1.R | no_license | xiechenxin/Forecasting | R | false | false | 1,336 | r | library(readxl)
# Forecasting class exercise, session 1: Belgian road fatalities.
# Fits two random-walk benchmarks (naive and drift), compares their accuracy
# on a train/test split, checks residual whiteness, then forecasts 2 years
# ahead on the full series.
library(fpp2)
library(portes)

# NOTE(review): hard-coded working directory; adjust to your machine.
setwd("C:/Users/cxie/Desktop/Forecasting")
data <- read_excel("DataSets2020.xlsx", sheet = "Fatalities")

# Annual series (frequency = 1) starting in 1965.
Fat <- ts(data[, 2], frequency = 1, start = 1965)
plot(Fat)
tsdisplay(Fat)

# Train on 1965-2008, hold out 2009 onward for testing.
fat_train <- window(Fat, end = 2008)
fat_test <- window(Fat, start = 2009)
h <- length(fat_test)

# naive (random walk) forecast
f1 <- rwf(fat_train, h = h)
# random walk with drift
f2 <- rwf(fat_train, drift = TRUE, h = h)

# Overlay both benchmark forecasts on the observed series.
plot(Fat, main = "Fatalities", ylab = "", xlab = "Day")
lines(f1$mean, col = 4)
lines(f2$mean, col = 2)

a1 <- accuracy(f1, fat_test)[, c(2, 3, 5, 6)]
#                  RMSE      MAE      MAPE     MASE
# Training set 140.3844 107.2326  5.573924 1.000000
# Test set     239.7130 212.9000 30.502956 1.985404
a2 <- accuracy(f2, fat_test)[, c(2, 3, 5, 6)]
#                   RMSE       MAE     MAPE      MASE
# Training set 136.42816 106.71282 5.347486 0.9951531
# Test set      42.53407  36.09302 5.085152 0.3365864

# Residual diagnostics for the naive forecast: Ljung-Box at several lags.
checkresiduals(f1)
res <- residuals(f1)
res <- na.omit(res)
LjungBox(res, lags = seq(1, 12, 3), order = 0)
# lags statistic df   p-value
#    1 0.6465914  1 0.4213340
#    4 2.0700046  4 0.7228848
#    7 6.5571802  7 0.4763926
#   10 9.4440062 10 0.4905466

# Refit the drift model on the whole data and forecast 2 years ahead.
# BUG FIX: plot(f3) / lines(fat_test, ...) previously appeared earlier in
# the script, before f3 was defined, which raised an error.
f3 <- rwf(Fat, drift = TRUE, h = 2)
plot(f3)
lines(fat_test, col = "red")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DatabaseLinke.R
\name{link_SNPedia_clip2clip}
\alias{link_SNPedia_clip2clip}
\title{link_SNPedia_clip2clip}
\usage{
link_SNPedia_clip2clip(
rdIDs = clipr::read_clip_tbl(header = F),
searchQueryPrefix = "https://www.snpedia.com/index.php/",
as.ExcelLink = T,
as.MarkDownLink = F
)
}
\arguments{
\item{rdIDs}{A list of rsIDs from an Excel column.}
\item{searchQueryPrefix}{The base URL for SNPedia search, default: 'https://www.snpedia.com/index.php/'.}
\item{as.ExcelLink}{A logical indicating whether to format the links as Excel links, default: TRUE.}
\item{as.MarkDownLink}{A logical indicating whether to format the links as Markdown links, default: FALSE.}
}
\description{
Generate SNPedia links from a list of rsIDs copied from an Excel column.
}
\examples{
link_SNPedia_clip2clip(rdIDs = clipr::read_clip_tbl(header=F))
}
| /man/link_SNPedia_clip2clip.Rd | permissive | vertesy/DatabaseLinke.R | R | false | true | 918 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DatabaseLinke.R
\name{link_SNPedia_clip2clip}
\alias{link_SNPedia_clip2clip}
\title{link_SNPedia_clip2clip}
\usage{
link_SNPedia_clip2clip(
rdIDs = clipr::read_clip_tbl(header = F),
searchQueryPrefix = "https://www.snpedia.com/index.php/",
as.ExcelLink = T,
as.MarkDownLink = F
)
}
\arguments{
\item{rdIDs}{A list of rsIDs from an Excel column.}
\item{searchQueryPrefix}{The base URL for SNPedia search, default: 'https://www.snpedia.com/index.php/'.}
\item{as.ExcelLink}{A logical indicating whether to format the links as Excel links, default: TRUE.}
\item{as.MarkDownLink}{A logical indicating whether to format the links as Markdown links, default: FALSE.}
}
\description{
Generate SNPedia links from a list of rsIDs copied from an Excel column.
}
\examples{
link_SNPedia_clip2clip(rdIDs = clipr::read_clip_tbl(header=F))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/histo.dist.R
\name{histo.dist}
\alias{histo.dist}
\title{Histogram of all degrees of a network}
\usage{
histo.dist(g)
}
\arguments{
\item{g}{The input network.}
}
\value{
A .gif plot.
}
\description{
Plot the histogram of all degrees of a network.
}
\details{
Plot the histogram of all degrees of a network.
}
\examples{
\dontrun{
x <- net.erdos.renyi.gnp(1000, 0.05)
histo.dist(x)}
}
\author{
Xu Dong, Nazrul Shaikh.
}
| /man/histo.dist.Rd | no_license | ajagaja/fastnet | R | false | true | 499 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/histo.dist.R
\name{histo.dist}
\alias{histo.dist}
\title{Histogram of all degrees of a network}
\usage{
histo.dist(g)
}
\arguments{
\item{g}{The input network.}
}
\value{
A .gif plot.
}
\description{
Plot the histogram of all degrees of a network.
}
\details{
Plot the histogram of all degrees of a network.
}
\examples{
\dontrun{
x <- net.erdos.renyi.gnp(1000, 0.05)
histo.dist(x)}
}
\author{
Xu Dong, Nazrul Shaikh.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-activation.R
\name{nn_celu}
\alias{nn_celu}
\title{CELU module}
\usage{
nn_celu(alpha = 1, inplace = FALSE)
}
\arguments{
\item{alpha}{the \eqn{\alpha} value for the CELU formulation. Default: 1.0}
\item{inplace}{can optionally do the operation in-place. Default: \code{FALSE}}
}
\description{
Applies the element-wise function:
}
\details{
\deqn{
\mbox{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
}
More details can be found in the paper
\href{https://arxiv.org/abs/1704.07483}{Continuously Differentiable Exponential Linear Units}.
}
\section{Shape}{
\itemize{
\item Input: \eqn{(N, *)} where \code{*} means, any number of additional
dimensions
\item Output: \eqn{(N, *)}, same shape as the input
}
}
\examples{
if (torch_is_installed()) {
m <- nn_celu()
input <- torch_randn(2)
output <- m(input)
}
}
| /man/nn_celu.Rd | permissive | krzjoa/torch | R | false | true | 910 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-activation.R
\name{nn_celu}
\alias{nn_celu}
\title{CELU module}
\usage{
nn_celu(alpha = 1, inplace = FALSE)
}
\arguments{
\item{alpha}{the \eqn{\alpha} value for the CELU formulation. Default: 1.0}
\item{inplace}{can optionally do the operation in-place. Default: \code{FALSE}}
}
\description{
Applies the element-wise function:
}
\details{
\deqn{
\mbox{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))
}
More details can be found in the paper
\href{https://arxiv.org/abs/1704.07483}{Continuously Differentiable Exponential Linear Units}.
}
\section{Shape}{
\itemize{
\item Input: \eqn{(N, *)} where \code{*} means, any number of additional
dimensions
\item Output: \eqn{(N, *)}, same shape as the input
}
}
\examples{
if (torch_is_installed()) {
m <- nn_celu()
input <- torch_randn(2)
output <- m(input)
}
}
|
#==============================================================================#
#                                univariateQual                                #
#==============================================================================#
#' univariateQual
#'
#' \code{univariateQual} Performs univariate analysis on a single qualitative
#' or categorical variable. The function returns a stacked bar plot showing
#' frequencies and percentages, with categories ordered by descending
#' frequency.
#'
#' @param data Single column data frame containing the categorical variable
#' @param xLab Capitalized character string for the variable name or label
#'
#' @return bp Frequency/proportion bar plot produced by \code{plotFreqProp}
#'
#' @author John James, \email{jjames@@datasciencesalon.org}
#' @family regression functions
#' @export
univariateQual <- function(data, xLab) {
  # Delegate to plotFreqProp; order = "d" sorts categories by
  # descending frequency.
  bp <- plotFreqProp(data, xLab = xLab, order = "d")
  return(bp)
}
| /R/univariateQual.R | no_license | john-james-ai/mdb | R | false | false | 953 | r | #==============================================================================#
#                                univariateQual                                #
#==============================================================================#
#' univariateQual
#'
#' \code{univariateQual} Performs univariate analysis on a single qualitative
#' or categorical variable. The function returns a stacked bar plot showing
#' frequencies and percentages, with categories ordered by descending
#' frequency.
#'
#' @param data Single column data frame containing the categorical variable
#' @param xLab Capitalized character string for the variable name or label
#'
#' @return bp Frequency/proportion bar plot produced by \code{plotFreqProp}
#'
#' @author John James, \email{jjames@@datasciencesalon.org}
#' @family regression functions
#' @export
univariateQual <- function(data, xLab) {
  # Delegate to plotFreqProp; order = "d" sorts categories by
  # descending frequency.
  bp <- plotFreqProp(data, xLab = xLab, order = "d")
  return(bp)
}
|
library(data.table)
library(batchtools)
library(ggplot2)
# Simulation parameters ----------------------------------------------------------------
num_replicates <- 1000
n <- 100
p <- 10
# Algorithm parameters ----------------------------------------------------------------
learners <- c("regr.lm", "regr.ranger", "regr.nnet", "regr.svm")
tests <- c("t", "fisher", "U")
# Registry ----------------------------------------------------------------
reg_name <- "cpi_power_cv"
reg_dir <- file.path("registries", reg_name)
dir.create("registries", showWarnings = FALSE)
unlink(reg_dir, recursive = TRUE)
makeExperimentRegistry(file.dir = reg_dir,
packages = c("mlr"),
source = "cpi_mlr.R")
# Problems ----------------------------------------------------------------
# Simulate one regression task: p uniform predictors with known linear
# effect sizes (pairs of 0 and +/-1..4; assumes p is a multiple of 10)
# plus standard normal noise. `data` and `job` are required by the
# batchtools problem-function signature and are unused here.
sim_data <- function(data, job, n, p, ...) {
  effect_sizes <- rep(c(0, 0, -1, 1, -2, 2, -3, 3, -4, 4), each = p/10)
  intercept <- 0
  X <- matrix(runif(n * p), ncol = p,
              dimnames = list(NULL, paste0("x", seq_len(p))))
  response <- X %*% effect_sizes + intercept + rnorm(n)
  sim_df <- data.frame(y = response, X)
  makeRegrTask(data = sim_df, target = "y")
}
addProblem(name = "sim", fun = sim_data)
# Algorithms ----------------------------------------------------------------
# Compute the CPI for every predictor of the instance task using the
# requested learner with 5-fold CV; extra arguments (test, permute, log)
# are forwarded to brute_force_mlr() from cpi_mlr.R.
cpi <- function(data, job, instance, learner_name, ...) {
  # Learner-specific hyperparameters; learners without an entry use defaults.
  hyperpars <- switch(learner_name,
                      regr.ranger = list(num.trees = 50),
                      regr.nnet = list(size = 3, decay = 1, trace = FALSE),
                      regr.svm = list(kernel = "radial"),
                      list())
  lrn <- makeLearner(learner_name, par.vals = hyperpars)
  cv5 <- makeResampleDesc("CV", iters = 5)
  brute_force_mlr(task = instance, learner = lrn, resampling = cv5, ...)
}
addAlgorithm(name = "cpi", fun = cpi)
# Experiments -----------------------------------------------------------
# Full factorial design: every learner x every test, replicated
# num_replicates times.
prob_design <- list(sim = expand.grid(n = n, p = p,
stringsAsFactors = FALSE))
algo_design <- list(cpi = expand.grid(learner_name = learners,
test = tests,
permute = TRUE,
log = TRUE,
stringsAsFactors = FALSE))
addExperiments(prob_design, algo_design, repls = num_replicates)
summarizeExperiments()
# Submit -----------------------------------------------------------
# On the cluster (hostname matches nodeXX or bipscluster) submit in
# array chunks with explicit resources; otherwise run everything locally.
if (grepl("node\\d{2}|bipscluster", system("hostname", intern = TRUE))) {
ids <- findNotStarted()
ids[, chunk := chunk(job.id, chunk.size = 400)]
submitJobs(ids = ids, # walltime in seconds, 10 days max, memory in MB
resources = list(name = reg_name, chunks.as.arrayjobs = TRUE,
ncpus = 1, memory = 6000, walltime = 10*24*3600,
max.concurrent.jobs = 400))
} else {
submitJobs()
}
waitForJobs()
# Get results -------------------------------------------------------------
# Join job parameters onto the results and reshape to long format:
# one row per (job, variable).
res_wide <- flatten(flatten(ijoin(reduceResultsDataTable(), getJobPars())))
res <- melt(res_wide, measure.vars = patterns("^Variable*", "^CPI*", "^statistic*", "^p.value*"),
value.name = c("Variable", "CPI", "Statistic", "p.value"))
res[, Variable := factor(Variable, levels = paste0("x", 1:unique(p)))]
saveRDS(res, "power_simulation_cv.Rds")
# Plots -------------------------------------------------------------
# Boxplots of CPI values per variable; red line marks CPI = 0 (no impact).
ggplot(res, aes(x = Variable, y = CPI)) +
geom_boxplot() +
facet_wrap(~ learner_name, scales = "free") +
geom_hline(yintercept = 0, col = "red") +
xlab("Variable") + ylab("CPI value")
ggsave("cv_CPI.pdf")
# Power (mean over replications): proportion of replications with
# p <= 0.05, aggregated by absolute effect size; the black line marks
# the nominal 5% level expected under the null.
res[, reject := p.value <= 0.05]
res_mean <- res[, .(power = mean(reject, na.rm = TRUE)), by = .(problem, algorithm, learner_name, test, Variable)]
levels(res_mean$Variable) <- rep(c(0, 0, -1, 1, -2, 2, -3, 3, -4, 4), each = p/10)
res_mean[, Variable := abs(as.numeric(as.character(Variable)))]
res_mean[, power := mean(power), by = list(problem, algorithm, learner_name, test, Variable)]
ggplot(res_mean, aes(x = Variable, y = power, col = test, shape = test)) +
geom_line() + geom_point() +
facet_wrap(~ learner_name) +
geom_hline(yintercept = 0.05, col = "black") +
scale_color_brewer(palette = "Set1") +
xlab("Effect size") + ylab("Rejected hypotheses")
ggsave("cv_power.pdf")
| /attic/power_simulation/power_simulation_cv.R | no_license | dswatson/cpi_paper | R | false | false | 4,391 | r |
library(data.table)
library(batchtools)
library(ggplot2)

# Power simulation for the Conditional Predictive Impact (CPI) with
# cross-validated learners, organized as a batchtools experiment registry.

# Simulation parameters ----------------------------------------------------------------
# 1000 replications: n = 100 observations, p = 10 uniform predictors.
num_replicates <- 1000
n <- 100
p <- 10
# Algorithm parameters ----------------------------------------------------------------
# Learners to compare, and statistical tests applied to the CPI values.
learners <- c("regr.lm", "regr.ranger", "regr.nnet", "regr.svm")
tests <- c("t", "fisher", "U")
# Registry ----------------------------------------------------------------
# Start from a clean registry; cpi_mlr.R provides brute_force_mlr().
reg_name <- "cpi_power_cv"
reg_dir <- file.path("registries", reg_name)
dir.create("registries", showWarnings = FALSE)
unlink(reg_dir, recursive = TRUE)
makeExperimentRegistry(file.dir = reg_dir,
packages = c("mlr"),
source = "cpi_mlr.R")
# Problems ----------------------------------------------------------------
# Simulate a regression task: uniform predictors, known linear effect
# sizes (pairs of 0 and +/-1..4; assumes p is a multiple of 10), and
# standard normal noise. `data`/`job` are required by batchtools, unused.
sim_data <- function(data, job, n, p, ...) {
beta <- rep(c(0, 0, -1, 1, -2, 2, -3, 3, -4, 4), each = p/10)
beta0 <- 0
x <- matrix(runif(n * p), ncol = p,
dimnames = list(NULL, paste0('x', seq_len(p))))
y <- x %*% beta + beta0 + rnorm(n)
dat <- data.frame(y = y, x)
makeRegrTask(data = dat, target = "y")
}
addProblem(name = "sim", fun = sim_data)
# Algorithms ----------------------------------------------------------------
# Compute the CPI per predictor with the requested learner (5-fold CV);
# extra args (test, permute, log) are forwarded to brute_force_mlr().
cpi <- function(data, job, instance, learner_name, ...) {
par.vals <- switch(learner_name,
regr.ranger = list(num.trees = 50),
regr.nnet = list(size = 3, decay = 1, trace = FALSE),
regr.svm = list(kernel = "radial"),
list())
brute_force_mlr(task = instance, learner = makeLearner(learner_name, par.vals = par.vals),
resampling = makeResampleDesc("CV", iters = 5), ...)
}
addAlgorithm(name = "cpi", fun = cpi)
# Experiments -----------------------------------------------------------
# Full factorial design: every learner x every test, replicated.
prob_design <- list(sim = expand.grid(n = n, p = p,
stringsAsFactors = FALSE))
algo_design <- list(cpi = expand.grid(learner_name = learners,
test = tests,
permute = TRUE,
log = TRUE,
stringsAsFactors = FALSE))
addExperiments(prob_design, algo_design, repls = num_replicates)
summarizeExperiments()
# Submit -----------------------------------------------------------
# On the cluster (hostname matches nodeXX or bipscluster) submit in
# array chunks with explicit resources; otherwise run locally.
if (grepl("node\\d{2}|bipscluster", system("hostname", intern = TRUE))) {
ids <- findNotStarted()
ids[, chunk := chunk(job.id, chunk.size = 400)]
submitJobs(ids = ids, # walltime in seconds, 10 days max, memory in MB
resources = list(name = reg_name, chunks.as.arrayjobs = TRUE,
ncpus = 1, memory = 6000, walltime = 10*24*3600,
max.concurrent.jobs = 400))
} else {
submitJobs()
}
waitForJobs()
# Get results -------------------------------------------------------------
# Join job parameters onto the results and reshape to long format.
res_wide <- flatten(flatten(ijoin(reduceResultsDataTable(), getJobPars())))
res <- melt(res_wide, measure.vars = patterns("^Variable*", "^CPI*", "^statistic*", "^p.value*"),
value.name = c("Variable", "CPI", "Statistic", "p.value"))
res[, Variable := factor(Variable, levels = paste0("x", 1:unique(p)))]
saveRDS(res, "power_simulation_cv.Rds")
# Plots -------------------------------------------------------------
# Boxplots of CPI values per variable; red line marks CPI = 0.
ggplot(res, aes(x = Variable, y = CPI)) +
geom_boxplot() +
facet_wrap(~ learner_name, scales = "free") +
geom_hline(yintercept = 0, col = "red") +
xlab("Variable") + ylab("CPI value")
ggsave("cv_CPI.pdf")
# Power (mean over replications): proportion of replications with
# p <= 0.05, aggregated by absolute effect size per learner and test.
res[, reject := p.value <= 0.05]
res_mean <- res[, .(power = mean(reject, na.rm = TRUE)), by = .(problem, algorithm, learner_name, test, Variable)]
levels(res_mean$Variable) <- rep(c(0, 0, -1, 1, -2, 2, -3, 3, -4, 4), each = p/10)
res_mean[, Variable := abs(as.numeric(as.character(Variable)))]
res_mean[, power := mean(power), by = list(problem, algorithm, learner_name, test, Variable)]
ggplot(res_mean, aes(x = Variable, y = power, col = test, shape = test)) +
geom_line() + geom_point() +
facet_wrap(~ learner_name) +
geom_hline(yintercept = 0.05, col = "black") +
scale_color_brewer(palette = "Set1") +
xlab("Effect size") + ylab("Rejected hypotheses")
ggsave("cv_power.pdf")
|
# Fit a cross-validated elastic-net regression (glmnet) on the
# ReliefF-filtered cervix feature set and append the fitted model's
# coefficient-path summary to a results file.
library(glmnet)

# Column 1 is the response; columns 4..end are the predictors.
# Fixed `head = T` -> `header = TRUE`: `head` only worked via R's
# partial argument matching.
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/cervix.csv", header = TRUE, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

set.seed(123)
# 10-fold CV elastic net. NOTE(review): alpha = 0.4 but the output file
# is named "_050" -- confirm which alpha this run was meant to use.
# Renamed the result from `glm` to `en_fit`: the old name masked stats::glm().
en_fit = cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.4,
                   family = "gaussian", standardize = FALSE)

# capture.output() writes the same print() output as the original
# sink()/print()/sink() triple, but cannot leave a sink open on error.
capture.output(print(en_fit$glmnet.fit), file = "./cervix_050.txt", append = TRUE)
| /Model/EN/ReliefF/cervix/cervix_050.R | no_license | esbgkannan/QSMART | R | false | false | 346 | r | library(glmnet)
# Load the ReliefF-filtered cervix training set; column 1 is the
# response, columns 4..end are the predictors.
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/cervix.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
# 10-fold CV elastic net (alpha = 0.4).
# NOTE(review): the variable name `glm` masks stats::glm(); the "_050"
# file name suggests alpha = 0.5 may have been intended -- confirm.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.4,family="gaussian",standardize=FALSE)
# Append the fitted model's coefficient-path summary to the results file.
sink('./cervix_050.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
\name{makessp}
\alias{makessp}
\title{
Makes Objects to Fit Smoothing Splines with Parametric Effects
}
\description{
This function creates a list containing the necessary information to fit a smoothing spline with parametric effects (see \code{\link{bigssp}}).
}
\usage{
makessp(formula,data=NULL,type=NULL,nknots=NULL,rparm=NA,
lambdas=NULL,skip.iter=TRUE,se.fit=FALSE,rseed=1234,
gcvopts=NULL,knotcheck=TRUE,thetas=NULL,weights=NULL,
random=NULL,remlalg=c("FS","NR","EM","none"),remliter=500,
remltol=10^-4,remltau=NULL)
}
\arguments{
\item{formula}{
An object of class "\code{formula}": a symbolic description of the model to be fitted (see Details and Examples for more information).
}
\item{data}{
Optional data frame, list, or environment containing the variables in \code{formula}.
}
\item{type}{
List of smoothing spline types for predictors in \code{formula} (see Details). Options include \code{type="cub"} for cubic, \code{type="acub"} for another cubic, \code{type="per"} for cubic periodic, \code{type="tps"} for cubic thin-plate, and \code{type="nom"} for nominal. Use \code{type="prm"} for parametric effect.
}
\item{nknots}{
Two possible options: (a) scalar giving total number of random knots to sample, or (b) vector indexing which rows of \code{data} to use as knots.
}
\item{rparm}{
List of rounding parameters for each predictor. See Details.
}
\item{lambdas}{
Vector of global smoothing parameters to try. Default uses \code{lambdas=10^-c(9:0)}
}
\item{skip.iter}{
Logical indicating whether to skip the iterative smoothing parameter update. Using \code{skip.iter=FALSE} should provide a more optimal solution, but the fitting time may be substantially longer. See Computational Details.
}
\item{se.fit}{
Logical indicating if the standard errors of the fitted values should be estimated.
}
\item{rseed}{
Random seed for knot sampling. Input is ignored if \code{nknots} is an input vector of knot indices. Set \code{rseed=NULL} to obtain a different knot sample each time, or set \code{rseed} to any positive integer to use a different seed than the default.
}
\item{gcvopts}{
Control parameters for optimization. List with 3 elements: (a) \code{maxit}: maximum number of algorithm iterations, (b) \code{gcvtol}: convergence tolerance for iterative GCV update, and (c) \code{alpha}: tuning parameter for GCV minimization. Default: \code{gcvopts=list(maxit=5,gcvtol=10^-5,alpha=1)}
}
\item{knotcheck}{
If \code{TRUE}, only unique knots are used (for stability).
}
\item{thetas}{
List of initial smoothing parameters for each predictor subspace. See Details.
}
\item{weights}{
Vector of positive weights for fitting (default is vector of ones).
}
\item{random}{
Adds random effects to model (see Random Effects section).
}
\item{remlalg}{
REML algorithm for estimating variance components (see Random Effects section). Input is ignored if \code{is.null(random)}.
}
\item{remliter}{
Maximum number of iterations for REML estimation of variance components. Input is ignored if \code{random=NULL}.
}
\item{remltol}{
Convergence tolerance for REML estimation of variance components. Input is ignored if \code{random=NULL}.
}
\item{remltau}{
Initial estimate of variance parameters for REML estimation of variance components. Input is ignored if \code{random=NULL}.
}
}
\details{
See \code{\link{bigssp}} and below example for more details.
}
\value{
An object of class "makessp", which can be input to \code{\link{bigssp}}.
}
\references{
Gu, C. (2013). \emph{Smoothing spline ANOVA models, 2nd edition}. New York: Springer.
Helwig, N. E. (2013). \emph{Fast and stable smoothing spline analysis of variance models for large samples with applications to electroencephalography data analysis}. Unpublished doctoral dissertation. University of Illinois at Urbana-Champaign.
Helwig, N. E. (2016). Efficient estimation of variance components in nonparametric mixed-effects models with large samples. \emph{Statistics and Computing, 26}, 1319-1336.
Helwig, N. E. (2017). \href{http://dx.doi.org/10.3389/fams.2017.00015}{Regression with ordered predictors via ordinal smoothing splines}. Frontiers in Applied Mathematics and Statistics, 3(15), 1-13.
Helwig, N. E. and Ma, P. (2015). Fast and stable multiple smoothing parameter selection in smoothing spline analysis of variance models with large samples. \emph{Journal of Computational and Graphical Statistics, 24}, 715-732.
Helwig, N. E. and Ma, P. (2016). Smoothing spline ANOVA for super-large samples: Scalable computation via rounding parameters. \emph{Statistics and Its Interface, 9}, 433-444.
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\section{Warning }{
When inputting a "makessp" class object into \code{\link{bigssp}}, the formula input to \code{bigssp} must be a nested version of the original formula input to \code{makessp}. In other words, you cannot add any new effects after a "makessp" object has been created, but you can drop (remove) effects from the model.
}
\examples{
########## EXAMPLE ##########
# function with two continuous predictors
set.seed(773)
myfun <- function(x1v,x2v){
sin(2*pi*x1v) + log(x2v+.1) + cos(pi*(x1v-x2v))
}
x1v <- runif(500)
x2v <- runif(500)
y <- myfun(x1v,x2v) + rnorm(500)
# fit 2 possible models (create information 2 separate times)
system.time({
intmod <- bigssp(y~x1v*x2v,type=list(x1v="cub",x2v="cub"),nknots=50)
addmod <- bigssp(y~x1v+x2v,type=list(x1v="cub",x2v="cub"),nknots=50)
})
# fit 2 possible models (create information 1 time)
system.time({
makemod <- makessp(y~x1v*x2v,type=list(x1v="cub",x2v="cub"),nknots=50)
int2mod <- bigssp(y~x1v*x2v,makemod)
add2mod <- bigssp(y~x1v+x2v,makemod)
})
# check difference (no difference)
crossprod( intmod$fitted.values - int2mod$fitted.values )
crossprod( addmod$fitted.values - add2mod$fitted.values )
}
| /man/makessp.Rd | no_license | cran/bigsplines | R | false | false | 5,922 | rd | \name{makessp}
\alias{makessp}
\title{
Makes Objects to Fit Smoothing Splines with Parametric Effects
}
\description{
This function creates a list containing the necessary information to fit a smoothing spline with parametric effects (see \code{\link{bigssp}}).
}
\usage{
makessp(formula,data=NULL,type=NULL,nknots=NULL,rparm=NA,
lambdas=NULL,skip.iter=TRUE,se.fit=FALSE,rseed=1234,
gcvopts=NULL,knotcheck=TRUE,thetas=NULL,weights=NULL,
random=NULL,remlalg=c("FS","NR","EM","none"),remliter=500,
remltol=10^-4,remltau=NULL)
}
\arguments{
\item{formula}{
An object of class "\code{formula}": a symbolic description of the model to be fitted (see Details and Examples for more information).
}
\item{data}{
Optional data frame, list, or environment containing the variables in \code{formula}.
}
\item{type}{
List of smoothing spline types for predictors in \code{formula} (see Details). Options include \code{type="cub"} for cubic, \code{type="acub"} for another cubic, \code{type="per"} for cubic periodic, \code{type="tps"} for cubic thin-plate, and \code{type="nom"} for nominal. Use \code{type="prm"} for parametric effect.
}
\item{nknots}{
Two possible options: (a) scalar giving total number of random knots to sample, or (b) vector indexing which rows of \code{data} to use as knots.
}
\item{rparm}{
List of rounding parameters for each predictor. See Details.
}
\item{lambdas}{
Vector of global smoothing parameters to try. Default uses \code{lambdas=10^-c(9:0)}
}
\item{skip.iter}{
Logical indicating whether to skip the iterative smoothing parameter update. Using \code{skip.iter=FALSE} should provide a more optimal solution, but the fitting time may be substantially longer. See Computational Details.
}
\item{se.fit}{
Logical indicating if the standard errors of the fitted values should be estimated.
}
\item{rseed}{
Random seed for knot sampling. Input is ignored if \code{nknots} is an input vector of knot indices. Set \code{rseed=NULL} to obtain a different knot sample each time, or set \code{rseed} to any positive integer to use a different seed than the default.
}
\item{gcvopts}{
Control parameters for optimization. List with 3 elements: (a) \code{maxit}: maximum number of algorithm iterations, (b) \code{gcvtol}: convergence tolerance for iterative GCV update, and (c) \code{alpha}: tuning parameter for GCV minimization. Default: \code{gcvopts=list(maxit=5,gcvtol=10^-5,alpha=1)}
}
\item{knotcheck}{
If \code{TRUE}, only unique knots are used (for stability).
}
\item{thetas}{
List of initial smoothing parameters for each predictor subspace. See Details.
}
\item{weights}{
Vector of positive weights for fitting (default is vector of ones).
}
\item{random}{
Adds random effects to model (see Random Effects section).
}
\item{remlalg}{
REML algorithm for estimating variance components (see Random Effects section). Input is ignored if \code{is.null(random)}.
}
\item{remliter}{
Maximum number of iterations for REML estimation of variance components. Input is ignored if \code{random=NULL}.
}
\item{remltol}{
Convergence tolerance for REML estimation of variance components. Input is ignored if \code{random=NULL}.
}
\item{remltau}{
Initial estimate of variance parameters for REML estimation of variance components. Input is ignored if \code{random=NULL}.
}
}
\details{
See \code{\link{bigssp}} and below example for more details.
}
\value{
An object of class "makessp", which can be input to \code{\link{bigssp}}.
}
\references{
Gu, C. (2013). \emph{Smoothing spline ANOVA models, 2nd edition}. New York: Springer.
Helwig, N. E. (2013). \emph{Fast and stable smoothing spline analysis of variance models for large samples with applications to electroencephalography data analysis}. Unpublished doctoral dissertation. University of Illinois at Urbana-Champaign.
Helwig, N. E. (2016). Efficient estimation of variance components in nonparametric mixed-effects models with large samples. \emph{Statistics and Computing, 26}, 1319-1336.
Helwig, N. E. (2017). \href{http://dx.doi.org/10.3389/fams.2017.00015}{Regression with ordered predictors via ordinal smoothing splines}. Frontiers in Applied Mathematics and Statistics, 3(15), 1-13.
Helwig, N. E. and Ma, P. (2015). Fast and stable multiple smoothing parameter selection in smoothing spline analysis of variance models with large samples. \emph{Journal of Computational and Graphical Statistics, 24}, 715-732.
Helwig, N. E. and Ma, P. (2016). Smoothing spline ANOVA for super-large samples: Scalable computation via rounding parameters. \emph{Statistics and Its Interface, 9}, 433-444.
}
\author{
Nathaniel E. Helwig <helwig@umn.edu>
}
\section{Warning }{
When inputting a "makessp" class object into \code{\link{bigssp}}, the formula input to \code{bigssp} must be a nested version of the original formula input to \code{makessp}. In other words, you cannot add any new effects after a "makessp" object has been created, but you can drop (remove) effects from the model.
}
\examples{
########## EXAMPLE ##########
# function with two continuous predictors
set.seed(773)
myfun <- function(x1v,x2v){
sin(2*pi*x1v) + log(x2v+.1) + cos(pi*(x1v-x2v))
}
x1v <- runif(500)
x2v <- runif(500)
y <- myfun(x1v,x2v) + rnorm(500)
# fit 2 possible models (create information 2 separate times)
system.time({
intmod <- bigssp(y~x1v*x2v,type=list(x1v="cub",x2v="cub"),nknots=50)
addmod <- bigssp(y~x1v+x2v,type=list(x1v="cub",x2v="cub"),nknots=50)
})
# fit 2 possible models (create information 1 time)
system.time({
makemod <- makessp(y~x1v*x2v,type=list(x1v="cub",x2v="cub"),nknots=50)
int2mod <- bigssp(y~x1v*x2v,makemod)
add2mod <- bigssp(y~x1v+x2v,makemod)
})
# check difference (no difference)
crossprod( intmod$fitted.values - int2mod$fitted.values )
crossprod( addmod$fitted.values - add2mod$fitted.values )
}
|
library(data.table)

# Reproducible 80/20 split of the meta-dataset by data_id: 80% of the
# dataset ids go to the tuning/training file, the remaining 20% to the
# test files (features and labels written separately).
metaData <- fread("data/kuhn2018_metaData.csv")

seedValue <- 25
set.seed(seedValue)

allIds <- metaData[, unique(data_id)]
trainIds <- sample(allIds, size = round(0.8 * length(allIds)), replace = FALSE)

isTrain <- metaData$data_id %in% trainIds
trainData <- metaData[isTrain]
testData <- metaData[!isTrain]

# Write train set, test features (target column dropped) and test labels.
fwrite(trainData, quote = FALSE,
       file = paste0("data/kuhn2018-train-", seedValue, ".csv"))
fwrite(testData[, -"target"], quote = FALSE,
       file = paste0("data/kuhn2018-test-", seedValue, ".csv"))
fwrite(testData[, .(target)], quote = FALSE,
       file = paste0("data/kuhn2018-test-labels-", seedValue, ".csv"))
| /SplitTuningData.R | permissive | Jakob-Bach/AGD-Lab-2019-Task-2 | R | false | false | 664 | r | library(data.table)
# Reproducible 80/20 split of the meta-dataset by data_id.
metaData <- fread("data/kuhn2018_metaData.csv")
seedValue <- 25
set.seed(seedValue)
# Sample 80% of the unique dataset ids for the tuning/training set.
datasetIds <- metaData[, unique(data_id)]
datasetTrainIds <- sample(datasetIds, size = round(0.8 * length(datasetIds)), replace = FALSE)
trainData <- metaData[data_id %in% datasetTrainIds]
testData <- metaData[!(data_id %in% datasetTrainIds)]
# Write train set, test features (target dropped) and test labels separately.
fwrite(trainData, quote = FALSE,
file = paste0("data/kuhn2018-train-", seedValue, ".csv"))
fwrite(testData[, -"target"], quote = FALSE,
file = paste0("data/kuhn2018-test-", seedValue, ".csv"))
fwrite(testData[, .(target)], quote = FALSE,
file = paste0("data/kuhn2018-test-labels-", seedValue, ".csv"))
|
#' Datasets providing building blocks for a location analysis
#'
#' Data used in the geomarketing chapter in Geocomputation with R.
#' See \url{http://geocompr.robinlovelace.net/location.html} for details.
#'
#' @format sf data frame objects (simple-features data frames)
#'
#' @aliases metro_names shops
#' @examples \dontrun{
#' # Download and read the census grid data used in the chapter:
#' download.file("https://tinyurl.com/ybtpkwxz",
#' destfile = "census.zip", mode = "wb")
#' unzip("census.zip") # unzip the files
#' census_de = readr::read_csv2(list.files(pattern = "Gitter.csv"))
#' }
"census_de"
| /R/location.R | no_license | Nowosad/spDataLarge | R | false | false | 516 | r | #' Datasets providing building blocks for a location analysis
#'
#' Data used in the geomarketing chapter in Geocomputation with R.
#' See \url{http://geocompr.robinlovelace.net/location.html} for details.
#'
#' @format sf data frame objects (simple-features data frames)
#'
#' @aliases metro_names shops
#' @examples \dontrun{
#' # Download and read the census grid data used in the chapter:
#' download.file("https://tinyurl.com/ybtpkwxz",
#' destfile = "census.zip", mode = "wb")
#' unzip("census.zip") # unzip the files
#' census_de = readr::read_csv2(list.files(pattern = "Gitter.csv"))
#' }
"census_de"
|
library(caret)
# Load the processed dataset. Columns 1-2 are excluded from the feature
# matrix below; factor_index lists column 2 and columns 26+ as categorical.
# NOTE(review): column layout inferred from the indexing -- confirm.
total_data <- read.csv("all_data_process.csv")
factor_index <- colnames(total_data)[c(2,26:ncol(total_data))]
# crate_function.R provides get_complete_data(), filiter_variable(),
# trainCall() and printCall() used in this script.
source("crate_function.R")
# Impute missing values with predictive mean matching ("pmm"), then
# coerce every column to numeric for modeling.
pmm_data <- get_complete_data(total_data[,c(-1,-2)], factor_index = factor_index[-1], imputation_methods = "pmm")
pmm_data <- as.data.frame(sapply(pmm_data, as.numeric))
filite_data <- filiter_variable(pmm_data)
filite_data$y <- total_data$y
# Cross-validated recursive feature elimination (random-forest ranking);
# keep the 10 top-ranked predictors for model training.
pro <- rfe(factor(y)~., filite_data, sizes = seq(8,ncol(filite_data)-1, 2),
rfeControl=rfeControl(functions = rfFuncs, method = "cv"))
train_data <- filite_data[, c("y", pro$optVariables[1:10])]
# Candidate learners to screen via the project helper trainCall().
selected_m <- c("C5.0", "dnn", "knn", "ORFlog", "ranger", "rf")
library(parallel)
library(doParallel)
cl <- makeCluster(4)
registerDoParallel(cl)
# caret parallelizes internally through the registered doParallel
# backend, so a plain lapply over the learner names is used here.
t2 <- lapply(selected_m, function(x){trainCall(x, train_data)})
##clusterExport(cl, varlist = "trainCall")
##t2 <- parLapply(cl, selected_m, function(x){trainCall(x, train_data)})
stopCluster(cl)
registerDoSEQ()
# Print a summary for each fitted candidate model.
lapply(1:length(t2), function(x){printCall(x, selected_m, t2)})
########## tune the random forest: grid search over mtry (via caret)
########## and ntree (manual outer loop)
library(doParallel)
cl <- makeCluster(8)
registerDoParallel(cl)

tunegrid <- expand.grid(.mtry = c(1:15))
modellist <- list()
for (ntree in seq(500, 2500, 500)) {
  set.seed(1234)
  # BUG FIX: `ntree` was never passed to train(), so all five loop
  # iterations fitted identical models. caret forwards extra arguments
  # to randomForest(), so ntree must be supplied explicitly.
  fit <- train(factor(y) ~ .,
               data = train_data,
               method = "rf",
               tuneGrid = tunegrid,
               ntree = ntree,
               trControl = trainControl(method = "cv", number = 5,
                                        allowParallel = TRUE, verbose = TRUE,
                                        savePredictions = TRUE))
  key <- toString(ntree)
  modellist[[key]] <- fit
}
stopCluster(cl)
registerDoSEQ()

# NOTE(review): `fit` is the model from the LAST ntree value (2500), not
# necessarily the best; compare resamples(modellist) before exporting.
rf_ml_finalmodel <- fit$finalModel
plot(fit)
saveRDS(rf_ml_finalmodel, "rf_test.rds")
| /r-class/caret_analyse_script.R | no_license | 404563471/trainning-code | R | false | false | 1,705 | r | library(caret)
# Load the processed dataset. Columns 1-2 are excluded from the feature
# matrix below; factor_index lists the categorical columns.
total_data <- read.csv("all_data_process.csv")
factor_index <- colnames(total_data)[c(2,26:ncol(total_data))]
# crate_function.R provides get_complete_data(), filiter_variable(),
# trainCall() and printCall().
source("crate_function.R")
# Impute missing values (predictive mean matching), coerce to numeric.
pmm_data <- get_complete_data(total_data[,c(-1,-2)], factor_index = factor_index[-1], imputation_methods = "pmm")
pmm_data <- as.data.frame(sapply(pmm_data, as.numeric))
filite_data <- filiter_variable(pmm_data)
filite_data$y <- total_data$y
# Cross-validated recursive feature elimination; keep the top 10 predictors.
pro <- rfe(factor(y)~., filite_data, sizes = seq(8,ncol(filite_data)-1, 2),
rfeControl=rfeControl(functions = rfFuncs, method = "cv"))
train_data <- filite_data[, c("y", pro$optVariables[1:10])]
# Screen several candidate learners via trainCall().
selected_m <- c("C5.0", "dnn", "knn", "ORFlog", "ranger", "rf")
library(parallel)
library(doParallel)
cl <- makeCluster(4)
registerDoParallel(cl)
t2 <- lapply(selected_m, function(x){trainCall(x, train_data)})
##clusterExport(cl, varlist = "trainCall")
##t2 <- parLapply(cl, selected_m, function(x){trainCall(x, train_data)})
stopCluster(cl)
registerDoSEQ()
lapply(1:length(t2), function(x){printCall(x, selected_m, t2)})
########## tune the random forest model
library(doParallel)
cl <- makeCluster(8)
registerDoParallel(cl)
tunegrid <- expand.grid(.mtry=c(1:15))
modellist <- list()
# NOTE(review): `ntree` is never passed to train(), so all five loop
# iterations fit identical models (caret only varies mtry here); pass
# ntree = ntree to train() for the outer loop to have an effect.
for (ntree in seq(500,2500,500)) {
set.seed(1234)
fit <- train(factor(y) ~ . ,
data = train_data,
method = "rf",
tuneGrid=tunegrid,
trControl = trainControl(method="cv", number = 5,
allowParallel = TRUE, verbose = TRUE, savePredictions = T))
key <- toString(ntree)
modellist[[key]] <- fit
}
stopCluster(cl)
registerDoSEQ()
# NOTE(review): `fit` is the model from the last loop iteration only.
rf_ml_finalmodel <- fit$finalModel
plot(fit)
saveRDS(rf_ml_finalmodel, "rf_test.rds")
|
# Reads a labelled .csv, drops zero-variance feature columns, and writes a
# z-score normalized copy (label preserved as the first column).
# FIX: removed `rm(list=ls())` -- wiping the caller's workspace is a side
# effect scripts should never have.

# reproduce the result
set.seed(123)

## libraries used below (caret provides preProcess/predict)
library(mlbench)
library(caret)
library(corrplot)

# Input file name supplied interactively (must include the .csv extension);
# the first column is assumed to hold the label.
inputDataFile <- readline("please enter data file to be filter with label in extension (.csv): ")
data <- read.csv(inputDataFile, header = TRUE)

# initial feature count (first column is the label)
print("initial columns")
print(ncol(data) - 1)

# Drop constant (zero standard deviation) columns.
# NOTE(review): Filter(sd, data) errors on non-numeric columns -- assumes
# every column, including the label, is numeric; confirm for the inputs used.
nd <- Filter(sd, data)
print("After removing zero std value columns")
print(ncol(nd) - 1)

# Z-score normalization (zero mean, unit variance) of the feature columns.
x <- ncol(nd)
preObj <- preProcess(nd[, 2:x], method = c("center", "scale"))
normalized_Data <- predict(preObj, nd[, 2:x])
new_data <- normalized_Data

print("after removing zero variance columns")
print(ncol(new_data))

# Re-attach the label as the first column, restoring its original name.
FinalMatrix <- cbind(data[, 1], new_data)
names(FinalMatrix)[1] <- names(data[1])

# Write the processed data next to the input file.
outputFile <- paste0("NormalizedAndRemovedZeroVar", inputDataFile)
write.csv(FinalMatrix, file = outputFile, row.names = FALSE)
| /SourceCodes/DataNormailzedAndRemovedZeroVarColumn.R | no_license | ranjan1010/DAG_BarmanEtal2019 | R | false | false | 1,306 | r | # This functions helps us to remove previously created workspace variables....
# NOTE(review): wiping the caller's workspace is a side effect scripts
# should avoid; consider removing this line.
rm(list=ls());
# Fixed seed so the run is reproducible.
set.seed(123)
## libraries used below (caret provides preProcess/predict)
library(mlbench)
library(caret)
library(corrplot)
# Ask the user for the input file; the first column is assumed to be the
# label, the remaining columns the features.
inputDataFile <- readline("please enter data file to be filter with label in extension (.csv): ");
data = read.csv(inputDataFile, header = TRUE)
# initial feature count (first column is the label)
print ("initial columns")
print (ncol(data) - 1)
# Drop constant (zero standard deviation) columns.
# NOTE(review): Filter(sd, data) errors on non-numeric columns -- assumes
# every column, including the label, is numeric; confirm for the inputs.
nd = Filter(sd, data)
# feature count after removing zero-variance columns
print ("After removing zero std value columns")
print (ncol(nd) - 1)
# Z-score normalization (zero mean, unit variance) of columns 2..x.
x = ncol(nd)
preObj <- preProcess(nd[,2:x ], method=c("center", "scale"))
normalized_Data <- predict(preObj, nd[,2:x])
new_data = normalized_Data;
print ("after removing zero variance columns")
print (ncol(new_data))
# Re-attach the label as the first column ...
FinalMatrix = cbind(data[,1],new_data);
# ... and restore its original column name.
names(FinalMatrix)[1] = names(data[1]);
# Write the processed data next to the input file.
outputFile <- paste0("NormalizedAndRemovedZeroVar", inputDataFile);
write.csv(FinalMatrix, file = outputFile, row.names = FALSE)
|
# Simulate one two-arm in-silico trial and draw a 3-panel figure:
# (1) Kaplan-Meier survival curves, (2) smoothed hazard estimates with
# confidence bands, (3) the treatment/control hazard ratio over time.
# The simulated survival data are also written to data/<model>_<main>.csv.gz.
#
# Arguments (rk_* = raise_killing, lg_* = lower_growth; _c = control arm,
# _t = treatment arm): get_s() generates survival times, truncate_survival()
# and t_col() are project helpers defined elsewhere.
# NOTE(review): the same seed is reused for both arms so the arms are
# paired draws -- presumably intentional for variance reduction; confirm.
plt <- function( model="M1", rk_c = 1, rk_t=1, rk_t_delay = 0, lg_c = 1, lg_t = 1, seed=3, N=600,
chemo_duration_c=6,
chemo_duration_t=6,
col_c="gray", col_t="black", main="Chemotherapy vs. placebo", xlab="Time (months)",
get_s=get_s_M1, baseline_pars=baseline_m1, mfrow=c(1,3),
treatment_schedule=NULL ){
require( survival )
require( bshazard )
# One row of three panels, shared typography/margins.
par( family="sans", mfrow=mfrow, cex=1, bty="n", mar=c(3,4.5,1.2,0.2), mgp=c(1.8,.7,0) )
# Control arm: all events observed (status = 1), truncated at follow-up end.
set.seed( seed )
d_control <- truncate_survival( data.frame( time=get_s(
mean=baseline_pars[1], sd=baseline_pars[2],
N=N, raise_killing=rk_c, chemo_duration=chemo_duration_c, lower_growth=lg_c ),
status=1, treatment="C" ) )
# Treatment arm: same seed -> paired simulation draws.
set.seed( seed )
d_treatment <- truncate_survival( data.frame( time=get_s( N=N,
mean=baseline_pars[1], sd=baseline_pars[2],
raise_killing=rk_t, treatment_delay=rk_t_delay,
lower_growth=lg_t, chemo_duration=chemo_duration_t ),
status=1, treatment="T" ) )
d <- rbind( d_control, d_treatment )
# Persist the simulated trial; the file name is derived from `main`
# (punctuation stripped, spaces -> underscores, lower case).
write.csv(d, file=gzfile(paste0("data/",model,"_",
gsub(" ","_",tolower(gsub("[^[:alnum:] ]", "", main))),".csv.gz")),row.names=FALSE)
# Panel 1: Kaplan-Meier curves; negative ylim leaves room for the
# treatment-schedule annotation below the axis.
fit <- survfit( Surv( time, status ) ~ treatment, d )
plot( fit, xaxt="n", yaxt="n", xlab=xlab, ylab="",
xlim=c(-1.4,24), ylim=c(-.3,1), col=c(col_c,col_t) )
axis( 1, at=seq(0,24,6) )
axis( 2, las=2, at=seq(0,1,.25) )
mtext( paste0(main," (",model,")"), 3, line=-1, adj=0.5, outer=TRUE )
mtext( " Survival", 2, line=3 )
text( -.8, -.05, "T", col=col_t )
text( -.8, -.25, "C", col=col_c )
# Optional caller-supplied annotation (segments marking dosing windows);
# passed as an unevaluated expression and evaluated here.
if( !is.null(treatment_schedule) ){
eval( treatment_schedule )
}
# Panel 2: estimate the hazard function non-parametrically (bshazard)
# for each arm and draw it with a shaded 95% confidence band.
fit_control <- bshazard(Surv(time, status)~1, data = d_control, nbin = 48, alpha = .05, lambda=500 )
fit_treatment <- bshazard(Surv(time, status)~1, data = d_treatment, nbin = 48, alpha = .05, lambda=500 )
with( fit_control, {
plot( hazard ~ time, type='l', xlab=xlab, col=col_c,
ylab="Hazard estimate\n ", xlim=c(0,24),
ylim=c(0,max(c(fit_control$upper.ci,fit_treatment$upper.ci))), xaxt="n", yaxt="n" );
polygon( c(time,rev(time)), c(lower.ci,rev(upper.ci)), border=NA, col=t_col(col_c) )
} )
axis( 1, at=seq(0,24,6) )
axis( 2, las=2 )
with( fit_treatment, {
lines( hazard ~ time, col=col_t )
polygon( c(time,rev(time)), c(lower.ci,rev(upper.ci)), border=NA, col=t_col(col_t) )
})
# Panel 3: hazard ratio (treatment / control) on a log scale; the dashed
# line marks HR = 1 and the red dot the mean HR at the right edge.
# NOTE(review): the "C"/"T" labels on the up/down arrows -- confirm the
# intended reading of the direction annotation.
plot( fit_treatment$time, fit_treatment$hazard / fit_control$hazard, type='l',
xlab=xlab, col="gray", ylab="Hazard ratio", log="y", xaxt="n", yaxt="n", ylim=c(0.2,5), lwd=2 )
axis( 1, at=seq(0,24,6) )
axis( 2, las=2 )
arrows( c(24,24), c(1,1), c(24,24), c(3,1/3), length=.05 )
text( 24, 3, "C", adj=c(0.5,-.4) )
text( 24, 1/3, "T", adj=c(0.5,1.4) )
points( 24, mean(fit_treatment$hazard / fit_control$hazard), col="red", pch=19, xpd=TRUE )
abline(h=1, lty=2)
}
# Draw the full series of trial comparisons for one model: chemo vs.
# placebo, chemoimmunotherapy vs. chemo, immunotherapy vs. chemo, and
# induction chemo + immunotherapy vs. immunotherapy. Each call to plt()
# gets a `treatment_schedule` expression that draws the dosing windows
# (black = chemo, firebrick = immunotherapy) under the survival panel.
# The survival-time generator is looked up by name as get_s_<model>.
plt_series <- function( model, baseline_pars, lower_growth, raise_killing ){
get_s <- get(paste0("get_s_",model))
plt( lg_t=lower_growth, xlab="", get_s=get_s, baseline_pars=baseline_pars, treatment_schedule = {
segments( 0, -.05, 6, -.05, lwd=1.5, lend=1 )
}, model=model )
#plt( rk_t=raise_killing, get_s=get_s, main="Immunotherapy vs. Placebo", col_t="firebrick", xlab="",
# baseline_pars=baseline_pars, treatment_schedule = {
# segments( 0, -.05, 24, -.05, lwd=1.5, lend=1, col="firebrick" )
#} )
# FIX: the original passed `col="black"`, which only reached the intended
# `col_c` parameter through R's partial argument matching; spelled out.
plt( rk_t=raise_killing, lg_t=lower_growth, lg_c=lower_growth, get_s=get_s, baseline_pars=baseline_pars,
main="Chemoimmunotherapy vs. Chemotherapy", treatment_schedule = {
segments( 0, -.25, 6, -.25, lwd=2, lend=1 )
segments( 0, -.05+.02, 6, -.05+.02, lwd=1.5, lend=1, col="black" )
segments( 0, -.05-.02, 24, -.05-.02, lwd=1.5, lend=1, col="firebrick" )
}, col_t="darkcyan", col_c="black", xlab="", model=model )
plt( lg_c=lower_growth, rk_t=raise_killing, main="Immunotherapy vs. Chemotherapy", get_s=get_s, baseline_pars=baseline_pars,
treatment_schedule = {
segments( 0, -.25, 6, -.25, lwd=1.5, lend=1 )
segments( 0, -.05, 24, -.05, lwd=1.5, lend=1, col="firebrick" )
}, col_t="firebrick", col_c="black", xlab="", model=model )
plt( rk_t=raise_killing, lg_t=lower_growth, rk_c=raise_killing, rk_t_delay=6,
main="Induction chemotherapy, followed by immunotherapy vs. Immunotherapy",
get_s=get_s, baseline_pars=baseline_pars, treatment_schedule = {
segments( 0, -.25, 24, -.25, lwd=1.5, lend=1, col="firebrick" )
segments( 0, -.05, 6, -.05, lwd=1.5, lend=1, col="black" )
segments( 6, -.05, 24, -.05, lwd=1.5, lend=1, col="firebrick" )
}, col_t="darkcyan", col_c="firebrick", model=model )
}
| /figures/figure-4/code/helper-simulated-trial.R | permissive | jtextor/insilico-trials | R | false | false | 4,618 | r |
# Simulate a two-arm survival trial under one tumor-growth model and draw a
# three-panel figure: (1) Kaplan-Meier curves for control vs. treatment,
# (2) non-parametric hazard estimates per arm, (3) hazard ratio over time.
# The simulated trial data are also written to data/<model>_<main>.csv.gz.
#
# NOTE(review): relies on helpers defined elsewhere in the project:
# truncate_survival(), get_s_M1, baseline_m1, t_col(). Parameter semantics
# below are inferred from usage -- confirm against the get_s_* definitions.
#   model               Character label used in the title and output file name.
#   rk_c, rk_t          "raise killing" multipliers for control / treatment arm.
#   rk_t_delay          Delay before the treatment arm's killing effect starts.
#   lg_c, lg_t          "lower growth" multipliers for control / treatment arm.
#   seed, N             RNG seed (reused for both arms) and patients per arm.
#   chemo_duration_c/_t Chemotherapy duration passed to the simulator, per arm.
#   col_c, col_t        Plot colors for control / treatment curves.
#   get_s               Survival-time simulator for the chosen model.
#   baseline_pars       Vector used as c(mean, sd) baseline parameters.
#   treatment_schedule  Expression drawing the dosing bars below the KM plot;
#                       passed unevaluated (lazy promise), see note below.
plt <- function( model="M1", rk_c = 1, rk_t=1, rk_t_delay = 0, lg_c = 1, lg_t = 1, seed=3, N=600,
chemo_duration_c=6,
chemo_duration_t=6,
col_c="gray", col_t="black", main="Chemotherapy vs. placebo", xlab="Time (months)",
get_s=get_s_M1, baseline_pars=baseline_m1, mfrow=c(1,3),
treatment_schedule=NULL ){
# NOTE(review): prefer library() over require() -- require() returns FALSE
# instead of erroring when the package is missing.
require( survival )
require( bshazard )
par( family="sans", mfrow=mfrow, cex=1, bty="n", mar=c(3,4.5,1.2,0.2), mgp=c(1.8,.7,0) )
# Same seed for both arms: common random numbers, so arms differ only in
# the treatment-effect parameters.
set.seed( seed )
d_control <- truncate_survival( data.frame( time=get_s(
mean=baseline_pars[1], sd=baseline_pars[2],
N=N, raise_killing=rk_c, chemo_duration=chemo_duration_c, lower_growth=lg_c ),
status=1, treatment="C" ) )
set.seed( seed )
d_treatment <- truncate_survival( data.frame( time=get_s( N=N,
mean=baseline_pars[1], sd=baseline_pars[2],
raise_killing=rk_t, treatment_delay=rk_t_delay,
lower_growth=lg_t, chemo_duration=chemo_duration_t ),
status=1, treatment="T" ) )
d <- rbind( d_control, d_treatment )
# Side effect: persist the simulated trial; file name is derived from the
# plot title (lowercased, punctuation stripped, spaces -> underscores).
write.csv(d, file=gzfile(paste0("data/",model,"_",
gsub(" ","_",tolower(gsub("[^[:alnum:] ]", "", main))),".csv.gz")),row.names=FALSE)
# Panel 1: Kaplan-Meier curves. Negative xlim/ylim leave room for the
# "T"/"C" labels and the treatment-schedule bars below the axis.
fit <- survfit( Surv( time, status ) ~ treatment, d )
plot( fit, xaxt="n", yaxt="n", xlab=xlab, ylab="",
xlim=c(-1.4,24), ylim=c(-.3,1), col=c(col_c,col_t) )
axis( 1, at=seq(0,24,6) )
axis( 2, las=2, at=seq(0,1,.25) )
mtext( paste0(main," (",model,")"), 3, line=-1, adj=0.5, outer=TRUE )
mtext( " Survival", 2, line=3 )
text( -.8, -.05, "T", col=col_t )
text( -.8, -.25, "C", col=col_c )
# NOTE(review): is.null() forces the lazily-passed treatment_schedule
# expression, so the schedule is drawn as a side effect of evaluating the
# condition itself; verify whether the eval() in the branch body is still
# needed, and whether the branch even executes for drawing expressions.
if( !is.null(treatment_schedule) ){
eval( treatment_schedule )
}
# Panel 2: estimate the hazard function non-parametrically per arm.
fit_control <- bshazard(Surv(time, status)~1, data = d_control, nbin = 48, alpha = .05, lambda=500 )
fit_treatment <- bshazard(Surv(time, status)~1, data = d_treatment, nbin = 48, alpha = .05, lambda=500 )
# Shaded polygons show the pointwise confidence band for each hazard curve.
with( fit_control, {
plot( hazard ~ time, type='l', xlab=xlab, col=col_c,
ylab="Hazard estimate\n ", xlim=c(0,24),
ylim=c(0,max(c(fit_control$upper.ci,fit_treatment$upper.ci))), xaxt="n", yaxt="n" );
polygon( c(time,rev(time)), c(lower.ci,rev(upper.ci)), border=NA, col=t_col(col_c) )
} )
axis( 1, at=seq(0,24,6) )
axis( 2, las=2 )
with( fit_treatment, {
lines( hazard ~ time, col=col_t )
polygon( c(time,rev(time)), c(lower.ci,rev(upper.ci)), border=NA, col=t_col(col_t) )
})
# Panel 3: hazard ratio (treatment / control) on a log scale, with arrows
# marking which direction favors each arm and the mean ratio as a red dot.
plot( fit_treatment$time, fit_treatment$hazard / fit_control$hazard, type='l',
xlab=xlab, col="gray", ylab="Hazard ratio", log="y", xaxt="n", yaxt="n", ylim=c(0.2,5), lwd=2 )
axis( 1, at=seq(0,24,6) )
axis( 2, las=2 )
arrows( c(24,24), c(1,1), c(24,24), c(3,1/3), length=.05 )
text( 24, 3, "C", adj=c(0.5,-.4) )
text( 24, 1/3, "T", adj=c(0.5,1.4) )
points( 24, mean(fit_treatment$hazard / fit_control$hazard), col="red", pch=19, xpd=TRUE )
abline(h=1, lty=2)
}
# Draw the full series of simulated-trial comparison figures for one model.
# Each plt() call simulates a two-arm survival trial and draws three panels
# (Kaplan-Meier, hazard estimates, hazard ratio); the treatment_schedule
# expressions are passed unevaluated and draw the dosing bars under the
# survival curves.
#
# model          Character suffix selecting the simulator function get_s_<model>.
# baseline_pars  Baseline parameter vector forwarded to plt() (used as c(mean, sd)).
# lower_growth   Effect size for growth-lowering therapy (chemotherapy).
# raise_killing  Effect size for killing-raising therapy (immunotherapy).
plt_series <- function( model, baseline_pars, lower_growth, raise_killing ){
  get_s <- get(paste0("get_s_", model))

  # 1. Chemotherapy vs. placebo: growth lowered in the treatment arm only.
  plt( lg_t=lower_growth, xlab="", get_s=get_s, baseline_pars=baseline_pars, treatment_schedule = {
    segments( 0, -.05, 6, -.05, lwd=1.5, lend=1 )
  }, model=model )

  # (Immunotherapy vs. placebo panel intentionally disabled.)
  #plt( rk_t=raise_killing, get_s=get_s, main="Immunotherapy vs. Placebo", col_t="firebrick", xlab="",
  #     baseline_pars=baseline_pars, treatment_schedule = {
  #  segments( 0, -.05, 24, -.05, lwd=1.5, lend=1, col="firebrick" )
  #} )

  # 2. Chemoimmunotherapy vs. chemotherapy.
  # FIX: the control color was previously supplied as the ambiguous partial
  # argument name `col`; spelled out as `col_c` to avoid fragile partial
  # argument matching (behavior unchanged).
  plt( rk_t=raise_killing, lg_t=lower_growth, lg_c=lower_growth, get_s=get_s, baseline_pars=baseline_pars,
       main="Chemoimmunotherapy vs. Chemotherapy", treatment_schedule = {
    segments( 0, -.25, 6, -.25, lwd=2, lend=1 )
    segments( 0, -.05+.02, 6, -.05+.02, lwd=1.5, lend=1, col="black" )
    segments( 0, -.05-.02, 24, -.05-.02, lwd=1.5, lend=1, col="firebrick" )
  }, col_t="darkcyan", col_c="black", xlab="", model=model )

  # 3. Immunotherapy vs. chemotherapy.
  plt( lg_c=lower_growth, rk_t=raise_killing, main="Immunotherapy vs. Chemotherapy", get_s=get_s,
       baseline_pars=baseline_pars, treatment_schedule = {
    segments( 0, -.25, 6, -.25, lwd=1.5, lend=1 )
    segments( 0, -.05, 24, -.05, lwd=1.5, lend=1, col="firebrick" )
  }, col_t="firebrick", col_c="black", xlab="", model=model )

  # 4. Induction chemotherapy followed by immunotherapy vs. immunotherapy alone
  #    (treatment arm's killing effect is delayed by 6 months).
  plt( rk_t=raise_killing, lg_t=lower_growth, rk_c=raise_killing, rk_t_delay=6,
       main="Induction chemotherapy, followed by immunotherapy vs. Immunotherapy",
       get_s=get_s, baseline_pars=baseline_pars, treatment_schedule = {
    segments( 0, -.25, 24, -.25, lwd=1.5, lend=1, col="firebrick" )
    segments( 0, -.05, 6, -.05, lwd=1.5, lend=1, col="black" )
    segments( 6, -.05, 24, -.05, lwd=1.5, lend=1, col="firebrick" )
  }, col_t="darkcyan", col_c="firebrick", model=model )
}
|
# dplot3.box.R
# ::rtemis::
# 2021 E.D. Gennatas lambdamd.org

#' Interactive Boxplots & Violin plots
#'
#' Draw interactive boxplots or violin plots using \pkg{plotly}
#'
#' @param x Vector or List of vectors: Input
#' @param time Date or date-time vector, length = NROW(x): If provided, boxplots are
#' binned by time period (see \code{time.bin})
#' @param time.bin Character: "year", "quarter", "month", or "day": Time period to bin by.
#' Only used if \code{time} is provided
#' @param type Character: "box" or "violin". Default = "box"
#' @param group Factor (or coercible to factor): If provided, draw grouped boxplots
#' @param main Character: Plot title. Default = NULL
#' @param xlab Character: x-axis label. Default = NULL
#' @param ylab Character: y-axis label. Default = NULL
#' @param col Color, vector: Color for boxes. Default NULL, which will draw colors from \code{palette}
#' @param alpha Float (0, 1]: Transparency for box colors. Default = .8
#' @param bg Color: Background color. Default = "white"
#' @param plot.bg Color: Background color for plot area. Default = "white"
#' @param theme Character: Theme to use: "light", "dark", "lightgrid", "darkgrid". Default = "lightgrid"
#' @param palette Character: Name of \pkg{rtemis} palette to use. Default = "rtCol1". Only used if \code{col = NULL}
#' @param quartilemethod Character: "linear", "exclusive", "inclusive"
#' @param boxpoints Character or FALSE: "all", "suspectedoutliers", "outliers"
#' See \url{https://plotly.com/r/box-plots/#choosing-the-algorithm-for-computing-quartiles}
#' @param violin.box Logical: If TRUE and \code{type = "violin"}, draw a box inside each violin
#' @param xnames Character, vector, length = NROW(x): x-axis names. Default = NULL, which
#' tries to set names appropriately
#' @param labelify Logical: If TRUE, labelify x names. Default = TRUE
#' @param order.by.fn Function: If defined, order boxes by increasing value of this function
#' (e.g. median). Default = NULL
#' @param font.size Float: Font size for all labels. Default = 16
#' @param legend Logical: If TRUE, draw legend. Default = TRUE
#' @param legend.col Color: Legend text color. Default = NULL, determined by theme
#' @param legend.xy Float, vector, length 2: Relative x, y position for legend. Default = NULL, which places
#' the legend top right beside the plot area. For example, c(0, 1) places the legend top left within the plot area
#' @param xaxis.type Character: "linear", "log", "date", "category", "multicategory"
#' Default = "category"
#' @param margin Named list: plot margins. Default = \code{list(t = 35)}
#' @param filename Character: Path to file to save plot image. Default = NULL
#' @param print.plot Logical: If TRUE, print plot. Default = TRUE
#'
#' @author E.D. Gennatas
#' @export
#' @examples
#' \dontrun{
#' # A.1 Box plot of 4 variables
#' dplot3.box(iris[, 1:4])
#' # A.2 Grouped Box plot
#' dplot3.box(iris[, 1:4], group = iris$Species)
#' # B. Boxplot split by time periods
#' # Synthetic data with an instantaneous shift in distributions
#' set.seed(2021)
#' dat1 <- data.frame(alpha = rnorm(200, 0), beta = rnorm(200, 2), gamma = rnorm(200, 3))
#' dat2 <- data.frame(alpha = rnorm(200, 5), beta = rnorm(200, 8), gamma = rnorm(200, -3))
#' x <- rbind(dat1, dat2)
#' startDate <- as.Date("2019-12-04")
#' endDate <- as.Date("2021-03-31")
#' time <- seq(startDate, endDate, length.out = 400)
#' dplot3.box(x, time, "year")
#' dplot3.box(x, time, "quarter")
#' dplot3.box(x, time, "month")
#' # (Note how the boxplots widen when the period includes data from both dat1 and dat2)
#' }
dplot3.box <- function(x,
                       time = NULL,
                       time.bin = c("year", "quarter", "month", "day"),
                       type = c("box", "violin"),
                       group = NULL,
                       main = NULL,
                       xlab = "",
                       ylab = NULL,
                       col = NULL,
                       alpha = .6,
                       bg = NULL,
                       plot.bg = NULL,
                       theme = getOption("rt.theme", "lightgrid"),
                       palette = getOption("rt.palette", "rtCol1"),
                       boxpoints = "outliers",
                       quartilemethod = "linear",
                       width = 0,
                       violin.box = TRUE,
                       xnames = NULL,
                       labelify = TRUE,
                       order.by.fn = NULL,
                       font.size = 16,
                       legend = NULL,
                       legend.col = NULL,
                       legend.xy = NULL,
                       xaxis.type = "category",
                       margin = list(t = 35, pad = 0),
                       automargin.x = TRUE,
                       automargin.y = TRUE,
                       boxgap = NULL,
                       boxgroupgap = NULL,
                       displayModeBar = TRUE,
                       filename = NULL,
                       file.width = 500,
                       file.height = 500,
                       print.plot = TRUE, ...) {

  # Dependencies ====
  if (!depCheck("plotly", verbose = FALSE)) {
    cat("\n"); stop("Please install dependencies and try again")
  }

  # Arguments ====
  type <- match.arg(type)
  main <- paste0("<b>", main, "</b>")

  # Convert vector or matrix/data.frame input to a named list of vectors
  if (!is.list(x)) {
    if (is.numeric(x)) {
      # Single vector: use the calling expression as its name
      .names <- deparse(substitute(x))
      x <- list(x)
      names(x) <- .names
    } else {
      .names <- colnames(x)
      x <- lapply(seq(NCOL(x)), function(i) x[, i])
      names(x) <- .names
    }
  }

  # Order by fn ====
  # FIX: use identical() instead of `!=` so that a function passed as
  # order.by.fn (as documented) does not error on comparison with "none".
  if (!is.null(order.by.fn) && !identical(order.by.fn, "none")) {
    if (is.null(time)) {
      if (is.list(x)) {
        .order <- order(sapply(x, order.by.fn, na.rm = TRUE))
        if (is.data.frame(x)) {
          x <- x[, .order]
        } else {
          x <- x[names(x)[.order]]
        }
      }
      if (!is.null(xnames)) xnames <- xnames[.order]
    } else {
      # Ordering is meaningless when boxes are binned by time period
      warning("Ignoring order.by.fn with time data")
      order.by.fn <- NULL
    }
  }

  if (!is.null(group)) group <- factor(group)
  n.groups <- if (is.null(group)) length(x) else length(levels(group))

  .xnames <- xnames
  if (is.null(.xnames)) {
    .xnames <- names(x)
    if (is.null(.xnames)) .xnames <- paste0("Feature", seq(n.groups))
    if (labelify) .xnames <- labelify(.xnames)
  }

  # Colors ====
  if (is.character(palette)) palette <- rtPalette(palette)
  if (is.null(col)) col <- recycle(palette, seq(n.groups))[seq(n.groups)]
  # Keep colors aligned with the reordered boxes (see Order by fn above)
  if (!is.null(order.by.fn) && !identical(order.by.fn, "none")) {
    col <- col[.order]
  }

  # Theme ====
  extraargs <- list(...)
  if (is.character(theme)) {
    theme <- do.call(paste0("theme_", theme), extraargs)
  } else {
    # theme given as an object: overwrite its fields with any ... overrides
    for (i in seq(extraargs)) {
      theme[[names(extraargs)[i]]] <- extraargs[[i]]
    }
  }

  bg <- plotly::toRGB(theme$bg)
  plot.bg <- plotly::toRGB(theme$plot.bg)
  grid.col <- plotly::toRGB(theme$grid.col)
  tick.col <- plotly::toRGB(theme$tick.labels.col)
  labs.col <- plotly::toRGB(theme$labs.col)
  main.col <- plotly::toRGB(theme$main.col)
  # Derived
  if (is.null(legend.col)) legend.col <- labs.col

  if (is.null(time)) {
    if (is.null(group)) {
      # A.1 Single and multiple boxplots ====
      if (is.null(legend)) legend <- FALSE
      args <- list(y = x[[1]],
                   type = type,
                   name = .xnames[1],
                   line = list(color = plotly::toRGB(col[1])),
                   fillcolor = plotly::toRGB(col[1], alpha),
                   marker = list(color = plotly::toRGB(col[1], alpha)))
      if (type == "box") {
        args <- c(args, list(quartilemethod = quartilemethod,
                             boxpoints = boxpoints))
      }
      if (type == "violin") args$box <- list(visible = violin.box)
      plt <- do.call(plotly::plot_ly, args)
      # Add one trace per remaining variable
      if (n.groups > 1) {
        for (i in seq_len(n.groups)[-1]) {
          plt <- plotly::add_trace(plt, y = x[[i]],
                                   name = .xnames[i],
                                   line = list(color = plotly::toRGB(col[i])),
                                   fillcolor = plotly::toRGB(col[i], alpha),
                                   marker = list(color = plotly::toRGB(col[i], alpha)))
        }
      }
    } else {
      # A.2 Grouped boxplots ====
      if (is.null(legend)) legend <- TRUE
      dt <- cbind(data.table::as.data.table(x), group = group)
      dtlong <- data.table::melt(dt[, ID := seq(nrow(dt))], id.vars = c("ID", "group"))
      if (is.null(ylab)) ylab <- ""
      args <- list(data = dtlong,
                   type = type,
                   x = ~variable,
                   y = ~value,
                   color = ~group,
                   colors = col2hex(col))
      if (type == "box") {
        args <- c(args, list(quartilemethod = quartilemethod,
                             boxpoints = boxpoints,
                             alpha = alpha))
      }
      if (type == "violin") args$box <- list(visible = violin.box)
      # NOTE(review): 0:(NCOL(dt) - 2) yields NCOL(dt) - 1 tick values while
      # ticktext has length(.xnames); confirm these lengths agree.
      plt <- do.call(plotly::plot_ly, args) %>%
        plotly::layout(boxmode = "group",
                       xaxis = list(tickvals = 0:(NCOL(dt) - 2),
                                    ticktext = .xnames))
    }
  } else {
    # B. Time-binned boxplots ====
    time.bin <- match.arg(time.bin)
    if (is.null(xlab)) xlab <- ""
    if (is.null(ylab)) ylab <- ""
    if (is.null(legend)) legend <- TRUE
    dt <- data.table::as.data.table(x)
    # FIX: removed trailing empty argument in switch(); match.arg above
    # guarantees one of the named alternatives matches.
    dt[, timeperiod := factor(switch(time.bin,
                                     year = data.table::year(time),
                                     quarter = paste(data.table::year(time), quarters(time)),
                                     month = paste(data.table::year(time), months(time, TRUE)),
                                     day = time))]
    ## Long data ====
    dtlong <- data.table::melt(dt[, ID := seq(nrow(dt))], id.vars = c("ID", "timeperiod"))
    # group by: color traces per group level via a plotly groupby transform
    if (!is.null(group)) {
      group <- factor(group)
      grouplevels <- levels(group)
      transforms <- list(
        list(
          type = 'groupby',
          groups = group,
          styles =
            lapply(seq_along(grouplevels), function(i) {
              list(target = grouplevels[i], value = list(line = list(color = plotly::toRGB(col[i])),
                                                         fillcolor = plotly::toRGB(col[i], alpha),
                                                         marker = list(color = plotly::toRGB(col[i], alpha)))
              )
            })
        )
      )
    } else {
      transforms <- NULL
    }
    if (is.null(group)) {
      args <- list(data = dtlong,
                   type = type,
                   x = ~timeperiod,
                   y = ~value,
                   color = ~variable,
                   colors = col2hex(col))
    } else {
      args <- list(data = dtlong,
                   type = type,
                   x = ~timeperiod,
                   y = ~value,
                   transforms = transforms)
    }
    if (type == "box") {
      args <- c(args, list(quartilemethod = quartilemethod,
                           boxpoints = boxpoints))
    }
    if (type == "violin") args$box <- list(visible = violin.box)
    plt <- do.call(plotly::plot_ly, args) %>%
      plotly::layout(boxmode = "group")
  }

  # Layout ====
  f <- list(family = theme$font.family,
            size = font.size,
            color = labs.col)
  tickfont <- list(family = theme$font.family,
                   size = font.size,
                   color = tick.col)
  .legend <- list(x = legend.xy[1],
                  y = legend.xy[2],
                  font = list(family = theme$font.family,
                              size = font.size,
                              color = legend.col))
  plt <- plotly::layout(plt,
                        yaxis = list(title = ylab,
                                     titlefont = f,
                                     showgrid = theme$grid,
                                     gridcolor = grid.col,
                                     gridwidth = theme$grid.lwd,
                                     tickcolor = grid.col,
                                     tickfont = tickfont,
                                     zeroline = FALSE,
                                     automargin = automargin.y),
                        xaxis = list(title = xlab,
                                     type = xaxis.type,
                                     titlefont = f,
                                     showgrid = FALSE,
                                     tickcolor = grid.col,
                                     tickfont = tickfont,
                                     automargin = automargin.x),
                        title = list(text = main,
                                     font = list(family = theme$font.family,
                                                 size = font.size,
                                                 color = main.col),
                                     xref = 'paper',
                                     x = theme$main.adj),
                        paper_bgcolor = bg,
                        plot_bgcolor = plot.bg,
                        margin = margin,
                        showlegend = legend,
                        legend = .legend,
                        boxgap = boxgap,
                        boxgroupgap = boxgroupgap)

  # Config
  plt <- plotly::config(plt,
                        displaylogo = FALSE,
                        displayModeBar = displayModeBar)

  # Write to file ====
  if (!is.null(filename)) {
    filename <- file.path(filename)
    # FIX: format previously referenced undefined `file`; use `filename`
    plotly::plotly_IMAGE(plt, width = file.width, height = file.height,
                         format = tools::file_ext(filename), out_file = filename)
  }

  if (print.plot) suppressWarnings(print(plt))
  invisible(plt)
} # rtemis::dplot3.box.R
| /R/dplot3.box.R | no_license | tlarzg/rtemis | R | false | false | 14,131 | r | # dplot3.box.R
# ::rtemis::
# 201-21 E.D. Gennatas lambdamd.org
#' Interactive Boxplots & Violin plots
#'
#' Draw interactive boxplots or violin plots using \pkg{plotly}
#'
#' @param x Vector or List of vectors: Input
#' @param main Character: Plot title. Default = NULL
#' @param xlab Character: x-axis label. Default = NULL
#' @param ylab Character: y-axis label. Default = NULL
#' @param col Color, vector: Color for boxes. Default NULL, which will draw colors from \code{palette}
#' @param alpha Float (0, 1]: Transparency for box colors. Default = .8
#' @param bg Color: Background color. Default = "white"
#' @param plot.bg Color: Background color for plot area. Default = "white"
#' @param theme Character: THeme to use: "light", "dark", "lightgrid", "darkgrid". Default = "lightgrid"
#' @param palette Character: Name of \pkg{rtemis} palette to use. Default = "rtCol1". Only used if \code{col = NULL}
#' @param quartilemethod Character: "linear", "exclusive", "inclusive"
#' @param boxpoints Character or FALSE: "all", "suspectedoutliers", "outliers"
#' See \url{https://plotly.com/r/box-plots/#choosing-the-algorithm-for-computing-quartiles}
#' @param xnames Character, vector, length = NROW(x): x-axis names. Default = NULL, which
#' tries to set names appropriately
#' @param order.by.fn Function: If defined, order boxes by increasing value of this function
#' (e.g. median). Default = NULL
#' @param feature.names Character, vector, length = NCOL(x): Feature names. Default = NULL, which uses
#' \code{colnames(x)}
#' @param font.size Float: Font size for all labels. Default = 16
#' @param legend Logical: If TRUE, draw legend. Default = TRUE
#' @param legend.col Color: Legend text color. Default = NULL, determined by theme
#' @param legend.xy Float, vector, length 2: Relative x, y position for legend. Default = NULL, which places
#' the legend top right beside the plot area. For example, c(0, 1) places the legend top left within the plot area
#' @param xaxis.type Character: "linear", "log", "date", "category", "multicategory"
#' Default = "category"
#' @param margin Named list: plot margins. Default = \code{list(t = 35)}
#'
#' @author E.D. Gennatas
#' @export
#' @examples
#' \dontrun{
#' # A.1 Box plot of 4 variables
#' dplot3.box(iris[, 1:4])
#' # A.2 Grouped Box plot
#' dplot3.box(iris[, 1:4], group = iris$Species)
#' # B. Boxplot split by time periods
#' # Synthetic data with an instantenous shift in distributions
#' set.seed(2021)
#' dat1 <- data.frame(alpha = rnorm(200, 0), beta = rnorm(200, 2), gamma = rnorm(200, 3))
#' dat2 <- data.frame(alpha = rnorm(200, 5), beta = rnorm(200, 8), gamma = rnorm(200, -3))
#' x <- rbind(dat1, dat2)
#' startDate <- as.Date("2019-12-04")
#' endDate <- as.Date("2021-03-31")
#' time <- seq(startDate, endDate, length.out = 400)
#' dplot3.box(x, time, "year")
#' dplot3.box(x, time, "quarter")
#' dplot3.box(x, time, "month")
#' # (Note how the boxplots widen when the period includes data from both dat1 and dat2)
#' }
dplot3.box <- function(x,
time = NULL,
time.bin = c("year", "quarter", "month", "day"),
type = c("box", "violin"),
group = NULL,
main = NULL,
xlab = "",
ylab = NULL,
col = NULL,
alpha = .6,
bg = NULL,
plot.bg = NULL,
theme = getOption("rt.theme", "lightgrid"),
palette = getOption("rt.palette", "rtCol1"),
boxpoints = "outliers",
quartilemethod = "linear",
width = 0,
violin.box = TRUE,
xnames = NULL,
labelify = TRUE,
order.by.fn = NULL,
font.size = 16,
legend = NULL,
legend.col = NULL,
legend.xy = NULL,
xaxis.type = "category",
margin = list(t = 35, pad = 0),
automargin.x = TRUE,
automargin.y = TRUE,
boxgap = NULL,
boxgroupgap = NULL,
displayModeBar = TRUE,
filename = NULL,
file.width = 500,
file.height = 500,
print.plot = TRUE, ...) {
# Dependencies ====
if (!depCheck("plotly", verbose = FALSE)) {
cat("\n"); stop("Please install dependencies and try again")
}
# Arguments ====
type <- match.arg(type)
main <- paste0("<b>", main, "</b>")
# if (!is.list(x)) x <- list(x)
# Convert vector or matrix to list
if (!is.list(x)) {
# x is vector
if (is.numeric(x)) {
.names <- deparse(substitute(x))
x <- list(x)
names(x) <- .names
} else {
.names <- colnames(x)
x <- lapply(seq(NCOL(x)), function(i) x[, i])
names(x) <- .names
}
}
# Order by fn ====
if (!is.null(order.by.fn) && order.by.fn != "none") {
if (is.null(time)) {
if (is.list(x)) {
.order <- order(sapply(x, order.by.fn, na.rm = TRUE))
if (is.data.frame(x)) {
x <- x[, .order]
} else {
x <- x[names(x)[.order]]
}
}
if (!is.null(xnames)) xnames <- xnames[.order]
} else {
warning("Ignoring order.by.fn with time data")
order.by.fn <- NULL
}
}
# Remove non-numeric vectors
# which.nonnum <- which(sapply(x, function(i) !is.numeric(i)))
# if (length(which.nonnum) > 0) x[[which.nonnum]] <- NULL
if (!is.null(group)) group <- factor(group)
n.groups <- if (is.null(group)) length(x) else length(levels(group))
.xnames <- xnames
if (is.null(.xnames)) {
.xnames <- names(x)
if (is.null(.xnames)) .xnames <- paste0("Feature", seq(n.groups))
if (labelify) .xnames <- labelify(.xnames)
}
# Colors ====
if (is.character(palette)) palette <- rtPalette(palette)
if (is.null(col)) col <- recycle(palette, seq(n.groups))[seq(n.groups)]
if (!is.null(order.by.fn) && order.by.fn != "none") {
col <- col[.order]
}
# Theme ====
extraargs <- list(...)
if (is.character(theme)) {
theme <- do.call(paste0("theme_", theme), extraargs)
} else {
for (i in seq(extraargs)) {
theme[[names(extraargs)[i]]] <- extraargs[[i]]
}
}
bg <- plotly::toRGB(theme$bg)
plot.bg <- plotly::toRGB(theme$plot.bg)
grid.col <- plotly::toRGB(theme$grid.col)
tick.col <- plotly::toRGB(theme$tick.labels.col)
labs.col <- plotly::toRGB(theme$labs.col)
main.col <- plotly::toRGB(theme$main.col)
# axes.col <- plotly::toRGB(theme$axes.col)
# Derived
if (is.null(legend.col)) legend.col <- labs.col
if (is.null(time)) {
if (is.null(group)) {
# A.1 Single and multiple boxplots ====
if (is.null(legend)) legend <- FALSE
args <- list(y = x[[1]],
type = type,
name = .xnames[1],
line = list(color = plotly::toRGB(col[1])),
fillcolor = plotly::toRGB(col[1], alpha),
marker = list(color = plotly::toRGB(col[1], alpha)))
if (type == "box") {
args <- c(args, list(quartilemethod = quartilemethod,
boxpoints = boxpoints))
}
if (type == "violin") args$box <- list(visible = violin.box)
plt <- do.call(plotly::plot_ly, args)
if (n.groups > 1) {
for (i in seq_len(n.groups)[-1]) {
plt <- plotly::add_trace(plt, y = x[[i]],
name = .xnames[i],
line = list(color = plotly::toRGB(col[i])),
fillcolor = plotly::toRGB(col[i], alpha),
marker = list(color = plotly::toRGB(col[i], alpha)))
}
}
} else {
# A.2 Grouped boxplots ====
if (is.null(legend)) legend <- TRUE
dt <- cbind(data.table::as.data.table(x), group = group)
dtlong <- data.table::melt(dt[, ID := seq(nrow(dt))], id.vars = c("ID", "group"))
if (is.null(ylab)) ylab <- ""
args <- list(data = dtlong,
type = type,
x = ~variable,
y = ~value,
color = ~group,
colors = col2hex(col))
if (type == "box") {
args <- c(args, list(quartilemethod = quartilemethod,
boxpoints = boxpoints,
alpha = alpha))
}
if (type == "violin") args$box <- list(visible = violin.box)
plt <- do.call(plotly::plot_ly, args) %>%
plotly::layout(boxmode = "group",
xaxis = list(tickvals = 0:(NCOL(dt) - 2),
ticktext = .xnames))
}
} else {
# B. Time-binned boxplots ====
time.bin <- match.arg(time.bin)
if (is.null(xlab)) xlab <- ""
if (is.null(ylab)) ylab <- ""
if (is.null(legend)) legend <- TRUE
dt <- data.table::as.data.table(x)
dt[, timeperiod := factor(switch(time.bin,
year = data.table::year(time),
quarter = paste(data.table::year(time), quarters(time)),
month = paste(data.table::year(time), months(time, TRUE)),
day = time,
))]
## Long data ====
dtlong <- data.table::melt(dt[, ID := seq(nrow(dt))], id.vars = c("ID", "timeperiod"))
# group by
if (!is.null(group)) {
group <- factor(group)
grouplevels <- levels(group)
transforms <- list(
list(
type = 'groupby',
groups = group,
# styles = list(
# list(target = 4, value = list(marker =list(color = 'blue'))),
# list(target = 6, value = list(marker =list(color = 'red'))),
# list(target = 8, value = list(marker =list(color = 'black')))
# )
styles =
lapply(seq_along(grouplevels), function(i) {
list(target = grouplevels[i], value = list(line = list(color = plotly::toRGB(col[i])),
fillcolor = plotly::toRGB(col[i], alpha),
marker = list(color = plotly::toRGB(col[i], alpha)))
)
})
)
)
} else {
transforms <- NULL
}
if (is.null(group)) {
args <- list(data = dtlong,
type = type,
x = ~timeperiod,
y = ~value,
color = ~variable,
colors = col2hex(col))
} else {
args <- list(data = dtlong,
type = type,
x = ~timeperiod,
y = ~value,
# color = if (is.null(group)) ~variable else NULL,
# colors = if (is.null(group)) col2hex(col) else NULL,
transforms = transforms)
}
if (type == "box") {
args <- c(args, list(quartilemethod = quartilemethod,
boxpoints = boxpoints))
}
if (type == "violin") args$box <- list(visible = violin.box)
plt <- do.call(plotly::plot_ly, args) %>%
plotly::layout(boxmode = "group")
}
# layout ====
f <- list(family = theme$font.family,
size = font.size,
color = labs.col)
tickfont <- list(family = theme$font.family,
size = font.size,
color = tick.col)
.legend <- list(x = legend.xy[1],
y = legend.xy[2],
font = list(family = theme$font.family,
size = font.size,
color = legend.col))
plt <- plotly::layout(plt,
yaxis = list(title = ylab,
titlefont = f,
showgrid = theme$grid,
gridcolor = grid.col,
gridwidth = theme$grid.lwd,
tickcolor = grid.col,
tickfont = tickfont,
zeroline = FALSE,
automargin = automargin.y),
xaxis = list(title = xlab,
type = xaxis.type,
titlefont = f,
showgrid = FALSE,
tickcolor = grid.col,
tickfont = tickfont,
automargin = automargin.x),
title = list(text = main,
font = list(family = theme$font.family,
size = font.size,
color = main.col),
xref = 'paper',
x = theme$main.adj),
paper_bgcolor = bg,
plot_bgcolor = plot.bg,
margin = margin,
showlegend = legend,
legend = .legend,
boxgap = boxgap,
boxgroupgap = boxgroupgap)
# Config
plt <- plotly::config(plt,
displaylogo = FALSE,
displayModeBar = displayModeBar)
# Write to file ====
if (!is.null(filename)) {
filename <- file.path(filename)
plotly::plotly_IMAGE(plt, width = file.width, height = file.height,
format = tools::file_ext(file), out_file = filename)
}
if (print.plot) suppressWarnings(print(plt))
invisible(plt)
} # rtemis::dplot3.box.R
|
# WCS REPORT -------------------------------------------------------

#' Download a WCS report workbook and read its first sheet.
#'
#' @param url URL of the .xlsx report to download.
#' @param temp Local path used as the download target (defaults to the shared
#'   data-lake temp file).
#' @param remove_temporary Delete the temp file after reading? Default TRUE.
#' @return The contents of sheet 1 as returned by readxl::read_xlsx().
download_wcs <- function(url, temp = "k:/dept/DIGITAL E-COMMERCE/E-COMMERCE/Report E-Commerce/data_lake/temp/temp.xlsx", remove_temporary = TRUE){
  h <- new_handle()
  # NOTE(review): disabling peer verification accepts any TLS certificate;
  # confirm this is intentional for the internal WCS endpoint.
  handle_setopt(h, ssl_verifypeer = FALSE)
  curl_download(url, temp, handle = h)
  # Register cleanup before reading so the temp file is removed even if
  # read_xlsx() fails (the original leaked the file on error).
  if (remove_temporary) {
    on.exit(file.remove(temp), add = TRUE)
  }
  wcs <- read_xlsx(temp, sheet = 1, guess_max = 70000)
  wcs
}
| /scripts/helpers/wcs_helpers_functions.R | no_license | marcoscattolin/data_lake_push | R | false | false | 524 | r |
# WCS REPORT -------------------------------------------------------

#' Download a WCS report workbook and read its first sheet.
#'
#' @param url URL of the .xlsx report to download.
#' @param temp Local path used as the download target (defaults to the shared
#'   data-lake temp file).
#' @param remove_temporary Delete the temp file after reading? Default TRUE.
#' @return The contents of sheet 1 as returned by readxl::read_xlsx().
download_wcs <- function(url, temp = "k:/dept/DIGITAL E-COMMERCE/E-COMMERCE/Report E-Commerce/data_lake/temp/temp.xlsx", remove_temporary = TRUE){
  h <- new_handle()
  # NOTE(review): disabling peer verification accepts any TLS certificate;
  # confirm this is intentional for the internal WCS endpoint.
  handle_setopt(h, ssl_verifypeer = FALSE)
  curl_download(url, temp, handle = h)
  # Register cleanup before reading so the temp file is removed even if
  # read_xlsx() fails (the original leaked the file on error).
  if (remove_temporary) {
    on.exit(file.remove(temp), add = TRUE)
  }
  wcs <- read_xlsx(temp, sheet = 1, guess_max = 70000)
  wcs
}
|
context("CSVY import using read_csvy()")
library("datasets")

# Path to one of the example files bundled with the csvy package.
csvy_example <- function(fname) {
  system.file("examples", fname, package = "csvy")
}

test_that("Basic import from CSVY", {
  example1 <- read_csvy(csvy_example("example1.csvy"))
  expect_true(inherits(example1, "data.frame"))
  example3 <- read_csvy(csvy_example("example3.csvy"))
  expect_true(identical(dim(example3), c(2L, 3L)))
  # Plain read.csv() on the same file, treating the YAML front matter as
  # comment lines, should see the same 2 x 3 table.
  via_base <- read.csv(csvy_example("example3.csvy"), comment.char = "#")
  expect_true(identical(dim(via_base), c(2L, 3L)))
})

test_that("Import from CSVY with separate yaml header", {
  data_file <- tempfile(fileext = ".csv")
  meta_file <- tempfile(fileext = ".yaml")
  write_csvy(iris, file = data_file, metadata = meta_file)
  expect_true(inherits(read_csvy(data_file, metadata = meta_file), "data.frame"))
  unlink(data_file)
  unlink(meta_file)
})
| /tests/testthat/test-read_csvy.R | no_license | jonocarroll/csvy | R | false | false | 842 | r | context("CSVY import using read_csvy()")
library("datasets")
# Round-trip import of the example files shipped with the csvy package.
test_that("Basic import from CSVY", {
  d1 <- read_csvy(system.file("examples", "example1.csvy", package = "csvy"))
  expect_true(inherits(d1, "data.frame"))
  d3 <- read_csvy(system.file("examples", "example3.csvy", package = "csvy"))
  expect_true(identical(dim(d3), c(2L, 3L)))
  # Plain read.csv() on the same file, skipping the YAML front matter via
  # comment.char, should see the same 2 x 3 table.
  d4 <- read.csv(system.file("examples", "example3.csvy", package = "csvy"), comment.char = "#")
  expect_true(identical(dim(d4), c(2L, 3L)))
})
# Write iris with the YAML metadata in a separate file, then read it back.
test_that("Import from CSVY with separate yaml header", {
  tmp_csvy <- tempfile(fileext = ".csv")
  tmp_yaml <- tempfile(fileext = ".yaml")
  write_csvy(iris, file = tmp_csvy, metadata = tmp_yaml)
  expect_true(inherits(read_csvy(tmp_csvy, metadata = tmp_yaml), "data.frame"))
  unlink(tmp_csvy)
  unlink(tmp_yaml)
})
|
# Monster Life reference data hosted on GitHub; row names are monster names.
# EUC-KR encoding is declared because the file contains Korean text.
MonsterLifeData <- "https://raw.githubusercontent.com/ParkKyuSeon/Maplestory_DPM/master/data/monsterlifedata.csv"
MonsterLifeData <- read.csv(MonsterLifeData, header = TRUE, row.names = 1,
                            stringsAsFactors = FALSE, encoding = "EUC-KR")
#' Aggregate Monster Life farm bonuses for a set of placed monsters.
#'
#' Looks up each requested monster in `MonsterLifeData` (rows keyed by monster
#' name), zeroes the stat bonuses of monsters whose special activation
#' condition is unmet, keeps only the single best-ranked monster per category
#' (all "Special"-category monsters are kept), then returns one "Sum" row of
#' the combined bonuses.
#'
#' @param MonsterLifeData Data frame of monster stats: columns 1-2 are
#'   Category/Rank, the last column is SpecialCondition, the columns in
#'   between are stat bonuses (one may be named "IGR").
#' @param Monsters Character vector of monster names; unknown names are
#'   silently dropped (same as the original subset() lookup).
#' @return One-row data frame (or vector when only one stat column exists)
#'   with the summed stat bonuses. The "IGR" column is combined via the
#'   external IGRCalc() helper — semantics defined elsewhere in the project.
MonsterLifeSpecs <- function(MonsterLifeData, Monsters) {
  ## Collect the rows for the requested monsters; build once with do.call()
  ## instead of growing via rbind() inside the loop.
  matched <- lapply(Monsters, function(m) {
    MonsterLifeData[rownames(MonsterLifeData) == m, , drop = FALSE]
  })
  MLSet <- do.call(rbind, c(list(MonsterLifeData[0, , drop = FALSE]), matched))

  ## Special activation conditions:
  ##   1 - needs the full Viking quartet, 2 - needs Eunwol,
  ##   3 - needs the full Luminous trio.
  ## When unmet, the monster's stat columns (between Rank and
  ## SpecialCondition) are zeroed.
  vikings  <- c("SleepyViking", "TiredViking", "EnoughViking", "SeriousViking")
  luminous <- c("DarkLumi", "EquilLumi", "LightLumi")
  statcols <- 3:(ncol(MLSet) - 1)
  for (i in seq_len(nrow(MLSet))) {  # seq_len() is safe for an empty MLSet
    cond <- MLSet$SpecialCondition[i]
    unmet <- (cond == 1 && !all(vikings %in% rownames(MLSet))) ||
             (cond == 2 && !("Eunwol" %in% rownames(MLSet))) ||
             (cond == 3 && !all(luminous %in% rownames(MLSet)))
    if (isTRUE(unmet)) MLSet[i, statcols] <- 0
  }

  ## Keep the single best-ranked monster per category; "Special" monsters
  ## stack, so all of them are kept. Rank preference is fixed: SS > S > A+ >
  ## A > B+ > B, with C as the final fallback (as in the original chain).
  rank_priority <- c("SS", "S", "A+", "A", "B+", "B")
  picks <- lapply(unique(MLSet$Category), function(ctg) {
    catset <- subset(MLSet, MLSet$Category == ctg)
    if (ctg == "Special") return(catset)
    for (r in rank_priority) {
      hits <- subset(catset, catset$Rank == r)
      if (nrow(hits) >= 1) return(hits[1, , drop = FALSE])
    }
    ## Fallback to C rank (yields an all-NA row when absent, as before).
    subset(catset, catset$Rank == "C")[1, , drop = FALSE]
  })
  MLSetFinal <- do.call(rbind, c(list(MLSet[0, , drop = FALSE]), picks))

  ## Append a "Sum" row: plain column sums for every stat except "IGR",
  ## which is folded through the external IGRCalc() helper.
  MLSetFinal <- rbind(MLSetFinal, MLSetFinal[nrow(MLSetFinal), ])
  rownames(MLSetFinal)[nrow(MLSetFinal)] <- "Sum"
  MLSetFinal[nrow(MLSetFinal), 3:ncol(MLSetFinal)] <- 0
  for (i in 3:(ncol(MLSetFinal) - 1)) {
    if (colnames(MLSetFinal)[i] != "IGR") {
      MLSetFinal[nrow(MLSetFinal), i] <- sum(MLSetFinal[, i])
    } else {
      MLSetFinal[nrow(MLSetFinal), i] <-
        IGRCalc(c(MLSetFinal[1:(nrow(MLSetFinal) - 1), i]))
    }
  }
  MLSetFinal[nrow(MLSetFinal), 3:(ncol(MLSetFinal) - 1)]
}
## Monster Life Preset (6~8 Empty Slots : 8 in level 1, 7 in level 2, 6 in level 3)
### Farm Level 21(22 Slots) + No Bigeup, Serf, MiniSpider, LightLumi, PinkBean, SS mix monsters
#### STR Type 1-1 : STR, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=F
MLTypeS11 <- MonsterLifeSpecs(MonsterLifeData, c("FrankenRoid", "Leica", "ReeperSpecter", "EliteBloodTooth",
"VonLeon", "VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking", "Cygnus", "BlackViking",
"Hilla", "Akairum", "PapulatusClock", "Beril", "Oberon", "ReinforcedBeril",
"WolmyoThief", "ToyKnight", "IncongruitySoul", "YetiPharaoh"))
#### Shinsoo, Timer
#### STR Type 1-2 : STR, SummonedDuration=F, BuffDuration=F, FinalATKDMR=T
### Farm Level 30(26 Slots) + No Bigeup, Serf, MiniSpider, LightLumi, Pierre, VonBon
#### STR Type 2-1 : STR, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeS21 <- MonsterLifeSpecs(MonsterLifeData, c("FrankenRoid", "ReeperSpecter", "Leica", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "AkairumPriest", "PapulatusClock", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### STR Type 2-2 : STR, SummonedDuration=F, FinalATKDMR=T, CRR=F
MLTypeS22 <- MonsterLifeSpecs(MonsterLifeData, c("FrankenRoid", "ReeperSpecter", "Leica", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "Puso", "AkairumPriest", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### STR Type 2-3 : STR, SummonedDuration=T, FinalATKDMR=F, CRR=F
MLTypeS23 <- MonsterLifeSpecs(MonsterLifeData, c("FrankenRoid", "ReeperSpecter", "Leica", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "GoldYeti", "CoupleYeti", "AkairumPriest", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### HP Type 2-1 : HP, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeH21 <- MonsterLifeSpecs(MonsterLifeData, c("ModifiedFireBoar", "Dodo", "Leica", "NineTailedFox", "AkairumPriest",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"CoupleYeti", "Oberon", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli", "RomantistKingSlime",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### DEX Type 2-1 : DEX, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeD21 <- MonsterLifeSpecs(MonsterLifeData, c("Lilinoch", "Taeryun", "AkairumPriest", "Papulatus", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "PapulatusClock", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### DEX Type 2-2 : DEX, SummonedDuration=T, FinalATKDMR=F, CRR=F
MLTypeD22 <- MonsterLifeSpecs(MonsterLifeData, c("Lilinoch", "Taeryun", "AkairumPriest", "Papulatus", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "GoldYeti", "CoupleYeti", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### DEX Type 2-3 : DEX, SummonedDuration=F, FinalATKDMR=T, CRR=F
MLTypeD23 <- MonsterLifeSpecs(MonsterLifeData, c("Lilinoch", "Taeryun", "AkairumPriest", "Papulatus", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "Puso", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### DEX Type 2-4 : DEX, SummonedDuration=T, FinalATKDMR=T, CRR=F
MLTypeD24 <- MonsterLifeSpecs(MonsterLifeData, c("Lilinoch", "Taeryun", "AkairumPriest", "CoupleYeti", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "GoldYeti", "Puso", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### INT Type 2-1 : INT, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeI21 <- MonsterLifeSpecs(MonsterLifeData, c("Timer", "MachineMT09", "ReeperSpecter", "AkairumPriest", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "PapulatusClock", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### INT Type 2-2 : INT, SummonedDuration=T, FinalATKDMR=F, CRR=F
MLTypeI22 <- MonsterLifeSpecs(MonsterLifeData, c("Timer", "MachineMT09", "ReeperSpecter", "AkairumPriest", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "GoldYeti", "CoupleYeti", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### LUK Type 2-1 : LUK, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeL21 <- MonsterLifeSpecs(MonsterLifeData, c("Dunas", "Hogeol", "Papulatus", "LightSoul", "AkairumPriest", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### LUK Type 2-2 : LUK, SummonedDuration=T, FinalATKDMR=F, CRR=F
MLTypeL22 <- MonsterLifeSpecs(MonsterLifeData, c("Dunas", "Hogeol", "Papulatus", "LightSoul", "AkairumPriest", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"CoupleYeti", "GoldYeti", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### Allstat Type 2-1 : ALLSTAT(Xenon), SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeA21 <- MonsterLifeSpecs(MonsterLifeData, c("Hogeol", "Leica", "Papulatus", "Taeryun", "AkairumPriest", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul", "YetiPharaoh"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
### Farm Level 40(28 Slots)
#### STR Type 3-1 : STR, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeS31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterRednug",
"Bigeup", "IncongruitySoul", "FrankenRoid", "Leica", "ReeperSpecter",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "KingSlime"))
#### EliteBloodTooth, DemonWarrior, ThiefCrow, Ifrit
#### STR Type 3-2 : STR, SummonedDuration=F, BuffDuration=T, FinalATKDMR=F, CRR=F
MLTypeS32 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
"Lazuli", "Tinman", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterRednug",
"IncongruitySoul", "FrankenRoid", "Leica", "DemonWarrior",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "PapulatusClock"))
#### EliteBloodTooth, AkairumPriest, Victor, Ifrit
#### STR Type 3-3 : STR, SummonedDuration=F, BuffDuration=F, FinalATKDMR=T, CRR=T
MLTypeS33 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "Pierre",
"Puso", "Bigeup", "IncongruitySoul", "FrankenRoid", "Leica",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "Targa"))
#### EliteBloodTooth, KingSlime, ThiefCrow, Ifrit
#### STR Type 3-5 : STR, SummonedDuration=T, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeS35 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "BigPumpkin",
"CoupleYeti", "GoldYeti", "IncongruitySoul", "FrankenRoid", "Leica",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "KingSlime"))
#### EliteBloodTooth, Giant, ThiefCrow, Ifrit
#### HP Type 3-1 : HP(DemonAvenger), SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeH31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "GiantDarkSoul", "InnerRage", "Tinman", "SmallBalloon", "KingCastleGolem",
"Bigeup", "IncongruitySoul", "Dodo", "ModifiedFireBoar",
"ToyKnight", "Oberon", "SeriousViking", "KingSlime", "NineTailedFox", "Giant"))
#### EliteBloodTooth, StrangeMonster, ThiefCrow, PrimeMinister
#### DEX Type 3-1 : DEX, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeD31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterRelic",
"Bigeup", "IncongruitySoul", "Lilinoch", "Taeryun", "AkairumPriest",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox"))
#### EliteBloodTooth, KingSlime, GuwaruVestige, Ifrit
#### DEX Type 3-2 : DEX, SummonedDuration=F, BuffDuration=F, FinalATKDMR=T, CRR=T
MLTypeD32 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "Pierre",
"Puso", "Bigeup", "IncongruitySoul", "Lilinoch", "Taeryun", "AkairumPriest",
"Targa", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox"))
#### EliteBloodTooth, KingSlime, ThiefCrow, Ifrit
#### DEX Type 3-3 : DEX, SummonedDuration=T, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeD33 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "BigPumpkin",
"CoupleYeti", "GoldYeti", "IncongruitySoul", "Lilinoch", "Taeryun",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "KingSlime"))
#### EliteBloodTooth, Giant, ThiefCrow, Ifrit
#### DEX Type 3-4 : DEX, SummonedDuration=T, BuffDuration=F, FinalATKDMR=T, CRR=F
MLTypeD34 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "Tinman", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "Pierre",
"CoupleYeti", "GoldYeti", "IncongruitySoul", "Lilinoch", "Taeryun", "PapulatusClock",
"Targa", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox"))
#### EliteBloodTooth, Victor, ThiefCrow, Ifrit
#### INT Type 3-1 : INT, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeI31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterMargana",
"Bigeup", "IncongruitySoul", "Timer", "MachineMT09", "AkairumPriest", "DemonMagician",
"ToyKnight", "Oberon", "WolmyoThief", "NineTailedFox", "Ifrit"))
#### EliteBloodTooth, KingSlime, ThiefCrow, SeriousViking
#### INT Type 3-2 : INT, SummonedDuration=F, BuffDuration=T, FinalATKDMR=F, CRR=T
MLTypeI32 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "Will", "VonBon", "HugeSpider",
"MiniSpider", "IncongruitySoul", "Timer", "MachineMT09", "AkairumPriest", "ReeperSpecter",
"ToyKnight", "Oberon", "WolmyoThief", "NineTailedFox"))
#### SeriousViking, EliteBloodTooth, KingSlime, Grief
#### INT Type 3-3 : INT, SummonedDuration=F, BuffDuration=T, FinalATKDMR=F, CRR=F
MLTypeI33 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
"Lazuli", "Tinman", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "Will", "VonBon", "HugeSpider",
"MiniSpider", "IncongruitySoul", "Timer", "MachineMT09", "AkairumPriest", "PapulatusClock",
"ToyKnight", "Oberon", "WolmyoThief", "NineTailedFox"))
#### SeriousViking, EliteBloodTooth, Victor, Grief
#### INT Type 3-5 : INT, SummonedDuration=T, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeI35 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "BigPumpkin",
"CoupleYeti", "GoldYeti", "IncongruitySoul", "Timer", "MachineMT09",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "Ifrit", "NineTailedFox"))
#### EliteBloodTooth, Giant, ThiefCrow, KingSlime
#### LUK Type 3-1 : LUK, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeL31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterHisab",
"Bigeup", "IncongruitySoul", "Dunas", "Hogeol", "Papulatus",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "Ifrit", "NineTailedFox"))
#### EliteBloodTooth, ThiefCrow, KingSlime, Ergoth
#### LUK Type 3-2 : LUK, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T, CoolTimeReset=T
MLTypeL32 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "Lapis", "SmallBalloon", "BigBalloon",
"Bigeup", "IncongruitySoul", "Dunas", "Hogeol", "Papulatus",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "Ifrit", "NineTailedFox"))
#### EliteBloodTooth, ThiefCrow, KingSlime, PrimeMinister
#### LUK Type 3-3 : LUK, SummonedDuration=T, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeL33 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "BigPumpkin",
"CoupleYeti", "GoldYeti", "IncongruitySoul", "Dunas", "Hogeol", "NineTailedFox",
"ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "Ifrit"))
#### EliteBloodTooth, Giant, ThiefCrow, KingSlime
#### Allstat Type 3-1 : ALLSTAT(Xenon), SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=F
MLTypeA31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
"Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
"LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "Tinman",
"Bigeup", "IncongruitySoul", "Beril", "PapulatusClock",
"ToyKnight", "Oberon", "Victor", "SeriousViking", "WolmyoThief", "NineTailedFox"))
#### EliteBloodTooth, AkairumPriest, Ifrit, KingSlime
| /base/MonsterLife.R | no_license | MapleStory-Archive/Maplestory_DPM | R | false | false | 25,863 | r | MonsterLifeData <- "https://raw.githubusercontent.com/ParkKyuSeon/Maplestory_DPM/master/data/monsterlifedata.csv"
# Read the CSV into a data frame keyed by monster name; spell out
# header/stringsAsFactors (the T/F abbreviations are reassignable in R).
MonsterLifeData <- read.csv(MonsterLifeData, header = TRUE, row.names = 1,
                            stringsAsFactors = FALSE, encoding = "EUC-KR")
#' Aggregate Monster Life farm bonuses for a set of placed monsters.
#'
#' Looks up each requested monster in `MonsterLifeData` (rows keyed by monster
#' name), zeroes the stat bonuses of monsters whose special activation
#' condition is unmet, keeps only the single best-ranked monster per category
#' (all "Special"-category monsters are kept), then returns one "Sum" row of
#' the combined bonuses.
#'
#' @param MonsterLifeData Data frame of monster stats: columns 1-2 are
#'   Category/Rank, the last column is SpecialCondition, the columns in
#'   between are stat bonuses (one may be named "IGR").
#' @param Monsters Character vector of monster names; unknown names are
#'   silently dropped (same as the original subset() lookup).
#' @return One-row data frame (or vector when only one stat column exists)
#'   with the summed stat bonuses. The "IGR" column is combined via the
#'   external IGRCalc() helper — semantics defined elsewhere in the project.
MonsterLifeSpecs <- function(MonsterLifeData, Monsters) {
  ## Collect the rows for the requested monsters; build once with do.call()
  ## instead of growing via rbind() inside the loop.
  matched <- lapply(Monsters, function(m) {
    MonsterLifeData[rownames(MonsterLifeData) == m, , drop = FALSE]
  })
  MLSet <- do.call(rbind, c(list(MonsterLifeData[0, , drop = FALSE]), matched))

  ## Special activation conditions:
  ##   1 - needs the full Viking quartet, 2 - needs Eunwol,
  ##   3 - needs the full Luminous trio.
  ## When unmet, the monster's stat columns (between Rank and
  ## SpecialCondition) are zeroed.
  vikings  <- c("SleepyViking", "TiredViking", "EnoughViking", "SeriousViking")
  luminous <- c("DarkLumi", "EquilLumi", "LightLumi")
  statcols <- 3:(ncol(MLSet) - 1)
  for (i in seq_len(nrow(MLSet))) {  # seq_len() is safe for an empty MLSet
    cond <- MLSet$SpecialCondition[i]
    unmet <- (cond == 1 && !all(vikings %in% rownames(MLSet))) ||
             (cond == 2 && !("Eunwol" %in% rownames(MLSet))) ||
             (cond == 3 && !all(luminous %in% rownames(MLSet)))
    if (isTRUE(unmet)) MLSet[i, statcols] <- 0
  }

  ## Keep the single best-ranked monster per category; "Special" monsters
  ## stack, so all of them are kept. Rank preference is fixed: SS > S > A+ >
  ## A > B+ > B, with C as the final fallback (as in the original chain).
  rank_priority <- c("SS", "S", "A+", "A", "B+", "B")
  picks <- lapply(unique(MLSet$Category), function(ctg) {
    catset <- subset(MLSet, MLSet$Category == ctg)
    if (ctg == "Special") return(catset)
    for (r in rank_priority) {
      hits <- subset(catset, catset$Rank == r)
      if (nrow(hits) >= 1) return(hits[1, , drop = FALSE])
    }
    ## Fallback to C rank (yields an all-NA row when absent, as before).
    subset(catset, catset$Rank == "C")[1, , drop = FALSE]
  })
  MLSetFinal <- do.call(rbind, c(list(MLSet[0, , drop = FALSE]), picks))

  ## Append a "Sum" row: plain column sums for every stat except "IGR",
  ## which is folded through the external IGRCalc() helper.
  MLSetFinal <- rbind(MLSetFinal, MLSetFinal[nrow(MLSetFinal), ])
  rownames(MLSetFinal)[nrow(MLSetFinal)] <- "Sum"
  MLSetFinal[nrow(MLSetFinal), 3:ncol(MLSetFinal)] <- 0
  for (i in 3:(ncol(MLSetFinal) - 1)) {
    if (colnames(MLSetFinal)[i] != "IGR") {
      MLSetFinal[nrow(MLSetFinal), i] <- sum(MLSetFinal[, i])
    } else {
      MLSetFinal[nrow(MLSetFinal), i] <-
        IGRCalc(c(MLSetFinal[1:(nrow(MLSetFinal) - 1), i]))
    }
  }
  MLSetFinal[nrow(MLSetFinal), 3:(ncol(MLSetFinal) - 1)]
}
## Monster Life Preset (6~8 Empty Slots : 8 in level 1, 7 in level 2, 6 in level 3)
### Farm Level 21(22 Slots) + No Bigeup, Serf, MiniSpider, LightLumi, PinkBean, SS mix monsters
#### STR Type 1-1 : STR, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=F
MLTypeS11 <- MonsterLifeSpecs(MonsterLifeData, c("FrankenRoid", "Leica", "ReeperSpecter", "EliteBloodTooth",
"VonLeon", "VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking", "Cygnus", "BlackViking",
"Hilla", "Akairum", "PapulatusClock", "Beril", "Oberon", "ReinforcedBeril",
"WolmyoThief", "ToyKnight", "IncongruitySoul", "YetiPharaoh"))
#### Shinsoo, Timer
#### STR Type 1-2 : STR, SummonedDuration=F, BuffDuration=F, FinalATKDMR=T
### Farm Level 30(26 Slots) + No Bigeup, Serf, MiniSpider, LightLumi, Pierre, VonBon
#### STR Type 2-1 : STR, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeS21 <- MonsterLifeSpecs(MonsterLifeData, c("FrankenRoid", "ReeperSpecter", "Leica", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "AkairumPriest", "PapulatusClock", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### STR Type 2-2 : STR, SummonedDuration=F, FinalATKDMR=T, CRR=F
MLTypeS22 <- MonsterLifeSpecs(MonsterLifeData, c("FrankenRoid", "ReeperSpecter", "Leica", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "Puso", "AkairumPriest", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### STR Type 2-3 : STR, SummonedDuration=T, FinalATKDMR=F, CRR=F
MLTypeS23 <- MonsterLifeSpecs(MonsterLifeData, c("FrankenRoid", "ReeperSpecter", "Leica", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "GoldYeti", "CoupleYeti", "AkairumPriest", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### HP Type 2-1 : HP, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeH21 <- MonsterLifeSpecs(MonsterLifeData, c("ModifiedFireBoar", "Dodo", "Leica", "NineTailedFox", "AkairumPriest",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"CoupleYeti", "Oberon", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli", "RomantistKingSlime",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### DEX Type 2-1 : DEX, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeD21 <- MonsterLifeSpecs(MonsterLifeData, c("Lilinoch", "Taeryun", "AkairumPriest", "Papulatus", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "PapulatusClock", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### DEX Type 2-2 : DEX, SummonedDuration=T, FinalATKDMR=F, CRR=F
MLTypeD22 <- MonsterLifeSpecs(MonsterLifeData, c("Lilinoch", "Taeryun", "AkairumPriest", "Papulatus", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "GoldYeti", "CoupleYeti", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### DEX Type 2-3 : DEX, SummonedDuration=F, FinalATKDMR=T, CRR=F
MLTypeD23 <- MonsterLifeSpecs(MonsterLifeData, c("Lilinoch", "Taeryun", "AkairumPriest", "Papulatus", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "Beril", "Puso", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### DEX Type 2-4 : DEX, SummonedDuration=T, FinalATKDMR=T, CRR=F
MLTypeD24 <- MonsterLifeSpecs(MonsterLifeData, c("Lilinoch", "Taeryun", "AkairumPriest", "CoupleYeti", "NineTailedFox",
"VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
"Oberon", "GoldYeti", "Puso", "Phantom", "Eunwol", "Rang",
"VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
"WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
# ---------------------------------------------------------------------------
# Predefined Monster Life farm setups.
# Each MLType* object is produced by MonsterLifeSpecs() (defined elsewhere in
# this file) from MonsterLifeData, using a hand-picked monster roster for one
# main stat and option profile.  Naming: MLType<stat><tier><variant>, where
# stat is S(TR)/D(EX)/I(NT)/L(UK)/H(P)/A(llstat).  The #### trailer after each
# definition lists notable alternates/exclusions for that setup.
# NOTE(review): rosters are curated data -- do not reorder or "deduplicate".
# ---------------------------------------------------------------------------
#### INT Type 2-1 : INT, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeI21 <- MonsterLifeSpecs(MonsterLifeData, c("Timer", "MachineMT09", "ReeperSpecter", "AkairumPriest", "NineTailedFox",
                                                 "VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
                                                 "Oberon", "Beril", "PapulatusClock", "Phantom", "Eunwol", "Rang",
                                                 "VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
                                                 "WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### INT Type 2-2 : INT, SummonedDuration=T, FinalATKDMR=F, CRR=F
MLTypeI22 <- MonsterLifeSpecs(MonsterLifeData, c("Timer", "MachineMT09", "ReeperSpecter", "AkairumPriest", "NineTailedFox",
                                                 "VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
                                                 "Oberon", "GoldYeti", "CoupleYeti", "Phantom", "Eunwol", "Rang",
                                                 "VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
                                                 "WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### LUK Type 2-1 : LUK, SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeL21 <- MonsterLifeSpecs(MonsterLifeData, c("Dunas", "Hogeol", "Papulatus", "LightSoul", "AkairumPriest", "NineTailedFox",
                                                 "VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
                                                 "Oberon", "Beril", "Phantom", "Eunwol", "Rang",
                                                 "VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
                                                 "WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### LUK Type 2-2 : LUK, SummonedDuration=T, FinalATKDMR=F, CRR=F
MLTypeL22 <- MonsterLifeSpecs(MonsterLifeData, c("Dunas", "Hogeol", "Papulatus", "LightSoul", "AkairumPriest", "NineTailedFox",
                                                 "VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
                                                 "CoupleYeti", "GoldYeti", "Phantom", "Eunwol", "Rang",
                                                 "VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
                                                 "WolmyoThief", "ToyKnight", "IncongruitySoul"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
#### Allstat Type 2-1 : ALLSTAT(Xenon), SummonedDuration=F, FinalATKDMR=F, CRR=F
MLTypeA21 <- MonsterLifeSpecs(MonsterLifeData, c("Hogeol", "Leica", "Papulatus", "Taeryun", "AkairumPriest", "NineTailedFox",
                                                 "VikingCorps", "SleepyViking", "TiredViking", "EnoughViking", "SeriousViking",
                                                 "Oberon", "Beril", "Phantom", "Eunwol", "Rang",
                                                 "VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum", "Scarecrow", "Lazuli",
                                                 "WolmyoThief", "ToyKnight", "IncongruitySoul", "YetiPharaoh"))
#### Shinsoo, EliteBloodTooth, YetiPharaoh
### Farm Level 40(28 Slots)
#### STR Type 3-1 : STR, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeS31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterRednug",
                                                 "Bigeup", "IncongruitySoul", "FrankenRoid", "Leica", "ReeperSpecter",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "KingSlime"))
#### EliteBloodTooth, DemonWarrior, ThiefCrow, Ifrit
#### STR Type 3-2 : STR, SummonedDuration=F, BuffDuration=T, FinalATKDMR=F, CRR=F
MLTypeS32 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
                                                 "Lazuli", "Tinman", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterRednug",
                                                 "IncongruitySoul", "FrankenRoid", "Leica", "DemonWarrior",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "PapulatusClock"))
#### EliteBloodTooth, AkairumPriest, Victor, Ifrit
#### STR Type 3-3 : STR, SummonedDuration=F, BuffDuration=F, FinalATKDMR=T, CRR=T
MLTypeS33 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "Pierre",
                                                 "Puso", "Bigeup", "IncongruitySoul", "FrankenRoid", "Leica",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "Targa"))
#### EliteBloodTooth, KingSlime, ThiefCrow, Ifrit
#### STR Type 3-5 : STR, SummonedDuration=T, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeS35 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "BigPumpkin",
                                                 "CoupleYeti", "GoldYeti", "IncongruitySoul", "FrankenRoid", "Leica",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "KingSlime"))
#### EliteBloodTooth, Giant, ThiefCrow, Ifrit
#### HP Type 3-1 : HP(DemonAvenger), SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeH31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "GiantDarkSoul", "InnerRage", "Tinman", "SmallBalloon", "KingCastleGolem",
                                                 "Bigeup", "IncongruitySoul", "Dodo", "ModifiedFireBoar",
                                                 "ToyKnight", "Oberon", "SeriousViking", "KingSlime", "NineTailedFox", "Giant"))
#### EliteBloodTooth, StrangeMonster, ThiefCrow, PrimeMinister
#### DEX Type 3-1 : DEX, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeD31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterRelic",
                                                 "Bigeup", "IncongruitySoul", "Lilinoch", "Taeryun", "AkairumPriest",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox"))
#### EliteBloodTooth, KingSlime, GuwaruVestige, Ifrit
#### DEX Type 3-2 : DEX, SummonedDuration=F, BuffDuration=F, FinalATKDMR=T, CRR=T
MLTypeD32 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "Pierre",
                                                 "Puso", "Bigeup", "IncongruitySoul", "Lilinoch", "Taeryun", "AkairumPriest",
                                                 "Targa", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox"))
#### EliteBloodTooth, KingSlime, ThiefCrow, Ifrit
#### DEX Type 3-3 : DEX, SummonedDuration=T, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeD33 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "BigPumpkin",
                                                 "CoupleYeti", "GoldYeti", "IncongruitySoul", "Lilinoch", "Taeryun",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox", "KingSlime"))
#### EliteBloodTooth, Giant, ThiefCrow, Ifrit
#### DEX Type 3-4 : DEX, SummonedDuration=T, BuffDuration=F, FinalATKDMR=T, CRR=F
MLTypeD34 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "Tinman", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "Pierre",
                                                 "CoupleYeti", "GoldYeti", "IncongruitySoul", "Lilinoch", "Taeryun", "PapulatusClock",
                                                 "Targa", "Oberon", "SeriousViking", "WolmyoThief", "NineTailedFox"))
#### EliteBloodTooth, Victor, ThiefCrow, Ifrit
#### INT Type 3-1 : INT, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeI31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterMargana",
                                                 "Bigeup", "IncongruitySoul", "Timer", "MachineMT09", "AkairumPriest", "DemonMagician",
                                                 "ToyKnight", "Oberon", "WolmyoThief", "NineTailedFox", "Ifrit"))
#### EliteBloodTooth, KingSlime, ThiefCrow, SeriousViking
#### INT Type 3-2 : INT, SummonedDuration=F, BuffDuration=T, FinalATKDMR=F, CRR=T
MLTypeI32 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "Will", "VonBon", "HugeSpider",
                                                 "MiniSpider", "IncongruitySoul", "Timer", "MachineMT09", "AkairumPriest", "ReeperSpecter",
                                                 "ToyKnight", "Oberon", "WolmyoThief", "NineTailedFox"))
#### SeriousViking, EliteBloodTooth, KingSlime, Grief
#### INT Type 3-3 : INT, SummonedDuration=F, BuffDuration=T, FinalATKDMR=F, CRR=F
MLTypeI33 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
                                                 "Lazuli", "Tinman", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "Will", "VonBon", "HugeSpider",
                                                 "MiniSpider", "IncongruitySoul", "Timer", "MachineMT09", "AkairumPriest", "PapulatusClock",
                                                 "ToyKnight", "Oberon", "WolmyoThief", "NineTailedFox"))
#### SeriousViking, EliteBloodTooth, Victor, Grief
#### INT Type 3-5 : INT, SummonedDuration=T, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeI35 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "BigPumpkin",
                                                 "CoupleYeti", "GoldYeti", "IncongruitySoul", "Timer", "MachineMT09",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "Ifrit", "NineTailedFox"))
#### EliteBloodTooth, Giant, ThiefCrow, KingSlime
#### LUK Type 3-1 : LUK, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeL31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "MasterHisab",
                                                 "Bigeup", "IncongruitySoul", "Dunas", "Hogeol", "Papulatus",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "Ifrit", "NineTailedFox"))
#### EliteBloodTooth, ThiefCrow, KingSlime, Ergoth
#### LUK Type 3-2 : LUK, SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=T, CoolTimeReset=T
MLTypeL32 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "Lapis", "SmallBalloon", "BigBalloon",
                                                 "Bigeup", "IncongruitySoul", "Dunas", "Hogeol", "Papulatus",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "Ifrit", "NineTailedFox"))
#### EliteBloodTooth, ThiefCrow, KingSlime, PrimeMinister
#### LUK Type 3-3 : LUK, SummonedDuration=T, BuffDuration=F, FinalATKDMR=F, CRR=T
MLTypeL33 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "BigPumpkin",
                                                 "CoupleYeti", "GoldYeti", "IncongruitySoul", "Dunas", "Hogeol", "NineTailedFox",
                                                 "ToyKnight", "Oberon", "SeriousViking", "WolmyoThief", "Ifrit"))
#### EliteBloodTooth, Giant, ThiefCrow, KingSlime
#### Allstat Type 3-1 : ALLSTAT(Xenon), SummonedDuration=F, BuffDuration=F, FinalATKDMR=F, CRR=F
MLTypeA31 <- MonsterLifeSpecs(MonsterLifeData, c("VonLeon", "Cygnus", "BlackViking", "Hilla", "Akairum",
                                                 "Lazuli", "RomantistKingSlime", "Scarecrow", "Phantom", "Eunwol", "Rang",
                                                 "LightLumi", "DarkLumi", "EquilLumi", "Lania", "DarkMageShadow", "Lapis", "Tinman",
                                                 "Bigeup", "IncongruitySoul", "Beril", "PapulatusClock",
                                                 "ToyKnight", "Oberon", "Victor", "SeriousViking", "WolmyoThief", "NineTailedFox"))
#### EliteBloodTooth, AkariumPriest, Ifrit, KingSlime
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rkolada.R
\name{rkolada}
\alias{rkolada}
\title{rkolada}
\usage{
rkolada(...)
}
\description{
Access the Kolada API
}
| /man/rkolada.Rd | no_license | OxanaFalk/rkolada | R | false | true | 196 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rkolada.R
\name{rkolada}
\alias{rkolada}
\title{rkolada}
\usage{
rkolada(...)
}
\description{
Access the Kolada API
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aapply.R
\name{aapply}
\alias{aapply}
\title{aapply is like sapply but guaranteed to return a matrix}
\usage{
aapply(X, FUN, ...)
}
\arguments{
\item{X}{a vector (atomic or list) or an expression object. Other objects (including classed objects) will be coerced by base::as.list.}
\item{FUN}{the function to be applied to each element of X. In the case of functions like +, \%*\%, the function name must be backquoted or quoted.}
\item{...}{optional arguments to \code{FUN}}
}
\description{
aapply is like sapply but guaranteed to return a matrix
}
| /man/aapply.Rd | no_license | AnthonyEbert/acetools | R | false | true | 625 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aapply.R
\name{aapply}
\alias{aapply}
\title{aapply is like sapply but guaranteed to return a matrix}
\usage{
aapply(X, FUN, ...)
}
\arguments{
\item{X}{a vector (atomic or list) or an expression object. Other objects (including classed objects) will be coerced by base::as.list.}
\item{FUN}{the function to be applied to each element of X. In the case of functions like +, \%*\%, the function name must be backquoted or quoted.}
\item{...}{optional arguments to \code{FUN}}
}
\description{
aapply is like sapply but guaranteed to return a matrix
}
|
# Seed --------------------------------------------------------------------
# Fixed seed so the random thinning of posterior draws below is reproducible.
set.seed(150)
# Timing ------------------------------------------------------------------
# NOTE(review): tictoc::tic() is started here but no matching tictoc::toc()
# appears in this script, so the elapsed time is never reported -- confirm
# whether toc() lives in a wrapper that sources this file.
tictoc::tic()
# Libraries ---------------------------------------------------------------
library(here)
library(tidyverse)
library(tidybayes)
# Helpers -----------------------------------------------------------------
# Extract posterior draws of y_hat[x] from a fitted model object (anything
# tidybayes::spread_draws() understands), thinned to at most `n_keep`
# randomly chosen posterior draws.
#
# fit:    fitted model whose draws contain a `y_hat[x]` quantity.
# n_keep: maximum number of posterior draws to retain (default 100,
#         matching the original hard-coded behavior).
# Returns a tibble with columns .draw, x, y_hat.
get_y_hat <- function(fit, n_keep = 100) {
  draws <- spread_draws(fit, y_hat[x])
  # min() guards against models with fewer than `n_keep` posterior draws,
  # where sample(..., size = 100) would error out.
  all_draws <- unique(draws$.draw)
  which_keep <- sample(all_draws, size = min(n_keep, length(all_draws)))
  draws %>%
    ungroup() %>%
    filter(.draw %in% which_keep) %>%
    select(.draw, x, y_hat)
}
# Parse the integer MSA identifier out of a fit filename such as
# "bp-reg_msa-12345.rds".  Yields NA_integer_ when no "msa-<digits>"
# token is present in the filename.
extract_msa <- function(filename) {
  msa_token <- str_extract(filename, "msa-[0-9]+")
  as.integer(str_extract(msa_token, "[0-9]+"))
}
# Load Data ---------------------------------------------------------------
message("Loading data...")
# One saved model fit per MSA, written earlier as temp/bp-reg_msa-<id>.rds.
fit_files <- list.files(here("temp"),
                        pattern = "bp-reg_msa-[0-9]+.rds",
                        full.names = TRUE)
# Extract Deltas ----------------------------------------------------------
message("Extracting deltas...")
# One row per fit file: the loaded model object plus its parsed MSA id.
fit_tb <- tibble(filename = fit_files,
                 fit = map(filename, read_rds),
                 msa = map_int(filename, extract_msa))
# Flatten to one row per (msa, .draw, x) with the posterior y_hat value.
y_hat_tb <- fit_tb %>%
  transmute(msa,
            y_hat_tb = map(fit, get_y_hat)) %>%
  unnest(y_hat_tb)
# Save --------------------------------------------------------------------
message("Saving...")
write_rds(y_hat_tb, here("out", "bayes_y_hats.rds"))
| /09_extract-y-hats.R | no_license | adviksh/tipping-points-replication | R | false | false | 1,569 | r | # Seed --------------------------------------------------------------------
set.seed(150)
# Timing ------------------------------------------------------------------
tictoc::tic()
# Libraries ---------------------------------------------------------------
library(here)
library(tidyverse)
library(tidybayes)
# Helpers -----------------------------------------------------------------
get_y_hat <- function(fit) {
draws <- spread_draws(fit, y_hat[x])
# only keep 100 samples
which_keep <- sample(unique(draws$.draw), size = 100)
draws %>%
ungroup() %>%
filter(.draw %in% which_keep) %>%
select(.draw, x, y_hat)
}
extract_msa <- function(filename) {
msa_slug <- str_extract(filename, "msa-[0-9]+")
msa <- str_extract(msa_slug, "[0-9]+")
as.integer(msa)
}
# Load Data ---------------------------------------------------------------
message("Loading data...")
fit_files <- list.files(here("temp"),
pattern = "bp-reg_msa-[0-9]+.rds",
full.names = TRUE)
# Extract Deltas ----------------------------------------------------------
message("Extracting deltas...")
fit_tb <- tibble(filename = fit_files,
fit = map(filename, read_rds),
msa = map_int(filename, extract_msa))
y_hat_tb <- fit_tb %>%
transmute(msa,
y_hat_tb = map(fit, get_y_hat)) %>%
unnest(y_hat_tb)
# Save --------------------------------------------------------------------
message("Saving...")
write_rds(y_hat_tb, here("out", "bayes_y_hats.rds"))
|
#!/usr/bin/env Rscript
# Benchmark tidygeocoder::geocode() on a 100-row sample of Durham addresses
# and extrapolate the time needed to geocode the full `durham` dataset.
# NOTE(review): assumes the script is launched from tests/testthat so that
# dirname(dirname(getwd())) resolves to the project root -- confirm.
# Load renv.
root <- dirname(dirname(getwd()))
renv::load(root)
# Other imports.
suppressPackageStartupMessages({
  library(dplyr)
  library(tidygeocoder)
  library(openRealestate)
  library(microbenchmark)
})
# Load the test data. 100 rows of Durham addresses.
datadir <- file.path(root,"data")
myfile <- file.path(datadir,"durham_test.rda")
load(myfile) # durham_test
# Create column with addresses.
df <- durham_test
df$ADDR <- paste(trimws(df$SITE_ADDR),"Durham NC")
# Encode addresses as lat/lon.
df <- df %>% geocode(ADDR) # Initial impression: geocode is slow!
# But, how long does it take?
# NOTE(review): geocode() queries an online geocoding service, so these
# timings include network latency and are subject to rate limiting.
message("\nEvaluating time needed to geocode 100 addresses...")
x100_rows <- df
benchmark <- microbenchmark(geocode(x100_rows,ADDR), times=3)
print(benchmark)
# How long to encode a larger dataset?
data(durham) # From openRealestate
# Calculate average time per row given the test above.
# microbenchmark reports times in nanoseconds; 1/10^9 converts to seconds.
time_per_row <- mean(1/10^9 * benchmark$time/nrow(df))
# Seconds per row * rows, converted to hours.
time_durham <- time_per_row * nrow(durham) / (60*60)
# Status.
message(paste("\nPredicted time to encode",formatC(nrow(durham),big.mark=","),
              "addresses:",round(time_durham,3),"hours."))
| /tests/testthat/timing-test.R | permissive | twesleyb/tidygeocoder | R | false | false | 1,164 | r | #!/usr/bin/env Rscript
# Load renv.
root <- dirname(dirname(getwd()))
renv::load(root)
# Other imports.
suppressPackageStartupMessages({
library(dplyr)
library(tidygeocoder)
library(openRealestate)
library(microbenchmark)
})
# Load the test data. 100 rows of Durham addresses.
datadir <- file.path(root,"data")
myfile <- file.path(datadir,"durham_test.rda")
load(myfile) # durham_test
# Create column with addresses.
df <- durham_test
df$ADDR <- paste(trimws(df$SITE_ADDR),"Durham NC")
# Encode addresses as lat/lon.
df <- df %>% geocode(ADDR) # Initial impression: geocode is slow!
# But, how long does it take?
message("\nEvaluating time needed to geocode 100 addresses...")
x100_rows <- df
benchmark <- microbenchmark(geocode(x100_rows,ADDR), times=3)
print(benchmark)
# How long to encode a larger dataset?
data(durham) # From openRealestate
# Calculate average time per row given the test above.
time_per_row <- mean(1/10^9 * benchmark$time/nrow(df))
time_durham <- time_per_row * nrow(durham) / (60*60)
# Status.
message(paste("\nPredicted time to encode",formatC(nrow(durham),big.mark=","),
"addresses:",round(time_durham,3),"hours."))
|
library(glmnet)
# Fit a cross-validated elastic-net regression on the breast training set and
# append the fitted coefficient path summary to the model log file.
# Training data layout: column 1 is the response; columns 4+ are features.
mydata = read.table("./TrainingSet/RF/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
# 10-fold CV elastic net (alpha = 0.45 mixes ridge and lasso penalties),
# scored by mean absolute error, without standardizing the features.
# Renamed from `glm` to `cv_fit` to avoid shadowing stats::glm().
cv_fit = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.45,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/breast/breast_056.txt',append=TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/Classifier/breast/breast_056.R | no_license | leon1003/QSMART | R | false | false | 351 | r | library(glmnet)
mydata = read.table("./TrainingSet/RF/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.45,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/breast/breast_056.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Exploratory text analysis of "Tarzan of the Apes": word/sentence length,
# clustering, WordNet POS tagging, zipfR frequency modeling, n-grams, and
# corpus exploration with quanteda/corpustools.  Helper functions
# (book_to_vector, clean_data, filter_nouns, ...) come from text-functions.R.
library(here)
library(tidyverse)
library(tidytext)
library(stringr)
library(textreuse)
# Sys.setenv(WNHOME = "/Library/Frameworks/R.framework/Versions/4.0/Resources/library/wordnet")
library(wordnet)
library(zipfR)
library(corpustools)
library(quanteda)
source("text-functions.R")
# Load the book as a character vector, split into chapters, and build a
# chapter-indexed data frame.
tarzan.file <- here("data", "TarzanOfTheApes.txt")
tarzan.lines <- book_to_vector(tarzan.file, remove_last = 3)
tarzan.chapters <- book_to_chapters(tarzan.lines)
chapter_names <- get_chapter_names(tarzan.lines)
lecuture_functions(tarzan.lines)
tarzan.frame <- data.frame(chapter=chapter_names, text=tarzan.chapters)
str(tarzan.frame)
#10 longest words and sentences
# NOTE(review): top_n() is superseded in dplyr; slice_max() is the modern
# equivalent -- left as-is to preserve behavior on ties.
longest_words <- tarzan.frame %>%
  unnest_tokens(word, text) %>%
  mutate(word_size=nchar(word)) %>%
  arrange(desc(word_size)) %>%
  top_n(10) %>%
  mutate(order = fct_reorder(word, word_size))
longest_sentences <- tarzan.frame %>%
  unnest_tokens(sentence, text, token = "sentences") %>%
  mutate(sentence_len=nchar(sentence)) %>%
  arrange(desc(sentence_len)) %>%
  top_n(10) %>%
  mutate(head=substr(sentence, 1, 20)) %>%
  mutate(order = fct_reorder(head, sentence_len))
longest_words
longest_sentences
# Horizontal bar charts of the longest words/sentences, colored by chapter.
ggplot(longest_words) +
  aes_string(x='order', y='word_size', fill='chapter') +
  geom_bar(stat='identity', position='dodge', color='black') +
  coord_flip()
ggplot(longest_sentences) +
  aes_string(x='order', y='sentence_len', fill='chapter') +
  geom_bar(stat='identity', position='dodge', color='black') +
  coord_flip()
#clean data --> filter out stop words, remove numbers and punctuation, and (possibly) removing sparse words
cleaned_data <- clean_data(tarzan.lines)
cleaned_data
#Dendrogram
# Hierarchical clustering (Ward linkage) on term distances from the
# term-document matrix.
cleaned_data_tdm <- tm::TermDocumentMatrix(cleaned_data)
freqTerms <- tm::findFreqTerms(cleaned_data_tdm)
cleaned_df <- as.data.frame(cleaned_data_tdm[[1]])
cleaned_dist <- dist(cleaned_df)
dendrogram <- hclust(cleaned_dist, method="ward.D2")
#WordNet to mark the parts of speech for the 10 longest sentences
#found in part b for nouns and verbs having a length of 5 or greater.
source("text-functions.R")
# NOTE(review): vcorpus[["2"]] hard-codes the second corpus document as the
# sentence text -- presumably an artifact of how VCorpus wraps the data
# frame; confirm this index is stable.
vcorpus <- VCorpus(VectorSource(longest_sentences))
just_sentences <- list(vcorpus[["2"]][["content"]])
just_sentences_over_five <- lapply(just_sentences, remove_words_under_len_five)
just_sentences_over_five
#Get list of all words over length 5
words_over_five <- lapply(just_sentences_over_five, get_words)
words_over_five
#get all nouns
result <- lapply(words_over_five, filter_nouns)
#remove nulls and compress
nouns <- unlist(result, recursive = FALSE)
nouns[sapply(nouns, is.list)] <- NULL
nouns
#and verbs
result <- lapply(words_over_five, filter_verbs)
verbs <- unlist(result, recursive = FALSE)
verbs[sapply(verbs, is.list)] <- NULL
verbs
#Analyze word frequency using functions from package zipfR.
all_words <- lapply(just_sentences, get_words)
all_words
# Build term-document and document-term matrices to get corpus-wide word
# frequencies.
tdmblog <- TermDocumentMatrix(cleaned_data, control = list(removePunctuation = TRUE, removeNumbers = TRUE, stopwords = TRUE))
dtmblog <- DocumentTermMatrix(cleaned_data)
m <- as.matrix(tdmblog)
v <- sort(rowSums(m), decreasing=TRUE)
freq <- sort(colSums(as.matrix(dtmblog)), decreasing=TRUE)
wfblog <- data.frame(word=names(freq), freq=freq)
#Do analysis on frequencies
wfblog <- na.omit(wfblog)
summary(wfblog)
wfblog_table <- table(wfblog$freq)
length(wfblog$freq)
wfblog$word
barplot(wfblog$freq, names.arg = wfblog$word, main = "Frequency of Words", xlab = "Word", ylab = "# Times used",)
## zipfr work
# Convert the frequency table into a zipfR type-frequency list / spectrum.
wfblog_list <- data.frame(as.list(wfblog))
numeric_word_data <- data.matrix(wfblog$word)
numeric_word_data
indexs <- seq(from = 1, to = length(numeric_word_data), by = 1)
wfblog_tf <- tfl(wfblog$freq, k=indexs)
wfblog_spc <- tfl2spc(wfblog_tf)
# compute Zipf-Mandelbrot model from data and look at model summary
zm <- lnre("zm", wfblog_spc)
zm
## plot observed and expected spectrum
#TODO: Add words to numbers
zm.spc <- lnre.spc(zm,N(wfblog_spc))
plot(wfblog_spc, zm.spc, xlab="Most common words", ylab="Frequency",
     ylim=c(0,4500))
# NOTE(review): legend y-coordinate 16000 lies outside ylim=c(0,4500), so
# the legend may be drawn off the visible plot area -- verify placement.
legend(27,16000,c("Observed Frequency", "Expected Frequency"),
       col=c("black", "red"),pch= 15,box.col="white", cex=1)
#TODO: Another zipfr visualization?
#Only do for words of length 6
vcorpus <- VCorpus(VectorSource(longest_sentences))
just_sentences <- list(vcorpus[["2"]][["content"]])
just_sentences_over_six <- lapply(just_sentences, remove_words_under_len_six)
just_sentences_over_six
#Get list of all words over length 5
words_over_six <- lapply(just_sentences_over_six, get_words)
words_over_six
#Generate bigrams and trigrams for all words whose length is greater than 6 characters in the 10 longest sentences
# NOTE(review): unnest_tokens() expects a data frame, but words_over_six is
# a list -- this pipeline likely errors or silently coerces; confirm intent.
bigrams <- words_over_six %>%
  unnest_tokens(bigram, words_over_six, token = "ngrams", n = 2)
trigrams <- words_over_six %>%
  unnest_tokens(trigram, words_over_six, token = "ngrams", n = 3)
bigram_counts <- bigrams %>%
  count(bigram, sort = TRUE)
trigram_counts <- trigrams %>%
  count(trigram, sort = TRUE)
# bigrams
# trigrams
# bigram_counts
# trigram_counts
#Process the text from the document using quanteda
#Describe the methods you use, the results you get, and what you understand about the theme of the book.
# Word cloud of terms appearing at least 10 times, stop words removed.
dfm_inaug <- corpus(tarzan.lines) %>%
  dfm(remove = stopwords('english'), remove_punct = TRUE) %>%
  dfm_trim(min_termfreq = 10, verbose = FALSE)
set.seed(100)
textplot_wordcloud(dfm_inaug)
#Process the text from the document using corpustools
tc = create_tcorpus(tarzan.lines, doc_column = 'doc_id', text_columns='tokens')
tc$preprocess(use_stemming = T, remove_stopwords=T)
tc$tokens
#search for certain terms
dfm = get_dfm(tc, 'feature')
hits = search_features(tc, query = c('Savage# savage*','Apes# apes*', 'Man# man*', 'Jungle# jungle*', 'Good# good*', 'Bad# bad*'))
summary(hits)
#get relationships between words
# Semantic network: conditional-probability co-occurrence between the
# queried concepts, shown as an adjacency matrix.
g = semnet(hits, measure = 'con_prob')
igraph::get.adjacency(g, attr = 'weight')
plot(hits)
#TODO: Not sure what to do with this
# Process the text from the document using stringi
library(stringi)
| /text-analytics.R | no_license | sneakers-n-servers/text-analytics | R | false | false | 5,993 | r | library(here)
library(tidyverse)
library(tidytext)
library(stringr)
library(textreuse)
# Sys.setenv(WNHOME = "/Library/Frameworks/R.framework/Versions/4.0/Resources/library/wordnet")
library(wordnet)
library(zipfR)
library(corpustools)
library(quanteda)
source("text-functions.R")
tarzan.file <- here("data", "TarzanOfTheApes.txt")
tarzan.lines <- book_to_vector(tarzan.file, remove_last = 3)
tarzan.chapters <- book_to_chapters(tarzan.lines)
chapter_names <- get_chapter_names(tarzan.lines)
lecuture_functions(tarzan.lines)
tarzan.frame <- data.frame(chapter=chapter_names, text=tarzan.chapters)
str(tarzan.frame)
#10 longest words and sentences
longest_words <- tarzan.frame %>%
unnest_tokens(word, text) %>%
mutate(word_size=nchar(word)) %>%
arrange(desc(word_size)) %>%
top_n(10) %>%
mutate(order = fct_reorder(word, word_size))
longest_sentences <- tarzan.frame %>%
unnest_tokens(sentence, text, token = "sentences") %>%
mutate(sentence_len=nchar(sentence)) %>%
arrange(desc(sentence_len)) %>%
top_n(10) %>%
mutate(head=substr(sentence, 1, 20)) %>%
mutate(order = fct_reorder(head, sentence_len))
longest_words
longest_sentences
ggplot(longest_words) +
aes_string(x='order', y='word_size', fill='chapter') +
geom_bar(stat='identity', position='dodge', color='black') +
coord_flip()
ggplot(longest_sentences) +
aes_string(x='order', y='sentence_len', fill='chapter') +
geom_bar(stat='identity', position='dodge', color='black') +
coord_flip()
#clean data --> filter out stop words, remove numbers and punctuation, and (possibly) removing sparse words
cleaned_data <- clean_data(tarzan.lines)
cleaned_data
#Dendrogram
cleaned_data_tdm <- tm::TermDocumentMatrix(cleaned_data)
freqTerms <- tm::findFreqTerms(cleaned_data_tdm)
cleaned_df <- as.data.frame(cleaned_data_tdm[[1]])
cleaned_dist <- dist(cleaned_df)
dendrogram <- hclust(cleaned_dist, method="ward.D2")
#WordNet to mark the parts of speech for the 10 longest sentences
#found in part b for nouns and verbs having a length of 5 or greater.
source("text-functions.R")
vcorpus <- VCorpus(VectorSource(longest_sentences))
just_sentences <- list(vcorpus[["2"]][["content"]])
just_sentences_over_five <- lapply(just_sentences, remove_words_under_len_five)
just_sentences_over_five
#Get list of all words over length 5
words_over_five <- lapply(just_sentences_over_five, get_words)
words_over_five
#get all nouns
result <- lapply(words_over_five, filter_nouns)
#remove nulls and compress
nouns <- unlist(result, recursive = FALSE)
nouns[sapply(nouns, is.list)] <- NULL
nouns
#and verbs
result <- lapply(words_over_five, filter_verbs)
verbs <- unlist(result, recursive = FALSE)
verbs[sapply(verbs, is.list)] <- NULL
verbs
#Analyze word frequency using functions from package zipfR.
all_words <- lapply(just_sentences, get_words)
all_words
tdmblog <- TermDocumentMatrix(cleaned_data, control = list(removePunctuation = TRUE, removeNumbers = TRUE, stopwords = TRUE))
dtmblog <- DocumentTermMatrix(cleaned_data)
m <- as.matrix(tdmblog)
v <- sort(rowSums(m), decreasing=TRUE)
freq <- sort(colSums(as.matrix(dtmblog)), decreasing=TRUE)
wfblog <- data.frame(word=names(freq), freq=freq)
#Do analysis on frequencies
wfblog <- na.omit(wfblog)
summary(wfblog)
wfblog_table <- table(wfblog$freq)
length(wfblog$freq)
wfblog$word
barplot(wfblog$freq, names.arg = wfblog$word, main = "Frequency of Words", xlab = "Word", ylab = "# Times used",)
## zipfr work
wfblog_list <- data.frame(as.list(wfblog))
numeric_word_data <- data.matrix(wfblog$word)
numeric_word_data
indexs <- seq(from = 1, to = length(numeric_word_data), by = 1)
wfblog_tf <- tfl(wfblog$freq, k=indexs)
wfblog_spc <- tfl2spc(wfblog_tf)
# compute Zipf-Mandelbrot model from data and look at model summary
zm <- lnre("zm", wfblog_spc)
zm
## plot observed and expected spectrum
#TODO: Add words to numbers
zm.spc <- lnre.spc(zm,N(wfblog_spc))
plot(wfblog_spc, zm.spc, xlab="Most common words", ylab="Frequency",
ylim=c(0,4500))
legend(27,16000,c("Observed Frequency", "Expected Frequency"),
col=c("black", "red"),pch= 15,box.col="white", cex=1)
#TODO: Another zipfr visualization?
#Only do for words of length 6
vcorpus <- VCorpus(VectorSource(longest_sentences))
just_sentences <- list(vcorpus[["2"]][["content"]])
just_sentences_over_six <- lapply(just_sentences, remove_words_under_len_six)
just_sentences_over_six
#Get list of all words over length 5
words_over_six <- lapply(just_sentences_over_six, get_words)
words_over_six
#Generate bigrams and trigrams for all words whose length is greater than 6 characters in the 10 longest sentences
bigrams <- words_over_six %>%
unnest_tokens(bigram, words_over_six, token = "ngrams", n = 2)
trigrams <- words_over_six %>%
unnest_tokens(trigram, words_over_six, token = "ngrams", n = 3)
bigram_counts <- bigrams %>%
count(bigram, sort = TRUE)
trigram_counts <- trigrams %>%
count(trigram, sort = TRUE)
# bigrams
# trigrams
# bigram_counts
# trigram_counts
#Process the text from the document using quanteda
#Describe the methods you use, the results you get, and what you understand about the theme of the book.
dfm_inaug <- corpus(tarzan.lines) %>%
dfm(remove = stopwords('english'), remove_punct = TRUE) %>%
dfm_trim(min_termfreq = 10, verbose = FALSE)
set.seed(100)
textplot_wordcloud(dfm_inaug)
#Process the text from the document using corpustools
tc = create_tcorpus(tarzan.lines, doc_column = 'doc_id', text_columns='tokens')
tc$preprocess(use_stemming = T, remove_stopwords=T)
tc$tokens
#search for certain terms
dfm = get_dfm(tc, 'feature')
hits = search_features(tc, query = c('Savage# savage*','Apes# apes*', 'Man# man*', 'Jungle# jungle*', 'Good# good*', 'Bad# bad*'))
summary(hits)
#get relationships between words
g = semnet(hits, measure = 'con_prob')
igraph::get.adjacency(g, attr = 'weight')
plot(hits)
#TODO: Not sure what to do with this
# Process the text from the document using stringi
library(stringi)
|
# Copyright (C) 2014 - 2015 Jack O. Wasey
#
# This file is part of icd9.
#
# icd9 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# icd9 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with icd9. If not, see <http:#www.gnu.org/licenses/>.
context("test reshaping wide to long")
# Fixtures: the same visit/diagnosis data in long form (one row per
# visit-code pair) and in wide form (one row per visit, codes in columns).
longdf <- data.frame(visitId = c("a", "b", "b", "c"),
                     icd9 = c("441", "4424", "443", "441"))
widedf <- data.frame(visitId = c("a", "b", "c"),
                     icd9_001 = c("441", "4424", "441"),
                     icd9_002 = c(NA, "443", NA))
test_that("long data to wide data", {
  # Default reshape pads missing codes with NA and names columns icd_00x.
  longcmp <- data.frame(visitId = c("a", "b", "c"),
                        icd_001 = c("441", "4424", "441"),
                        icd_002 = c(NA, "443", NA))
  expect_equal(icd9LongToWide(longdf, return.df = TRUE), longcmp)
  # min.width forces at least three code columns, even if the extra is all-NA.
  longcmp2 <- data.frame(visitId = c("a", "b", "c"),
                         icd_001 = c("441", "4424", "441"),
                         icd_002 = c(NA, "443", NA),
                         icd_003 = c(NA, NA, NA))
  expect_equal(icd9LongToWide(longdf, min.width = 3, return.df = TRUE), longcmp2)
  # Custom visit/code column names and a custom output prefix are honored.
  longdf2 <- data.frame(i = c("441", "4424", "443", "441"),
                        v = c("a", "b", "b", "c"))
  expect_equal(names(icd9LongToWide(longdf2,
                                    visitId = "v",
                                    icd9Field = "i",
                                    prefix = "ICD10_", return.df = TRUE)),
               c("v", "ICD10_001", "ICD10_002"))
})
test_that("wide data to long data", {
  # expect_equivalent ignores attribute differences (e.g. row names).
  expect_equivalent(icd9WideToLong(widedf),
                    longdf)
  # Empty strings in the wide format should be treated like NA (dropped).
  widedfempty <- data.frame(visitId = c("a", "b", "c"),
                            icd9_001 = c("441", "4424", "441"),
                            icd9_002 = c("", "443", ""))
  expect_equivalent(icd9WideToLong(widedfempty),
                    longdf)
  # Result should be deterministic across repeated calls.
  expect_equal(icd9WideToLong(widedfempty),
               icd9WideToLong(widedfempty))
})
| /icd9/tests/testthat/test-reshape.R | no_license | ingted/R-Examples | R | false | false | 2,410 | r | # Copyright (C) 2014 - 2015 Jack O. Wasey
#
# This file is part of icd9.
#
# icd9 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# icd9 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with icd9. If not, see <http:#www.gnu.org/licenses/>.
context("test reshaping wide to long")
longdf <- data.frame(visitId = c("a", "b", "b", "c"),
icd9 = c("441", "4424", "443", "441"))
widedf <- data.frame(visitId = c("a", "b", "c"),
icd9_001 = c("441", "4424", "441"),
icd9_002 = c(NA, "443", NA))
test_that("long data to wide data", {
longcmp <- data.frame(visitId = c("a", "b", "c"),
icd_001 = c("441", "4424", "441"),
icd_002 = c(NA, "443", NA))
expect_equal(icd9LongToWide(longdf, return.df = TRUE), longcmp)
longcmp2 <- data.frame(visitId = c("a", "b", "c"),
icd_001 = c("441", "4424", "441"),
icd_002 = c(NA, "443", NA),
icd_003 = c(NA, NA, NA))
expect_equal(icd9LongToWide(longdf, min.width = 3, return.df = TRUE), longcmp2)
longdf2 <- data.frame(i = c("441", "4424", "443", "441"),
v = c("a", "b", "b", "c"))
expect_equal(names(icd9LongToWide(longdf2,
visitId = "v",
icd9Field = "i",
prefix = "ICD10_", return.df = TRUE)),
c("v", "ICD10_001", "ICD10_002"))
})
test_that("wide data to long data", {
expect_equivalent(icd9WideToLong(widedf),
longdf)
widedfempty <- data.frame(visitId = c("a", "b", "c"),
icd9_001 = c("441", "4424", "441"),
icd9_002 = c("", "443", ""))
expect_equivalent(icd9WideToLong(widedfempty),
longdf)
expect_equal(icd9WideToLong(widedfempty),
icd9WideToLong(widedfempty))
})
|
# Uses the procedure outlined by Breiman to estimate the out-of-bag (OOB)
# loss at each observation 1,...,N in 'data'.
# sampleFcn is a function that takes the indices 1,...,N and returns
# a single sample to be the LEARNING data.  Default is a size-N bootstrap
# with replacement.
#
# Arguments:
#   model_tree.obj - fitted "itree" object; its call supplies the formula,
#                    method, cp and (unless overridden) minsplit/minbucket.
#   data           - the data.frame the tree was fit on.
#   nboot          - number of resampling rounds.
#   minsplit, minbucket - optional overrides: an integer, a fraction of N
#                    (values < 1), or a function of N.
#   lossfcn        - optional custom loss taking the N x nboot response
#                    matrix and the bagged predictions; defaults to squared
#                    error (anova) or misclassification (class).
# Value: list(bagpred, holdout.predictions, avgOOBloss).
getOOBLoss <- function(model_tree.obj,data,nboot=100,
sampleFcn = function(idx_vec){sample(idx_vec,replace=TRUE)},
minsplit, minbucket,lossfcn)
{
if(!inherits(model_tree.obj, "itree")) stop("Not legitimate itree object")
#get the whole response vector.
# Response name = LHS of the model formula.  NOTE(review): sub() removes
# only the FIRST space (deparse leaves a trailing blank before "~");
# trimws() would be more robust here.
yname <- strsplit(deparse(model_tree.obj$call$formula),"~")[[1]][1]
yname <- sub(" ","",yname)
treemethod <- model_tree.obj$method #passed to rpart
mm <- model_tree.obj$method #collapsed below to pick the loss function
# Collapse itree's method variants into the two supported loss families.
if(mm %in% c("class_extremes","class_purity","class")){
mm <- "class"
Y <- match(data[,yname], levels(data[,yname])) #needs to be numeric, not string
}
if(mm %in% c("anova","regression_purity","regression_extremes")){
mm <- "anova"
Y <- data[,yname]
}
#print error for non anova/class methods
if(!(mm %in% c("class","anova"))){
stop("getOOBLoss not defined for this method.")
}
# Splitting parameters: the "extremes" methods keep an unevaluated call in
# model_tree.obj$call$parms, so it must be eval()'d before reuse.
if(treemethod=="anova"){
ppp <- model_tree.obj$parms
}
else{
ppp <- model_tree.obj$call$parms
if(treemethod=="class_extremes"){
ppp <- eval(ppp)
}
if(treemethod=="regression_extremes"){
ppp <- eval(ppp)
}
}
#some constants
N <- nrow(data)
p <- ncol(data)
idx <- 1:N
#what are the nodesizes?
# Default to the fitted tree's control values; otherwise interpret the
# argument as a fraction of N (< 1), an integer count, or a function of N.
if(missing(minsplit)){ minsplit_final <- eval(model_tree.obj$control$minsplit) }
else{
if(is.numeric(minsplit)){
if(minsplit<1){ minsplit_final <- round(N*minsplit)} #assume it's a fraction of N
else{ minsplit_final <- round(minsplit)}
}else{
if(class(minsplit)!="function"){
stop("Invalid minsplit argument. Pass a function of N, a fraction or an integer.")
}
minsplit_final <- minsplit(N)
}
}
if(missing(minbucket)){minbucket_final <- eval(model_tree.obj$control$minbucket) }
else{
if(is.numeric(minbucket)){
if(minbucket<1){ minbucket_final <- round(N*minbucket)} #assume it's a fraction of N
else{ minbucket_final <- round(minbucket)}
}else{
if(class(minbucket)!="function"){
stop("Invalid minbucket argument. Pass a function of N, a fraction or an integer.")
}
minbucket_final <- minbucket(N)
}
}
# place to hold out-of-sample predictions
# holdout.predictions[i,j] = oob pred on obs of i in jth train sample
# = NA if obs i is insample for run j.
holdout.predictions <- matrix(NA,nrow=N,ncol=nboot)
#bootstrap/xval runs...
for(i in 1:nboot){
idx.sub1 <- sampleFcn(idx)
idx.sub2 <- setdiff(idx,idx.sub1) #those not in 1st subsample (the OOB rows)
insample <- data[idx.sub1,]; outsample <- data[idx.sub2,]
#refit a tree on the learning sample with the same settings as the original
temp <- itree(eval(model_tree.obj$call$formula),data=insample,method=treemethod,
minbucket= minbucket_final,
minsplit = minsplit_final,
cp = eval(model_tree.obj$control$cp),
parms = ppp,xval=0)
#get predictions for the held-out rows only
if(mm=="class"){
preds <- predict(temp,outsample,type="class")
}else{
preds <- predict(temp,outsample)
}
holdout.predictions[idx.sub2,i] <- preds
}#end of bootstrap runs.
#now clean up and format for output.
cnames <- paste("xval",(1:nboot),sep="")
colnames(holdout.predictions) <- cnames
# Number of rounds in which each observation was actually out of bag.
# NOTE(review): an observation never held out gives num.not.na == 0 and a
# NaN average loss below.
num.not.na <- apply(holdout.predictions,1,function(temp){sum(!is.na(temp))})
if(mm=="anova"){
# Bagged prediction = mean over the rounds where the observation was OOB.
preds <- apply(holdout.predictions,1,function(temp){mean(temp[!is.na(temp)])})
#varpred <- apply(holdout.predictions,1,function(temp){var(temp[!is.na(temp)])})
YY <- matrix(rep(Y,nboot),nrow=N,ncol=nboot)
if(missing(lossfcn)){
# Default regression loss: per-observation mean squared error over OOB rounds.
YY <- (YY-holdout.predictions)^2
YY[is.na(YY)] <- 0
mses <- apply(YY,1,sum)/num.not.na
}else{
mses <- lossfcn(YY,preds)
}
return(list(bagpred=preds,holdout.predictions=holdout.predictions,avgOOBloss=mses))
}
else{ #classification
# Majority vote over the OOB rounds (ties broken by first-seen value).
Mode <- function(x) {
x <- x[!is.na(x)]
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
preds <- apply(holdout.predictions,1,Mode)
YY <- matrix(rep(Y,nboot),nrow=N,ncol=nboot)
if(missing(lossfcn)){
# Default classification loss: per-observation misclassification rate.
YY <- (YY != holdout.predictions)
YY[is.na(YY)] <- 0
miscl <- apply(YY,1,sum)/num.not.na
}else{
miscl <- lossfcn(YY,preds)
}
return(list(bagpred=preds,holdout.predictions=holdout.predictions,avgOOBloss=miscl))
}
}
# Estimate per-leaf risk for a fitted itree by averaging a per-observation
# loss estimate (e.g. the avgOOBloss component returned by getOOBLoss)
# within each leaf.
# Returns an object of class "estNodeRisk": a list with
#   est.risk - mean estimated loss of the observations falling in each leaf
#   sd.loss  - standard deviation of those losses (NA for single-obs leaves)
# Leaves are ordered by their row number in tree.obj$frame.
estNodeRisk <- function(tree.obj,est_observation_loss){
#row number in tree.obj$frame of the leaf node for each obs
if(!inherits(tree.obj, "itree")) stop("Not legitimate itree object")
ww <- tree.obj$where
# One entry per distinct leaf, sorted.  Note: the ncol=1 argument is
# silently ignored by as.matrix, but a vector becomes a 1-column matrix
# anyway, so apply(..., 1, f) iterates over the leaves.
nodes <- as.matrix(sort(unique(ww)),ncol=1)
# Mean loss over the observations landing in leaf 'nodenum'.
node.avg.loss <- function(nodenum){
mean(est_observation_loss[ww==nodenum])
}
# Spread of the loss within leaf 'nodenum' (NA when the leaf has one obs).
node.sd.loss <- function(nodenum){
sd(est_observation_loss[ww==nodenum])
}
avg.loss <- apply(nodes,1,node.avg.loss)
sd.loss <- apply(nodes,1,node.sd.loss)
temp <- list(est.risk=avg.loss,sd.loss=sd.loss)
class(temp) <- "estNodeRisk"
return(temp)
} | /itree/R/holdoutNodePerformance.R | no_license | ingted/R-Examples | R | false | false | 4,987 | r | # Uses procedure outlined by Breiman to estimate the loss at each
# Estimate per-observation out-of-bag (OOB) loss, following Breiman.
#
# For each of 'nboot' rounds a learning sample is drawn from the rows of
# 'data' (default: size-N bootstrap with replacement), a tree is refit with
# the same formula/method/parameters as 'model_tree.obj', and predictions
# are recorded for the held-out rows only.  Per-observation losses are then
# averaged over the rounds in which each observation was out of bag.
#
# Arguments:
#   model_tree.obj - a fitted "itree" object; its call supplies the formula,
#                    method, cp and (unless overridden) minsplit/minbucket.
#   data           - the data.frame the tree was fit on.
#   nboot          - number of resampling rounds (default 100).
#   sampleFcn      - function mapping the indices 1..N to the LEARNING
#                    sample; default is a size-N bootstrap with replacement.
#   minsplit, minbucket - optional node-size overrides; each may be an
#                    integer, a fraction of N (values < 1), or a function of N.
#   lossfcn        - optional loss function taking the N x nboot response
#                    matrix and the bagged predictions; defaults to squared
#                    error (anova) or misclassification (class).
#
# Value: a list with
#   bagpred             - bagged OOB prediction per observation,
#   holdout.predictions - N x nboot matrix of OOB predictions (NA when the
#                         observation was in-sample for that round),
#   avgOOBloss          - average OOB loss per observation (NaN for an
#                         observation that was never out of bag).
getOOBLoss <- function(model_tree.obj, data, nboot = 100,
                       sampleFcn = function(idx_vec) { sample(idx_vec, replace = TRUE) },
                       minsplit, minbucket, lossfcn) {
  if (!inherits(model_tree.obj, "itree")) stop("Not legitimate itree object")

  # Response name = left-hand side of the model formula; deparse() leaves a
  # trailing blank before the "~", so trim surrounding whitespace.
  yname <- trimws(strsplit(deparse(model_tree.obj$call$formula), "~")[[1]][1])

  treemethod <- model_tree.obj$method  # passed through to the refits
  mm <- model_tree.obj$method          # collapsed below to pick the loss family
  if (mm %in% c("class_extremes", "class_purity", "class")) {
    mm <- "class"
    # The loss computation needs a numeric response: use the factor level index.
    Y <- match(data[, yname], levels(data[, yname]))
  }
  if (mm %in% c("anova", "regression_purity", "regression_extremes")) {
    mm <- "anova"
    Y <- data[, yname]
  }
  if (!(mm %in% c("class", "anova"))) {
    stop("getOOBLoss not defined for this method.")
  }

  # Splitting parameters: the "extremes" methods keep an unevaluated call in
  # model_tree.obj$call$parms, so evaluate it before reuse.
  if (treemethod == "anova") {
    ppp <- model_tree.obj$parms
  } else {
    ppp <- model_tree.obj$call$parms
    if (treemethod %in% c("class_extremes", "regression_extremes")) {
      ppp <- eval(ppp)
    }
  }

  N <- nrow(data)
  idx <- seq_len(N)

  # Node sizes: default to the fitted tree's control values, otherwise
  # interpret the user-supplied argument (integer / fraction / function of N).
  if (missing(minsplit)) {
    minsplit_final <- eval(model_tree.obj$control$minsplit)
  } else {
    minsplit_final <- .resolveNodeSize(minsplit, N, "minsplit")
  }
  if (missing(minbucket)) {
    minbucket_final <- eval(model_tree.obj$control$minbucket)
  } else {
    minbucket_final <- .resolveNodeSize(minbucket, N, "minbucket")
  }

  # holdout.predictions[i, j] = OOB prediction for observation i in round j,
  # NA when observation i was in the learning sample for round j.
  holdout.predictions <- matrix(NA, nrow = N, ncol = nboot)
  for (i in seq_len(nboot)) {
    idx.sub1 <- sampleFcn(idx)          # learning-sample indices
    idx.sub2 <- setdiff(idx, idx.sub1)  # held-out (OOB) indices
    insample <- data[idx.sub1, ]
    outsample <- data[idx.sub2, ]
    # Refit with the same formula/method/parameters as the original tree.
    refit <- itree(eval(model_tree.obj$call$formula), data = insample,
                   method = treemethod,
                   minbucket = minbucket_final,
                   minsplit = minsplit_final,
                   cp = eval(model_tree.obj$control$cp),
                   parms = ppp, xval = 0)
    if (mm == "class") {
      preds <- predict(refit, outsample, type = "class")
    } else {
      preds <- predict(refit, outsample)
    }
    holdout.predictions[idx.sub2, i] <- preds
  }

  colnames(holdout.predictions) <- paste("xval", seq_len(nboot), sep = "")
  # Number of rounds in which each observation was actually out of bag.
  # An observation never held out yields 0 here and NaN loss below.
  num.not.na <- rowSums(!is.na(holdout.predictions))

  YY <- matrix(rep(Y, nboot), nrow = N, ncol = nboot)
  if (mm == "anova") {
    # Bagged prediction = mean over the rounds where the observation was OOB.
    preds <- rowMeans(holdout.predictions, na.rm = TRUE)
    if (missing(lossfcn)) {
      # Default regression loss: per-observation mean squared error.
      sqerr <- (YY - holdout.predictions)^2
      sqerr[is.na(sqerr)] <- 0
      avgloss <- rowSums(sqerr) / num.not.na
    } else {
      avgloss <- lossfcn(YY, preds)
    }
  } else {  # classification
    # Bagged prediction = majority vote over the OOB rounds
    # (ties broken by first-seen value).
    Mode <- function(x) {
      x <- x[!is.na(x)]
      ux <- unique(x)
      ux[which.max(tabulate(match(x, ux)))]
    }
    preds <- apply(holdout.predictions, 1, Mode)
    if (missing(lossfcn)) {
      # Default classification loss: per-observation misclassification rate.
      err <- (YY != holdout.predictions)
      err[is.na(err)] <- 0
      avgloss <- rowSums(err) / num.not.na
    } else {
      avgloss <- lossfcn(YY, preds)
    }
  }
  list(bagpred = preds, holdout.predictions = holdout.predictions,
       avgOOBloss = avgloss)
}

# Resolve a user-supplied node-size argument for getOOBLoss: a fraction of N
# (values < 1), an integer count, or a function of N.  'argname' is used only
# to build the error message.
.resolveNodeSize <- function(value, N, argname) {
  if (is.numeric(value)) {
    if (value < 1) round(N * value) else round(value)
  } else {
    if (!is.function(value)) {
      stop("Invalid ", argname,
           " argument. Pass a function of N, a fraction or an integer.")
    }
    value(N)
  }
}
# Estimate per-leaf risk for a fitted itree by averaging a per-observation
# loss estimate (e.g. the avgOOBloss component returned by getOOBLoss)
# within each leaf.
#
# Arguments:
#   tree.obj             - a fitted "itree" object.
#   est_observation_loss - numeric vector of per-observation losses, aligned
#                          with the rows used to fit 'tree.obj'.
#
# Value: an object of class "estNodeRisk", a list with
#   est.risk - mean estimated loss of the observations in each leaf,
#   sd.loss  - standard deviation of those losses (NA for single-obs leaves),
# ordered by the leaf's row number in tree.obj$frame.
#
# Note: the original used as.matrix(x, ncol = 1) + apply(); the ncol
# argument is silently ignored by as.matrix, and vapply over the sorted
# leaf ids is the equivalent, type-safe formulation.
estNodeRisk <- function(tree.obj, est_observation_loss) {
  if (!inherits(tree.obj, "itree")) stop("Not legitimate itree object")
  # tree.obj$where gives, for each observation, the row in tree.obj$frame
  # of the leaf it falls into.
  ww <- tree.obj$where
  nodes <- sort(unique(ww))
  avg.loss <- vapply(nodes, function(nodenum) {
    mean(est_observation_loss[ww == nodenum])
  }, numeric(1))
  sd.loss <- vapply(nodes, function(nodenum) {
    sd(est_observation_loss[ww == nodenum])
  }, numeric(1))
  out <- list(est.risk = avg.loss, sd.loss = sd.loss)
  class(out) <- "estNodeRisk"
  out
}
# One-off data-prep script: rasterise the TASVEG 3.0 vegetation polygons
# (ESRI file geodatabase) onto a 10 m grid of polygon row numbers, then
# mosaic two row-strips into a compressed, tiled GeoTIFF via a GDAL VRT.
vm <- "...data_local/tas.gov.au/TASVEG/GDB/TASVEG3.gdb"
library(vapour)
# Attribute table (~66.8 Mb) is kept separately; the raster stores row
# numbers that index into this table.
vegdata <- tibble::as_tibble(vapour::vapour_read_attributes(vm))
vegeom <- vapour::vapour_read_geometry(vm)
vegeom <- sf::st_as_sfc(vegeom)
library(sf)
# sf object with one feature per polygon, keyed by row number.
x <- st_sf(geometry = vegeom, rownum = seq_along(vegeom))
x$rownum <- seq_len(nrow(x))
library(raster)
# 10 m grid on an extent buffered out to clean 10 m alignment.
r <- raster(spex::buffer_extent(x, 10), res = 10)
library(fasterize)
# Burn each polygon's row number into the raster.
vegraster <- fasterize::fasterize(x, r, field = "rownum")
saveRDS(vegraster, "vegraster.rds", compress = FALSE)
saveRDS(vegdata, "vegdata.rds", compress = "bzip2")
library(raster)
vegraster <- readRDS("vegraster.rds")
# Split into two row blocks (rows 1..20000 and the remainder) so each
# native-format .grd file stays a manageable size.
veg1 <- crop(vegraster, extent(vegraster, 1, 20000, 1, ncol(vegraster)),
             filename = "veg1.grd", datatype = "INT4U")
# Fix: the second crop was mis-assigned to veg1 in the original; the file
# written is veg2.grd, so the object is veg2.
veg2 <- crop(vegraster, extent(vegraster, 20001, nrow(vegraster), 1, ncol(vegraster)),
             filename = "veg2.grd", datatype = "INT4U")
# Mosaic the two strips and write a compressed, tiled GeoTIFF.
# NOTE(review): -a_srs is usually given as "EPSG:32755"; confirm that the
# installed GDAL accepts the bare numeric code "32755".
system("gdalbuildvrt veg.vrt veg1.grd veg2.grd")
system("gdal_translate veg.vrt vegmap3.tif -a_srs 32755 -ot UInt32 -co COMPRESS=DEFLATE -co TILED=YES")
| /data-raw/rasterize-veg.R | no_license | mdsumner/vegmapdata | R | false | false | 1,050 | r | vm <- "...data_local/tas.gov.au/TASVEG/GDB/TASVEG3.gdb"
# Duplicate copy of the TASVEG rasterisation script: read polygon attributes
# and geometry from the geodatabase 'vm', burn polygon row numbers into a
# 10 m raster, then mosaic two row-strips into a compressed GeoTIFF.
library(vapour)
vegdata <- tibble::as_tibble(vapour::vapour_read_attributes(vm))
#66.8Mb attribute table, kept separately from the raster
vegeom <- vapour::vapour_read_geometry(vm)
vegeom <- sf::st_as_sfc(vegeom)
library(sf)
# sf object with one feature per polygon, keyed by row number.
x <- st_sf(geometry = vegeom, rownum = seq_along(vegeom))
x$rownum <- seq_len(nrow(x))
library(raster)
# 10 m grid on an extent buffered out to clean 10 m alignment.
r <- raster(spex::buffer_extent(x, 10), res = 10)
library(fasterize)
# Burn each polygon's row number into the raster.
vegraster <- fasterize::fasterize(x, r, field = "rownum")
saveRDS(vegraster, "vegraster.rds", compress = FALSE)
saveRDS(vegdata, "vegdata.rds", compress = "bzip2")
library(raster)
vegraster <- readRDS("vegraster.rds")
# Split into two row blocks (rows 1..20000 and the remainder).
veg1 <- crop(vegraster, extent(vegraster, 1, 20000, 1, ncol(vegraster)),
filename = "veg1.grd", datatype = "INT4U")
# NOTE(review): this second crop is also assigned to veg1 -- almost
# certainly intended to be veg2 (the file written is veg2.grd).
veg1 <- crop(vegraster, extent(vegraster, 20001, nrow(vegraster), 1, ncol(vegraster)),
filename = "veg2.grd", datatype = "INT4U")
# Mosaic the two strips and write a compressed, tiled GeoTIFF.
system("gdalbuildvrt veg.vrt veg1.grd veg2.grd")
system("gdal_translate veg.vrt vegmap3.tif -a_srs 32755 -ot UInt32 -co COMPRESS=DEFLATE -co TILED=YES")
|
#
# Copyright 2007-2017 Timothy C. Bates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# How I coded this data from the Boulder example
#
# GFF = read.table("~/bin/umx/data/DHBQ_bs.dat", header = T, sep = "\t", as.is = c(T), na.strings = -999)
# x = umx_rename(GFF, old = "zyg2" , replace = "zyg_2grp"); names(x)
# x = umx_rename(x , old = "zyg" , replace = "zyg_6grp"); names(x)
# x = umx_rename(x , grep = "([12bs])$", replace = "_T\\1") ; names(x)
# x$sex_T1 = factor(x$sex_T1, levels = 0:1, labels = c("male", "female"))
# x$sex_T2 = factor(x$sex_T2, levels = 0:1, labels = c("male", "female"))
# x$sex_Tb = factor(x$sex_Tb, levels = 0:1, labels = c("male", "female"))
# x$sex_Ts = factor(x$sex_Ts, levels = 0:1, labels = c("male", "female"))
# x$zyg_6grp = factor(x$zyg_6grp, levels = 1:6, labels = c("MZMM", "DZMM", "MZFF", "DZFF", "DZFM", "DZMF"))
# GFF$zyg_2grp = factor(GFF$zyg_2grp, levels = 1:2, labels = c("MZ", "DZ"))
# GFF = GFF[, c("zyg_6grp", "zyg_2grp", "divorce", "sex_T1", "age_T1", "gff_T1", "fc_T1", "qol_T1", "hap_T1", "sat_T1", "AD_T1", "SOMA_T1", "SOC_T1", "THOU_T1", "sex_T2", "age_T2", "gff_T2", "fc_T2", "qol_T2", "hap_T2", "sat_T2", "AD_T2", "SOMA_T2", "SOC_T2", "THOU_T2", "sex_Tb", "age_Tb", "gff_Tb", "fc_Tb","qol_Tb", "hap_Tb", "sat_Tb", "AD_Tb","SOMA_Tb","SOC_Tb", "THOU_Tb","sex_Ts", "age_Ts", "gff_Ts", "fc_Ts", "qol_Ts", "hap_Ts", "sat_Ts", "AD_Ts","SOMA_Ts","SOC_Ts", "THOU_Ts")]
# save("GFF", file = "GFF.rda")
# system(paste("open ",shQuote(getwd(), type = "csh")))
# update_wordlist get_wordlist(pkg = "~/bin/umx")
# 1. Figure out what things are.
# table(x$sex_Tb) # all 0 so male = 0
# table(x$sex_Ts) # all 1 so female = 1
# umx_aggregate(sex_T2 ~ zyg_6grp, data = x)
# |zyg_6grp |sex_T2 |
# |:-----------|:------------------|
# |1 (n = 448) |male 448; female 0 |
# |2 (n = 389) |male 389; female 0 |
# |3 (n = 668) |male 0; female 668 |
# |4 (n = 484) |male 0; female 484 |
# |5 (n = 504) |male 0; female 504 |
# |6 (n = 407) |male 407; female 0 |
# umx_aggregate(sex_T1 ~ zyg_6grp, data = x)
# |zyg_6grp |sex_T1 |
# |:-----------|:------------------|
# |1 (n = 457) |male 457; female 0 |
# |2 (n = 391) |male 391; female 0 |
# |3 (n = 661) |male 0; female 661 |
# |4 (n = 478) |male 0; female 478 |
# |5 (n = 426) |male 426; female 0 |
# |6 (n = 460) |male 0; female 460 |
# ===================================
# = General Family Functioning data =
# ===================================
#' Twin data: General Family Functioning, divorce, and wellbeing.
#'
#' Measures of family functioning, happiness and related variables in twins, and
#' their brothers and sisters. (see details)
#'
#' @details
#' Several scales in the data are described in van der Aa et al. (2010).
#' General Family Functioning (GFF) refers to adolescents' evaluations general family health
#' vs. pathology. It assesses problem solving, communication, roles within the household,
#' affection, and control. GFF was assessed with a Dutch translation of the General Functioning
#' sub-scale of the McMaster Family Assessment Device (FAD) (Epstein et al., 1983).
#'
#' Family Conflict (FC) refers to adolescents' evaluations of the amount of openly
#' expressed anger, aggression, and conflict among family members. Conflict
#' sub-scale of the Family Environment Scale (FES) (Moos, 1974)
#'
#' Quality of life in general (QLg) was assessed with the 10-step Cantril
#' Ladder from best- to worst-possible life (Cantril, 1965).
#'
#' \describe{
#' \item{zyg_6grp}{Six-level measure of zygosity: 'MZMM', 'DZMM', 'MZFF', 'DZFF', 'DZMF', 'DZFM'}
#' \item{zyg_2grp}{Two-level measure of zygosity: 'MZ', 'DZ'}
#' \item{divorce}{Parental divorce status: 0 = No, 1 = Yes}
#' \item{sex_T1}{Sex of twin 1: 0 = "male", 1 = "female"}
#' \item{age_T1}{Age of twin 1 (years)}
#' \item{gff_T1}{General family functioning for twin 1}
#' \item{fc_T1}{Family conflict sub-scale of the FES}
#' \item{qol_T1}{Quality of life for twin 1}
#' \item{hap_T1}{General happiness for twin 1}
#' \item{sat_T1}{Satisfaction with life for twin 1}
#' \item{AD_T1}{Anxiety and Depression for twin 1}
#' \item{SOMA_T1}{Somatic complaints for twin 1}
#' \item{SOC_T1}{Social problems for twin 1}
#' \item{THOU_T1}{Thought disorder problems for twin 1}
#' \item{sex_T2}{Sex of twin 2}
#' \item{age_T2}{Age of twin 2}
#' \item{gff_T2}{General family functioning for twin 2}
#' \item{fc_T2}{Family conflict sub-scale of the FES}
#' \item{qol_T2}{Quality of life for twin 2}
#' \item{hap_T2}{General happiness for twin 2}
#' \item{sat_T2}{Satisfaction with life for twin 2}
#' \item{AD_T2}{Anxiety and Depression for twin 2}
#' \item{SOMA_T2}{Somatic complaints for twin 2}
#' \item{SOC_T2}{Social problems for twin 2}
#' \item{THOU_T2}{Thought disorder problems for twin 2}
#' \item{sex_Tb}{Sex of sib 1}
#' \item{age_Tb}{Age of sib 1}
#' \item{gff_Tb}{General family functioning for sib 1}
#' \item{fc_Tb}{Family conflict sub-scale of the FES}
#' \item{qol_Tb}{Quality of life for sib 1}
#' \item{hap_Tb}{General happiness for sib 1}
#' \item{sat_Tb}{Satisfaction with life for sib 1}
#' \item{AD_Tb}{Anxiety and Depression for sib 1}
#' \item{SOMA_Tb}{Somatic complaints for sib 1}
#' \item{SOC_Tb}{Social problems for sib 1}
#' \item{THOU_Tb}{Thought disorder problems for sib 1}
#' \item{sex_Ts}{Sex of sib 2}
#' \item{age_Ts}{Age of sib 2}
#' \item{gff_Ts}{General family functioning for sib 2}
#' \item{fc_Ts}{Family conflict sub-scale of the FES}
#' \item{qol_Ts}{Quality of life for sib 2}
#' \item{hap_Ts}{General happiness for sib 2}
#' \item{sat_Ts}{Satisfaction with life for sib 2}
#' \item{AD_Ts}{Anxiety and Depression for sib 2}
#' \item{SOMA_Ts}{Somatic complaints for sib 2}
#' \item{SOC_Ts}{Social problems for sib 2}
#' \item{THOU_Ts}{Thought disorder problems for sib 2}
#' }
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name GFF
#' @usage data(GFF)
#' @format A data frame with 47 variables (listed below):
#' @references van der Aa, N., Boomsma, D. I., Rebollo-Mesa, I., Hudziak, J. J., & Bartels,
#' M. (2010). Moderation of genetic factors by parental divorce in adolescents'
#' evaluations of family functioning and subjective wellbeing. Twin Research
#' and Human Genetics, 13(2), 143-162. doi:10.1375/twin.13.2.143
#' @examples
#' # Twin 1 variables (end in '_T1')
#' data(GFF)
#' umx_names(GFF, "1$") # Just variables ending in 1 (twin 1)
#' str(GFF) # first few rows
#'
#' m1 = umxACE(selDVs= "gff", sep = "_T",
#' mzData = subset(GFF, zyg_2grp == "MZ"),
#' dzData = subset(GFF, zyg_2grp == "DZ")
#' )
#'
NULL
# ================================
# = Anthropometric data on twins =
# ================================
#' Anthropometric data on twins
#'
#' A dataset containing height, weight, BMI, and skin-fold fat measures in several
#' hundred US twin families participating in the MCV Cardiovascular Twin Study (PI Schieken)
#'
#' \itemize{
#' \item fan FamilyID (t1=male,t2=female)
#' \item zyg Zygosity 1:mzm, 2:mzf, 3:dzm, 4:dzf, 5:dzo
#' \item ht_T1 Height of twin 1 (cm)
#' \item wt_T1 Weight of twin 1 (kg)
#' \item bmi_T1 BMI of twin 1
#' \item bml_T1 log BMI of twin 1
#' \item bic_T1 Biceps Skinfold of twin 1
#' \item caf_T1 Calf Skinfold of twin 1
#' \item ssc_T1 Subscapular Skinfold of twin 1
#' \item sil_T1 Suprailiacal Skinfold of twin 1
#' \item tri_T1 Triceps Skinfold of twin 1
#' \item ht_T2 Height of twin 2
#' \item wt_T2 Weight of twin 2
#' \item bmi_T2 BMI of twin 2
#' \item bml_T2 log BMI of twin 2
#' \item bic_T2 Biceps Skinfold of twin 2
#' \item caf_T2 Calf Skinfold of twin 2
#' \item ssc_T2 Subscapular Skinfold of twin 2
#' \item sil_T2 Suprailiacal Skinfold of twin 2
#' \item tri_T2 Triceps Skinfold of twin 2
#' }
#'
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name us_skinfold_data
#' @references Moskowitz, W. B., Schwartz, P. F., & Schieken, R. M. (1999).
#' Childhood passive smoking, race, and coronary artery disease risk:
#' the MCV Twin Study. Medical College of Virginia.
#' Archives of Pediatrics and Adolescent Medicine, \strong{153}, 446-453.
#' \url{https://www.ncbi.nlm.nih.gov/pubmed/10323623}
#' @usage data(us_skinfold_data)
#' @format A data frame with 20 variables (listed below)
#' @examples
#' data(us_skinfold_data)
#' str(us_skinfold_data)
#' par(mfrow = c(1, 2)) # 1 rows and 3 columns
#' plot(ht_T1 ~ht_T2, ylim = c(130, 165), data = subset(us_skinfold_data, zyg == 1))
#' plot(ht_T1 ~ht_T2, ylim = c(130, 165), data = subset(us_skinfold_data, zyg == 3))
#' par(mfrow = c(1, 1)) # back to as it was
NULL
# Load Data
# iqdat = read.table(file = "~/Desktop/IQ.txt", header = TRUE)
# iqdat$zygosity = NA
# iqdat$zygosity[iqdat$zyg %in% 1] = "MZ"
# iqdat$zygosity[iqdat$zyg %in% 2] = "DZ"
# iqdat = iqdat[, c('zygosity','IQ1_T1','IQ2_T1','IQ3_T1','IQ4_T1','IQ1_T2','IQ2_T2','IQ3_T2','IQ4_T2')]
# head(iqdat); dim(iqdat); str(iqdat)
# names(iqdat) = c('zygosity', 'IQ_age1_T1','IQ_age2_T1','IQ_age3_T1','IQ_age4_T1','IQ_age1_T2','IQ_age2_T2','IQ_age3_T2','IQ_age4_T2')
# save("iqdat", file = "iqdat.rda")
# system(paste("open ",shQuote(getwd(), type = "csh")))
# ==============================
# = IQ measured longitudinally =
# ==============================
#' Twin data: IQ measured longitudinally
#'
#' Measures of IQ across four ages in 261 pairs of identical twins and 301 pairs of fraternal (DZ) twins. (see details)
#' @details
#' \itemize{
#' \item zygosity Zygosity (MZ or DZ)
#' \item IQ_age1_T1 T1 IQ measured at age 1
#' \item IQ_age2_T1 T1 IQ measured at age 2
#' \item IQ_age3_T1 T1 IQ measured at age 3
#' \item IQ_age4_T1 T1 IQ measured at age 4
#' \item IQ_age1_T2 T2 IQ measured at age 1
#' \item IQ_age2_T2 T2 IQ measured at age 2
#' \item IQ_age3_T2 T2 IQ measured at age 3
#' \item IQ_age4_T2 T2 IQ measured at age 4
#' }
#'
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name iqdat
#' @references TODO
#' @usage data(iqdat)
#' @format A data frame with 562 rows and 9 variables
#' @examples
#' data(iqdat)
#' str(iqdat)
#' par(mfrow = c(1, 3)) # 1 rows and 3 columns
#' plot(IQ_age4_T1 ~ IQ_age4_T2, ylim = c(50, 150), data = subset(iqdat, zygosity == "MZ"))
#' plot(IQ_age4_T1 ~ IQ_age4_T2, ylim = c(50, 150), data = subset(iqdat, zygosity == "DZ"))
#' plot(IQ_age1_T1 ~ IQ_age4_T2, data = subset(iqdat, zygosity == "MZ"))
#' par(mfrow = c(1, 1)) # back to as it was
NULL | /R/datasets.R | no_license | guhjy/umx | R | false | false | 11,067 | r | #
# Copyright 2007-2017 Timothy C. Bates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# How I coded this data from the Boulder example
#
# GFF = read.table("~/bin/umx/data/DHBQ_bs.dat", header = T, sep = "\t", as.is = c(T), na.strings = -999)
# x = umx_rename(GFF, old = "zyg2" , replace = "zyg_2grp"); names(x)
# x = umx_rename(x , old = "zyg" , replace = "zyg_6grp"); names(x)
# x = umx_rename(x , grep = "([12bs])$", replace = "_T\\1") ; names(x)
# x$sex_T1 = factor(x$sex_T1, levels = 0:1, labels = c("male", "female"))
# x$sex_T2 = factor(x$sex_T2, levels = 0:1, labels = c("male", "female"))
# x$sex_Tb = factor(x$sex_Tb, levels = 0:1, labels = c("male", "female"))
# x$sex_Ts = factor(x$sex_Ts, levels = 0:1, labels = c("male", "female"))
# x$zyg_6grp = factor(x$zyg_6grp, levels = 1:6, labels = c("MZMM", "DZMM", "MZFF", "DZFF", "DZFM", "DZMF"))
# GFF$zyg_2grp = factor(GFF$zyg_2grp, levels = 1:2, labels = c("MZ", "DZ"))
# GFF = GFF[, c("zyg_6grp", "zyg_2grp", "divorce", "sex_T1", "age_T1", "gff_T1", "fc_T1", "qol_T1", "hap_T1", "sat_T1", "AD_T1", "SOMA_T1", "SOC_T1", "THOU_T1", "sex_T2", "age_T2", "gff_T2", "fc_T2", "qol_T2", "hap_T2", "sat_T2", "AD_T2", "SOMA_T2", "SOC_T2", "THOU_T2", "sex_Tb", "age_Tb", "gff_Tb", "fc_Tb","qol_Tb", "hap_Tb", "sat_Tb", "AD_Tb","SOMA_Tb","SOC_Tb", "THOU_Tb","sex_Ts", "age_Ts", "gff_Ts", "fc_Ts", "qol_Ts", "hap_Ts", "sat_Ts", "AD_Ts","SOMA_Ts","SOC_Ts", "THOU_Ts")]
# save("GFF", file = "GFF.rda")
# system(paste("open ",shQuote(getwd(), type = "csh")))
# update_wordlist get_wordlist(pkg = "~/bin/umx")
# 1. Figure out what things are.
# table(x$sex_Tb) # all 0 so male = 0
# table(x$sex_Ts) # all 1 so female = 1
# umx_aggregate(sex_T2 ~ zyg_6grp, data = x)
# |zyg_6grp |sex_T2 |
# |:-----------|:------------------|
# |1 (n = 448) |male 448; female 0 |
# |2 (n = 389) |male 389; female 0 |
# |3 (n = 668) |male 0; female 668 |
# |4 (n = 484) |male 0; female 484 |
# |5 (n = 504) |male 0; female 504 |
# |6 (n = 407) |male 407; female 0 |
# umx_aggregate(sex_T1 ~ zyg_6grp, data = x)
# |zyg_6grp |sex_T1 |
# |:-----------|:------------------|
# |1 (n = 457) |male 457; female 0 |
# |2 (n = 391) |male 391; female 0 |
# |3 (n = 661) |male 0; female 661 |
# |4 (n = 478) |male 0; female 478 |
# |5 (n = 426) |male 426; female 0 |
# |6 (n = 460) |male 0; female 460 |
# ===================================
# = General Family Functioning data =
# ===================================
#' Twin data: General Family Functioning, divorce, and wellbeing.
#'
#' Measures of family functioning, happiness and related variables in twins, and
#' their brothers and sisters. (see details)
#'
#' @details
#' Several scales in the data are described in van der Aa et al. (2010).
#' General Family Functioning (GFF) refers to adolescents' evaluations general family health
#' vs. pathology. It assesses problem solving, communication, roles within the household,
#' affection, and control. GFF was assessed with a Dutch translation of the General Functioning
#' sub-scale of the McMaster Family Assessment Device (FAD) (Epstein et al., 1983).
#'
#' Family Conflict (FC) refers to adolescents' evaluations of the amount of openly
#' expressed anger, aggression, and conflict among family members. Conflict
#' sub-scale of the Family Environment Scale (FES) (Moos, 1974)
#'
#' Quality of life in general (QLg) was assessed with the 10-step Cantril
#' Ladder from best- to worst-possible life (Cantril, 1965).
#'
#' \describe{
#' \item{zyg_6grp}{Six-level measure of zygosity: 'MZMM', 'DZMM', 'MZFF', 'DZFF', 'DZMF', 'DZFM'}
#' \item{zyg_2grp}{Two-level measure of zygosity: 'MZ', 'DZ'}
#' \item{divorce}{Parental divorce status: 0 = No, 1 = Yes}
#' \item{sex_T1}{Sex of twin 1: 0 = "male", 1 = "female"}
#' \item{age_T1}{Age of twin 1 (years)}
#' \item{gff_T1}{General family functioning for twin 1}
#' \item{fc_T1}{Family conflict sub-scale of the FES}
#' \item{qol_T1}{Quality of life for twin 1}
#' \item{hap_T1}{General happiness for twin 1}
#' \item{sat_T1}{Satisfaction with life for twin 1}
#' \item{AD_T1}{Anxiety and Depression for twin 1}
#' \item{SOMA_T1}{Somatic complaints for twin 1}
#' \item{SOC_T1}{Social problems for twin 1}
#' \item{THOU_T1}{Thought disorder problems for twin 1}
#' \item{sex_T2}{Sex of twin 2}
#' \item{age_T2}{Age of twin 2}
#' \item{gff_T2}{General family functioning for twin 2}
#' \item{fc_T2}{Family conflict sub-scale of the FES}
#' \item{qol_T2}{Quality of life for twin 2}
#' \item{hap_T2}{General happiness for twin 2}
#' \item{sat_T2}{Satisfaction with life for twin 2}
#' \item{AD_T2}{Anxiety and Depression for twin 2}
#' \item{SOMA_T2}{Somatic complaints for twin 2}
#' \item{SOC_T2}{Social problems for twin 2}
#' \item{THOU_T2}{Thought disorder problems for twin 2}
#' \item{sex_Ta}{Sex of sib 1}
#' \item{age_Ta}{Age of sib 1}
#' \item{gff_Ta}{General family functioning for sib 1}
#' \item{fc_Ta}{Family conflict sub-scale of the FES}
#' \item{qol_Ta}{Quality of life for sib 1}
#' \item{hap_Ta}{General happiness for sib 1}
#' \item{sat_Ta}{Satisfaction with life for sib 1}
#' \item{AD_Ta}{Anxiety and Depression for sib 1}
#' \item{SOMA_Ta}{Somatic complaints for sib 1}
#' \item{SOC_Ta}{Social problems for sib 1}
#' \item{THOU_Ta}{Thought disorder problems for sib 1}
#' \item{sex_Ts}{Sex of sib 2}
#' \item{age_Ts}{Age of sib 2}
#' \item{gff_Ts}{General family functioning for sib 2}
#' \item{fc_Ts}{Family conflict sub-scale of the FES}
#' \item{qol_Ts}{Quality of life for sib 2}
#' \item{hap_Ts}{General happiness for sib 2}
#' \item{sat_Ts}{Satisfaction with life for sib 2}
#' \item{AD_Ts}{Anxiety and Depression for sib 2}
#' \item{SOMA_Ts}{Somatic complaints for sib 2}
#' \item{SOC_Ts}{Social problems for sib 2}
#' \item{THOU_Ts}{Thought disorder problems for sib 2}
#' }
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name GFF
#' @usage data(GFF)
#' @format A data frame with 47 variables (listed below):
#' @references van der Aa, N., Boomsma, D. I., Rebollo-Mesa, I., Hudziak, J. J., & Bartels,
#' M. (2010). Moderation of genetic factors by parental divorce in adolescents'
#' evaluations of family functioning and subjective wellbeing. Twin Research
#' and Human Genetics, 13(2), 143-162. doi:10.1375/twin.13.2.143
#' @examples
#' # Twin 1 variables (end in '_T1')
#' data(GFF)
#' umx_names(GFF, "1$") # Just variables ending in 1 (twin 1)
#' str(GFF) # first few rows
#'
#' m1 = umxACE(selDVs= "gff", sep = "_T",
#' mzData = subset(GFF, zyg_2grp == "MZ"),
#' dzData = subset(GFF, zyg_2grp == "DZ")
#' )
#'
NULL
# ================================
# = Anthropometric data on twins =
# ================================
#' Anthropometric data on twins
#'
#' A dataset containing height, weight, BMI, and skin-fold fat measures in several
#' hundred US twin families participating in the MCV Cardiovascular Twin Study (PI Schieken)
#'
#' \itemize{
#' \item fan FamilyID (t1=male,t2=female)
#' \item zyg Zygosity 1:mzm, 2:mzf, 3:dzm, 4:dzf, 5:dzo
#' \item ht_T1 Height of twin 1 (cm)
#' \item wt_T1 Weight of twin 1 (kg)
#' \item bmi_T1 BMI of twin 1
#' \item bml_T1 log BMI of twin 1
#' \item bic_T1 Biceps Skinfold of twin 1
#' \item caf_T1 Calf Skinfold of twin 1
#' \item ssc_T1 Subscapular Skinfold of twin 1
#' \item sil_T1 Suprailiacal Skinfold of twin 1
#' \item tri_T1 Triceps Skinfold of twin 1
#' \item ht_T2 Height of twin 2
#' \item wt_T2 Weight of twin 2
#' \item bmi_T2 BMI of twin 2
#' \item bml_T2 log BMI of twin 2
#' \item bic_T2 Biceps Skinfold of twin 2
#' \item caf_T2 Calf Skinfold of twin 2
#' \item ssc_T2 Subscapular Skinfold of twin 2
#' \item sil_T2 Suprailiacal Skinfold of twin 2
#' \item tri_T2 Triceps Skinfold of twin 2
#' }
#'
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name us_skinfold_data
#' @references Moskowitz, W. B., Schwartz, P. F., & Schieken, R. M. (1999).
#' Childhood passive smoking, race, and coronary artery disease risk:
#' the MCV Twin Study. Medical College of Virginia.
#' Archives of Pediatrics and Adolescent Medicine, \strong{153}, 446-453.
#' \url{https://www.ncbi.nlm.nih.gov/pubmed/10323623}
#' @usage data(us_skinfold_data)
#' @format A data frame with 20 variables (listed below)
#' @examples
#' data(us_skinfold_data)
#' str(us_skinfold_data)
#' par(mfrow = c(1, 2)) # 1 rows and 3 columns
#' plot(ht_T1 ~ht_T2, ylim = c(130, 165), data = subset(us_skinfold_data, zyg == 1))
#' plot(ht_T1 ~ht_T2, ylim = c(130, 165), data = subset(us_skinfold_data, zyg == 3))
#' par(mfrow = c(1, 1)) # back to as it was
NULL
# Load Data
# iqdat = read.table(file = "~/Desktop/IQ.txt", header = TRUE)
# iqdat$zygosity = NA
# iqdat$zygosity[iqdat$zyg %in% 1] = "MZ"
# iqdat$zygosity[iqdat$zyg %in% 2] = "DZ"
# iqdat = iqdat[, c('zygosity','IQ1_T1','IQ2_T1','IQ3_T1','IQ4_T1','IQ1_T2','IQ2_T2','IQ3_T2','IQ4_T2')]
# head(iqdat); dim(iqdat); str(iqdat)
# names(iqdat) = c('zygosity', 'IQ_age1_T1','IQ_age2_T1','IQ_age3_T1','IQ_age4_T1','IQ_age1_T2','IQ_age2_T2','IQ_age3_T2','IQ_age4_T2')
# save("iqdat", file = "iqdat.rda")
# system(paste("open ",shQuote(getwd(), type = "csh")))
# ==============================
# = IQ measured longitudinally =
# ==============================
#' Twin data: IQ measured longitudinally
#'
#' Measures of IQ across four ages in 261 pairs of identical twins and 301 pairs of fraternal (DZ) twins. (see details)
#' @details
#' \itemize{
#' \item zygosity Zygosity (MZ or DZ)
#' \item IQ_age1_T1 T1 IQ measured at age 1
#' \item IQ_age2_T1 T1 IQ measured at age 2
#' \item IQ_age3_T1 T1 IQ measured at age 3
#' \item IQ_age4_T1 T1 IQ measured at age 4
#' \item IQ_age1_T2 T2 IQ measured at age 1
#' \item IQ_age2_T2 T2 IQ measured at age 2
#' \item IQ_age3_T2 T2 IQ measured at age 3
#' \item IQ_age4_T2 T2 IQ measured at age 4
#' }
#'
#' @docType data
#' @keywords datasets
#' @family datasets
#' @name iqdat
#' @references TODO
#' @usage data(iqdat)
#' @format A data frame with 562 rows and 9 variables
#' @examples
#' data(iqdat)
#' str(iqdat)
#' par(mfrow = c(1, 3)) # 1 row, 3 columns
#' plot(IQ_age4_T1 ~ IQ_age4_T2, ylim = c(50, 150), data = subset(iqdat, zygosity == "MZ"))
#' plot(IQ_age4_T1 ~ IQ_age4_T2, ylim = c(50, 150), data = subset(iqdat, zygosity == "DZ"))
#' plot(IQ_age1_T1 ~ IQ_age4_T2, data = subset(iqdat, zygosity == "MZ"))
#' par(mfrow = c(1, 1)) # back to as it was
NULL |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/custom.functions.R
\name{convert_ARF.D47_to_ARF.Dennis.temp}
\alias{convert_ARF.D47_to_ARF.Dennis.temp}
\title{Dennis Calibration in ARF ref frame, using ARF D47 values}
\usage{
convert_ARF.D47_to_ARF.Dennis.temp(D47)
}
\description{
Dennis Calibration in ARF ref frame, using ARF D47 values
}
| /man/convert_ARF.D47_to_ARF.Dennis.temp.Rd | no_license | cubessil/isoprocessCUBES | R | false | true | 372 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/custom.functions.R
\name{convert_ARF.D47_to_ARF.Dennis.temp}
\alias{convert_ARF.D47_to_ARF.Dennis.temp}
\title{Dennis Calibration in ARF ref frame, using ARF D47 values}
\usage{
convert_ARF.D47_to_ARF.Dennis.temp(D47)
}
\description{
Dennis Calibration in ARF ref frame, using ARF D47 values
}
|
#' Compound Interest: future value of a principal
#'
#' @param p numeric
#' @param r numeric
#' @param t numeric
#'
#' @return numeric
#' @export
#'
#' @examples
#' compounding_interest(1000, 0.0425, 6) # 1283.68
# Future value of principal `p` compounded at per-period rate `r` over `t`
# periods. Vectorized in all three arguments, like plain R arithmetic.
compounding_interest <- function(p, r, t) {
  growth_factor <- (1 + r)^t
  p * growth_factor
}
| /R/compound_interest.R | permissive | devopsuser94/MyFirstGitRepoR | R | false | false | 247 | r | #' Title Compound Interest
#'
#' @param p numeric
#' @param r numeric
#' @param t numeric
#'
#' @return numeric
#' @export
#'
#' @examples
#' compounding_interest(6, 0.0425, 6) # 1283.68
compounding_interest <- function(p, r, t) {
p*((1+r)^t)
}
|
# Fuzzer-recovered regression input for blorr:::blr_pairs_cpp (valgrind run):
# `x` mixes NaN, extreme-magnitude doubles, subnormals, and zeros; `y` is an
# empty numeric vector (length 0) — the mismatched/empty-input edge case.
testlist <- list(x = c(NaN, -3.29834288070943e+231, -1.07730874267432e+236, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -1.07730874267519e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, 6.0973514015456e+163, 9.08217799640982e-97, 1.39065275988475e-309, 1.64548574512489e-257, 3.64686912294044e-315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
# Replay the crashing argument list against the compiled routine.
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) | /blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609956768-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 640 | r | testlist <- list(x = c(NaN, -3.29834288070943e+231, -1.07730874267432e+236, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -1.07730874267519e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, -1.07730874267432e+236, 6.0973514015456e+163, 9.08217799640982e-97, 1.39065275988475e-309, 1.64548574512489e-257, 3.64686912294044e-315, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphical.R
\docType{methods}
\name{factorHeatmap}
\alias{factorHeatmap}
\alias{factorHeatmap,BiclusterExperiment,character-method}
\alias{factorHeatmap,BiclusterExperiment,numeric-method}
\alias{factorHeatmap,BiclusterExperiment,BiclusterStrategy-method}
\title{Plot a heatmap showing bicluster membership of samples or features}
\usage{
factorHeatmap(bce, bcs, type, ordering = "input", ...)
\S4method{factorHeatmap}{BiclusterExperiment,character}(bce, bcs, type,
ordering = "input", ...)
\S4method{factorHeatmap}{BiclusterExperiment,numeric}(bce, bcs, type,
ordering = "input", ...)
\S4method{factorHeatmap}{BiclusterExperiment,BiclusterStrategy}(bce, bcs,
type = c("feature", "sample"), ordering = c("input", "distance",
"cluster"), phenoLabels = c(), biclustLabels = c(),
colNames = FALSE)
}
\arguments{
\item{bce}{A BiclusterExperiment object}
\item{bcs}{The name or index of a BiclusterStrategy contained by \code{bce},
or the BiclusterStrategy object itself}
\item{type}{either "feature" for feature-bicluster membership or "sample" for
sample-bicluster membership}
\item{ordering}{The default \code{ordering = "input"} preserves the order
of samples or features from \code{bce@assayData}. \code{"distance"}
reorders based on Euclidean distance calculated from \code{bce@assayData}.
\code{"cluster"} reorders based on bicluster membership.}
\item{...}{Optional arguments \code{phenoLabels}, \code{biclustLabels},
\code{ordering}, and \code{colNames}, described below:}
\item{phenoLabels}{an optional character vector of labels to annotate. If
\code{type = "feature"}, \code{phenoLabels} should be column names of
\code{Biobase::phenoData(bce)}}
\item{biclustLabels}{an optional character vector of labels to annotate.
Should be elements of \code{bcNames(bcs)}. Both \code{phenoLabels} and
\code{biclustLabels} may be specified.}
\item{colNames}{if \code{TRUE}, labels the samples/features}
}
\value{
a \code{\link[pheatmap]{pheatmap}-class} object
}
\description{
Reads data from \code{BiclusterStrategy@factors} to create a heatmap of
bicluster membership across all samples or features.
}
\section{Methods (by class)}{
\itemize{
\item \code{bce = BiclusterExperiment,bcs = character}: Plots a matrix factor from the
\code{\link{BiclusterStrategy-class}} object named \code{bcs} in
\code{bce@strategies}.
\item \code{bce = BiclusterExperiment,bcs = numeric}: Plots a matrix factor from the
\code{\link{BiclusterStrategy-class}} object at the index specified by
\code{bcs}
\item \code{bce = BiclusterExperiment,bcs = BiclusterStrategy}: Plots a matrix factor from \code{bcs}.
}}
\examples{
bce <- BiclusterExperiment(yeast_benchmark[[1]])
bce <- addStrat(bce, k = 2, method = "als-nmf")
bcs <- getStrat(bce, 1)
factorHeatmap(bce, bcs, type = "sample")
}
| /man/factorHeatmap.Rd | no_license | jonalim/mfBiclust | R | false | true | 2,864 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphical.R
\docType{methods}
\name{factorHeatmap}
\alias{factorHeatmap}
\alias{factorHeatmap,BiclusterExperiment,character-method}
\alias{factorHeatmap,BiclusterExperiment,numeric-method}
\alias{factorHeatmap,BiclusterExperiment,BiclusterStrategy-method}
\title{Plot a heatmap showing bicluster membership of samples or features}
\usage{
factorHeatmap(bce, bcs, type, ordering = "input", ...)
\S4method{factorHeatmap}{BiclusterExperiment,character}(bce, bcs, type,
ordering = "input", ...)
\S4method{factorHeatmap}{BiclusterExperiment,numeric}(bce, bcs, type,
ordering = "input", ...)
\S4method{factorHeatmap}{BiclusterExperiment,BiclusterStrategy}(bce, bcs,
type = c("feature", "sample"), ordering = c("input", "distance",
"cluster"), phenoLabels = c(), biclustLabels = c(),
colNames = FALSE)
}
\arguments{
\item{bce}{A BiclusterExperiment object}
\item{bcs}{The name or index of a BiclusterStrategy contained by \code{bce},
or the BiclusterStrategy object itself}
\item{type}{either "feature" for feature-bicluster membership or "sample" for
sample-bicluster membership}
\item{ordering}{The default \code{ordering = "input"} preserves the order
of samples or features from \code{bce@assayData}. \code{"distance"}
reorders based on Euclidean distance calculated from \code{bce@assayData}.
\code{"cluster"} reorders based on bicluster membership.}
\item{...}{Optional arguments \code{phenoLabels}, \code{biclustLabels},
\code{ordering}, and \code{colNames}, described below:}
\item{phenoLabels}{an optional character vector of labels to annotate. If
\code{type = "feature"}, \code{phenoLabels} should be column names of
\code{Biobase::phenoData(bce)}}
\item{biclustLabels}{an optional character vector of labels to annotate.
Should be elements of \code{bcNames(bcs)}. Both \code{phenoLabels} and
\code{biclustLabels} may be specified.}
\item{colNames}{if \code{TRUE}, labels the samples/features}
}
\value{
a \code{\link[pheatmap]{pheatmap}-class} object
}
\description{
Reads data from \code{BiclusterStrategy@factors} to create a heatmap of
bicluster membership across all samples or features.
}
\section{Methods (by class)}{
\itemize{
\item \code{bce = BiclusterExperiment,bcs = character}: Plots a matrix factor from the
\code{\link{BiclusterStrategy-class}} object named \code{bcs} in
\code{bce@strategies}.
\item \code{bce = BiclusterExperiment,bcs = numeric}: Plots a matrix factor from the
\code{\link{BiclusterStrategy-class}} object at the index specified by
\code{bcs}
\item \code{bce = BiclusterExperiment,bcs = BiclusterStrategy}: Plots a matrix factor from \code{bcs}.
}}
\examples{
bce <- BiclusterExperiment(yeast_benchmark[[1]])
bce <- addStrat(bce, k = 2, method = "als-nmf")
bcs <- getStrat(bce, 1)
factorHeatmap(bce, bcs, type = "sample")
}
|
# Split an interval-valued symbolic table into centers and half-ranges.
#
# For each (object i, variable j) interval [min, max]:
#   centers[i, j] = (min + max) / 2
#   ratios[i, j]  = (max - min) / 2   (the half-length of the interval)
#
# sym.data: a symbolic data object whose variables are all interval-typed
#   ("$I"), with fields N, M, sym.obj.names, sym.var.names, sym.var.types.
# Returns a list of two N x M data frames: `centers` and `ratios`, carrying
# object names as row names and variable names as column names.
centers.interval2 <- function(sym.data) {
  idn <- all(sym.data$sym.var.types == sym.data$sym.var.types[1])
  if (idn == FALSE) {
    stop("All variables have to be of the same type")
  }
  if ((sym.data$sym.var.types[1] != "$I")) {
    stop("Variables have to be continuous or Interval")  # fixed typo "continuos"
  }
  nn <- sym.data$N
  mm <- sym.data$M
  centers <- as.data.frame(matrix(0, nn, mm))
  ratios <- as.data.frame(matrix(0, nn, mm))
  rownames(centers) <- sym.data$sym.obj.names
  colnames(centers) <- sym.data$sym.var.names
  rownames(ratios) <- sym.data$sym.obj.names
  colnames(ratios) <- sym.data$sym.var.names
  for (j in seq_len(mm)) {
    # sym.var() depends only on the column index, so extract it once per
    # variable (it was previously re-extracted for every cell) and fill the
    # whole column vectorized. seq_len() is safe when nn or mm is 0.
    sym.var.act <- sym.var(sym.data, j)
    min.vals <- sym.var.act$var.data.vector[seq_len(nn), 1]
    max.vals <- sym.var.act$var.data.vector[seq_len(nn), 2]
    centers[, j] <- (min.vals + max.vals) / 2
    ratios[, j] <- (max.vals - min.vals) / 2
  }
  return(list(centers = centers, ratios = ratios))
}
| /R/centers.interval2.r | no_license | Frenchyy1/RSDA | R | false | false | 1,000 | r | centers.interval2 <- function(sym.data) {
idn <- all(sym.data$sym.var.types == sym.data$sym.var.types[1])
if (idn == FALSE) {
stop("All variables have to be of the same type")
}
if ((sym.data$sym.var.types[1] != "$I")) {
stop("Variables have to be continuos or Interval")
} else {
nn <- sym.data$N
}
mm <- sym.data$M
centers <- matrix(0, nn, mm)
ratios <- matrix(0, nn, mm)
centers <- as.data.frame(centers)
ratios <- as.data.frame(ratios)
rownames(centers) <- sym.data$sym.obj.names
colnames(centers) <- sym.data$sym.var.names
rownames(ratios) <- sym.data$sym.obj.names
colnames(ratios) <- sym.data$sym.var.names
for (i in 1:nn) {
for (j in 1:mm) {
sym.var.act <- sym.var(sym.data, j)
min.val <- sym.var.act$var.data.vector[i, 1]
max.val <- sym.var.act$var.data.vector[i, 2]
centers[i, j] <- (min.val + max.val) / 2
ratios[i, j] <- (-min.val + max.val) / 2
}
}
return(list(centers = centers, ratios = ratios))
}
|
library(tidyverse)
devtools::load_all("C:/Users/Jonathan Tannen/Dropbox/sixty_six/posts/svdcov/")
source("C:/Users/Jonathan Tannen/Dropbox/sixty_six/admin_scripts/theme_sixtysix.R")
# Fit an SVD of (optionally log-transformed) turnout, precinct x election.
#
# result_df: long results table with columns warddiv, election, party, votes,
#   and an is_topline_office flag.
# election_type: "primary" or "general"; matched against the END of the
#   election name (regex anchored with "$").
# party_grep: optional regex applied to `party` before aggregating
#   (e.g. "^DEM"); NULL keeps all parties.
# verbose: forwarded to get_svd().
# use_log: if TRUE the SVD target is log(votes + 1) rather than raw votes.
#
# Returns an SVDParams object with @log recording which transform was used.
get_turnout_svd <- function(result_df, election_type, party_grep = NULL, verbose = TRUE, use_log = USE_LOG) {
  if (!is.null(party_grep)) result_df <- result_df %>% filter(grepl(party_grep, party))
  result_df <- result_df %>% filter(grepl(paste0(election_type, "$"), election))
  # Total topline-office votes per precinct per election; the bare group_by()
  # drops the grouping before the transform.
  turnout <- result_df %>%
    filter(is_topline_office) %>%
    group_by(warddiv, election) %>%
    summarise(votes = sum(votes)) %>%
    group_by() %>%
    mutate(target = {if (use_log) log(votes + 1) else votes})
  # Wide matrix: one row per precinct, one column per election; precincts
  # with no recorded votes in an election get 0.
  turnout_wide <- turnout %>%
    select(warddiv, election, target) %>%
    spread(election, target, fill = 0)
  turnout_wide_mat <- as.matrix(turnout_wide %>% select(-warddiv))
  row.names(turnout_wide_mat) <- turnout_wide$warddiv
  # Fix: forward the `verbose` argument (it was hard-coded to TRUE, so the
  # parameter was silently ignored).
  svd_fit <- get_svd(turnout_wide_mat, verbose = verbose, method = SVD_METHOD)
  svd_fit@log <- use_log
  return(svd_fit)
}
# Fit an SVD of demeaned candidate vote shares by precinct.
#
# df_past: long results with columns election, office, district, warddiv,
#   candidate, party, votes.
# primary_party_regex: regex selecting which party's primary rows to keep;
#   general-election rows always pass the party filter.
# use_primary / use_general: include rows of those election types.
# use_log: if TRUE the target is log(pvote + 0.001); otherwise the raw share.
#
# Returns a 5-dimension SVDParams object with @log set accordingly.
get_pvote_svd <- function(
df_past,
primary_party_regex,
use_primary=TRUE,
use_general=TRUE,
use_log=USE_LOG
){
# Drop write-ins and keep only the requested election types / parties.
df_pvote <- df_past %>%
filter(candidate != "Write In") %>%
filter(election == "general" | use_primary) %>%
filter(election == "primary" | use_general) %>%
filter(grepl(primary_party_regex, party, ignore.case=TRUE) | election=="general")
# Vote share within each (election, office, district, precinct) contest.
# The 0.001 offset guards log(0); the bare group_by() removes the grouping.
df_pvote <- df_pvote %>%
group_by(election, office, district, warddiv) %>%
mutate(pvote = votes / sum(votes)) %>%
mutate(target = {if(use_log) log(pvote + 0.001) else pvote}) %>%
group_by()
# Per-contest prior mean: an even split among that contest's n candidates.
n_cand <- df_pvote %>%
select(election, office, district, candidate) %>%
unique() %>%
group_by(election, office, district) %>%
summarise(n_cand = n()) %>%
mutate(
prior_mean = {if(use_log) log(1/n_cand) else 1/n_cand}
) %>%
ungroup()
# Demean each observation against the even-split prior.
df_pvote <- df_pvote %>%
left_join(n_cand) %>%
mutate(target_demean = target - prior_mean)
# Wide matrix: one row per precinct, one column per
# candidate_office(+district)_election key; unobserved cells become 0.
pvote_wide <- df_pvote %>%
mutate(office=paste0(office, ifelse(is.na(district), "", district))) %>%
unite("key", candidate, office, election) %>%
select(warddiv, key, target_demean) %>%
spread(key, target_demean, fill=0)
pvote_mat <- as.matrix(pvote_wide %>% select(-warddiv))
rownames(pvote_mat) <- pvote_wide$warddiv
# Columns are already demeaned above, hence known_column_means = 0.
svd <- get_svd(
pvote_mat,
n_svd=5,
known_column_means=0,
verbose=TRUE,
method=SVD_METHOD
)
svd@log <- use_log
return(svd)
}
#######################
## PLOTS
#######################
# Choropleth of one score column from an SVDParams object at precinct level.
#
# svd: SVDParams; @row_scores must contain a `row` column of warddiv ids
#   plus the requested score column.
# col: name of the column in @row_scores to plot (e.g. "mean", "score.1").
# precinct_sf: sf polygon layer keyed by `warddiv`.
# adj_area: if TRUE, normalize the plotted value by polygon area --
#   subtracting log(area) when scores are on the log scale (svd@log),
#   dividing by area otherwise. If FALSE, plot the raw score.
#
# Returns a ggplot object with a viridis "Score" fill scale.
map_precinct_score <- function(svd, col, precinct_sf, adj_area = TRUE) {
  # Fix: the error message referred to a nonexistent `params` argument;
  # now consistent with plot_election_score().
  if (!is(svd, "SVDParams")) stop("svd must be of class SVDParams")
  precinct_sf$area <- as.numeric(st_area(precinct_sf))
  if (adj_area) {
    if (svd@log) {
      adj_fe <- function(fe, area) fe - log(area)
    } else {
      adj_fe <- function(fe, area) fe / area
    }
  } else {
    adj_fe <- function(x, ...) x
  }
  ggplot(
    precinct_sf %>%
      left_join(svd@row_scores, by = c("warddiv" = "row"))
  ) +
    geom_sf(
      aes(fill = adj_fe(!!sym(col), area)),
      color = NA
    ) +
    scale_fill_viridis_c("Score") +
    theme_map_sixtysix()
}
# Map the precinct-level mean ("fixed effect") column of an SVDParams object.
# adj_area is passed through: TRUE normalizes by polygon area.
# NOTE(review): adding scale_fill_viridis_c("Mean") on top of the "Score"
# scale set inside map_precinct_score replaces it (ggplot2 keeps the last
# fill scale), only retitling the legend -- intended, but it will emit a
# scale-replacement message.
map_precinct_fe <- function(svd, precinct_sf, adj_area) {
map_precinct_score(svd, "mean", precinct_sf, adj_area) +
scale_fill_viridis_c("Mean")
}
# Map precinct scores for SVD dimension `k` ("score.k" column) with a
# diverging fill centered at 0, since dimension scores can be negative.
# Area adjustment is disabled: the scores are plotted as-is.
map_precinct_dim <- function(svd, k, precinct_sf){
map_precinct_score(svd, paste0("score.",k), precinct_sf, adj_area=FALSE) +
scale_fill_gradient2(
paste("Score, Dimension", k),
midpoint = 0
)
}
# Plot one score column of svd@col_scores over time: a point per election,
# lines connecting elections in the same four-year cycle (year %% 4),
# faceted by election type.
#
# svd: SVDParams object.
# col: name of the score column to plot ("mean" or "score.k").
plot_election_score <- function(svd, col){
if(!is(svd, "SVDParams")) stop("svd must be of class SVDParams")
# NOTE(review): inside mutate(), the bare name `col` is data-masked -- if
# col_scores carries a column literally named `col` (the election label,
# e.g. "2016 general"), these lines parse THAT column rather than the
# function argument. Confirm col_scores' schema; applied to the argument
# itself, substr(col, 1, 4) would yield nonsense years.
election_df <- svd@col_scores %>%
mutate(
year = asnum(substr(col, 1, 4)),
etype = substr(col, 6, nchar(as.character(col)))
)
# The y aesthetic, by contrast, explicitly uses the argument via sym(col).
ggplot(
election_df,
aes(x=year, y=!!sym(col))
) +
geom_line(
aes(group=year %% 4),
color= strong_green
) +
geom_point(
color = strong_green,
size = 2
) +
facet_grid(etype ~ .) +
xlab("") +
theme_sixtysix() +
ggtitle("election scores", "Grouped by 4 election cycle")
}
# Convenience wrappers over plot_election_score for the mean column and for
# a single SVD dimension's score column.
plot_election_fe <- function(svd) plot_election_score(svd, "mean")
plot_election_dim <- function(svd, k) plot_election_score(svd, paste0("score.", k))
# Block until the user presses <Enter>; used between interactive diagnostic
# plots so each one can be inspected.
pause <- function() invisible(readline(prompt = "Press <Enter> to continue..."))
# Interactive diagnostics for a pvote SVD: the precinct-mean map followed by
# one map per retained SVD dimension, pausing for <Enter> between plots.
#
# svd: SVDParams (from get_pvote_svd). precinct_sf: sf polygons keyed by
#   `warddiv`, passed through to the mapping helpers.
#
# Fix: seq_len() instead of 1:(n), so a row_scores table with no score
# columns (ncol - 2 == 0) iterates zero times instead of c(1, 0).
pvote_diagnostics <- function(svd, precinct_sf) {
  print(
    map_precinct_fe(svd, precinct_sf, adj_area = FALSE) +
      ggtitle("Precinct means of pvote")
  )
  pause()
  # row_scores holds the `row` id and `mean` columns ahead of the per-
  # dimension score columns, hence the ncol - 2.
  for (k in seq_len(ncol(svd@row_scores) - 2)) {
    print(
      map_precinct_dim(svd, k, precinct_sf) +
        ggtitle(sprintf("pvote Dimension %s", k))
    )
    pause()
  }
}
# Interactive diagnostics for a turnout SVD: precinct-mean map, election
# fixed-effect plot, then a precinct map and election-score plot for each
# SVD dimension, pausing for <Enter> between plots.
#
# Fix: seq_len() instead of 1:(n), so zero score columns iterate zero times.
turnout_diagnostics <- function(svd, precinct_sf) {
  print(
    map_precinct_fe(svd, precinct_sf, adj_area = FALSE) +
      ggtitle("Precinct means of turnout")
  )
  pause()
  print(
    plot_election_fe(svd) +
      ggtitle("Turnout FE")
  )
  pause()
  # row_scores carries the `row` id and `mean` columns ahead of the per-
  # dimension score columns, hence the ncol - 2.
  for (k in seq_len(ncol(svd@row_scores) - 2)) {
    print(
      map_precinct_dim(svd, k, precinct_sf) +
        ggtitle(sprintf("Turnout Dim %s", k))
    )
    pause()
    print(
      plot_election_dim(svd, k) +
        ggtitle(sprintf("Turnout Dim %s", k))
    )
    pause()
  }
}
# Run the full interactive diagnostic sequence for a fitted needle model:
# pvote maps first, then turnout maps and score plots. Pauses between plots.
# needle_params: object with @pvote_svd and @turnout_svd slots (SVDParams).
diagnostics <- function(needle_params, precinct_sf){
print("Plotting Diagnostics...")
pvote_diagnostics(needle_params@pvote_svd, precinct_sf)
turnout_diagnostics(needle_params@turnout_svd, precinct_sf)
}
# Manual scratch block for fitting and inspecting the needle model; wrapped
# in if(FALSE) so it never runs on source(). Expects df_past, CONFIG, divs,
# USE_LOG, and needleSVDs() to exist in the session.
if(FALSE){
# NOTE(review): get_pvote_svd() requires primary_party_regex (no default);
# this call would error if executed.
pvote_svd <- get_pvote_svd(df_past)
if(CONFIG$is_primary){
# NOTE(review): get_turnout_svd()'s first parameter is result_df; these
# calls pass election_type/party_grep one positional slot too early and
# omit the data frame entirely.
turnout_svds=list(
"rep" = get_turnout_svd("primary", "^REP"),
"dem" = get_turnout_svd("primary", "^DEM")
)
} else {
turnout_svds=list(
"general" = get_turnout_svd("general")
)
}
needle_params <- needleSVDs(
pvote_svd=pvote_svd,
turnout_svds=turnout_svds,
log=USE_LOG
)
diagnostics(needle_params, divs)
}
| /svd_for_turnout_and_pvote.R | no_license | jtannen/election_needle | R | false | false | 5,885 | r | library(tidyverse)
devtools::load_all("C:/Users/Jonathan Tannen/Dropbox/sixty_six/posts/svdcov/")
source("C:/Users/Jonathan Tannen/Dropbox/sixty_six/admin_scripts/theme_sixtysix.R")
get_turnout_svd <- function(result_df, election_type, party_grep=NULL, verbose=TRUE, use_log=USE_LOG){
if(!is.null(party_grep)) result_df <- result_df %>% filter(grepl(party_grep, party))
result_df <- result_df %>% filter(grepl(paste0(election_type, "$"), election))
turnout <- result_df %>%
filter(is_topline_office) %>%
group_by(warddiv, election) %>%
summarise(votes = sum(votes)) %>%
group_by() %>%
mutate(target = {if(use_log) log(votes + 1) else votes})
turnout_wide <- turnout %>%
select(warddiv, election, target) %>%
spread(election, target, fill = 0)
turnout_wide_mat <- as.matrix(turnout_wide %>% select(-warddiv))
row.names(turnout_wide_mat) <- turnout_wide$warddiv
svd <- get_svd(turnout_wide_mat, verbose=TRUE, method=SVD_METHOD)
svd@log <- use_log
return(svd)
}
get_pvote_svd <- function(
df_past,
primary_party_regex,
use_primary=TRUE,
use_general=TRUE,
use_log=USE_LOG
){
df_pvote <- df_past %>%
filter(candidate != "Write In") %>%
filter(election == "general" | use_primary) %>%
filter(election == "primary" | use_general) %>%
filter(grepl(primary_party_regex, party, ignore.case=TRUE) | election=="general")
df_pvote <- df_pvote %>%
group_by(election, office, district, warddiv) %>%
mutate(pvote = votes / sum(votes)) %>%
mutate(target = {if(use_log) log(pvote + 0.001) else pvote}) %>%
group_by()
n_cand <- df_pvote %>%
select(election, office, district, candidate) %>%
unique() %>%
group_by(election, office, district) %>%
summarise(n_cand = n()) %>%
mutate(
prior_mean = {if(use_log) log(1/n_cand) else 1/n_cand}
) %>%
ungroup()
df_pvote <- df_pvote %>%
left_join(n_cand) %>%
mutate(target_demean = target - prior_mean)
pvote_wide <- df_pvote %>%
mutate(office=paste0(office, ifelse(is.na(district), "", district))) %>%
unite("key", candidate, office, election) %>%
select(warddiv, key, target_demean) %>%
spread(key, target_demean, fill=0)
pvote_mat <- as.matrix(pvote_wide %>% select(-warddiv))
rownames(pvote_mat) <- pvote_wide$warddiv
svd <- get_svd(
pvote_mat,
n_svd=5,
known_column_means=0,
verbose=TRUE,
method=SVD_METHOD
)
svd@log <- use_log
return(svd)
}
#######################
## PLOTS
#######################
map_precinct_score <- function(svd, col, precinct_sf, adj_area=TRUE){
if(!is(svd, "SVDParams")) stop("params must be of class SVDParams")
precinct_sf$area <- as.numeric(st_area(precinct_sf))
if(adj_area){
if(svd@log){
adj_fe <- function(fe, area) fe - log(area)
} else {
adj_fe <- function(fe, area) fe / area
}
} else {
adj_fe <- function(x, ...) x
}
ggplot(
precinct_sf %>%
left_join(svd@row_scores, by=c("warddiv"="row"))
) +
geom_sf(
aes(fill = adj_fe(!!sym(col), area)),
color= NA
) +
scale_fill_viridis_c("Score")+
theme_map_sixtysix()
}
map_precinct_fe <- function(svd, precinct_sf, adj_area) {
map_precinct_score(svd, "mean", precinct_sf, adj_area) +
scale_fill_viridis_c("Mean")
}
map_precinct_dim <- function(svd, k, precinct_sf){
map_precinct_score(svd, paste0("score.",k), precinct_sf, adj_area=FALSE) +
scale_fill_gradient2(
paste("Score, Dimension", k),
midpoint = 0
)
}
plot_election_score <- function(svd, col){
if(!is(svd, "SVDParams")) stop("svd must be of class SVDParams")
election_df <- svd@col_scores %>%
mutate(
year = asnum(substr(col, 1, 4)),
etype = substr(col, 6, nchar(as.character(col)))
)
ggplot(
election_df,
aes(x=year, y=!!sym(col))
) +
geom_line(
aes(group=year %% 4),
color= strong_green
) +
geom_point(
color = strong_green,
size = 2
) +
facet_grid(etype ~ .) +
xlab("") +
theme_sixtysix() +
ggtitle("election scores", "Grouped by 4 election cycle")
}
plot_election_fe <- function(svd) plot_election_score(svd, "mean")
plot_election_dim <- function(svd, k) plot_election_score(svd, paste0("score.", k))
pause <- function() invisible(readline(prompt = "Press <Enter> to continue..."))
pvote_diagnostics <- function(svd, precinct_sf){
print(
map_precinct_fe(svd, precinct_sf, adj_area=FALSE) +
ggtitle("Precinct means of pvote")
)
pause()
for(k in 1:(ncol(svd@row_scores)-2)){
print(
map_precinct_dim(svd, k, precinct_sf) +
ggtitle(sprintf("pvote Dimension %s", k))
)
pause()
}
}
turnout_diagnostics <- function(svd, precinct_sf){
print(
map_precinct_fe(svd, precinct_sf, adj_area=FALSE) +
ggtitle("Precinct means of turnout")
)
pause()
print(
plot_election_fe(svd) +
ggtitle("Turnout FE")
)
pause()
for(k in 1:(ncol(svd@row_scores)-2)){
print(
map_precinct_dim(svd, k, precinct_sf) +
ggtitle(sprintf("Turnout Dim %s", k))
)
pause()
print(
plot_election_dim(svd, k) +
ggtitle(sprintf("Turnout Dim %s", k))
)
pause()
}
}
diagnostics <- function(needle_params, precinct_sf){
print("Plotting Diagnostics...")
pvote_diagnostics(needle_params@pvote_svd, precinct_sf)
turnout_diagnostics(needle_params@turnout_svd, precinct_sf)
}
if(FALSE){
pvote_svd <- get_pvote_svd(df_past)
if(CONFIG$is_primary){
turnout_svds=list(
"rep" = get_turnout_svd("primary", "^REP"),
"dem" = get_turnout_svd("primary", "^DEM")
)
} else {
turnout_svds=list(
"general" = get_turnout_svd("general")
)
}
needle_params <- needleSVDs(
pvote_svd=pvote_svd,
turnout_svds=turnout_svds,
log=USE_LOG
)
diagnostics(needle_params, divs)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_moments.R
\name{plot_moments}
\alias{plot_moments}
\title{Plot function: Display the influence of a covariate}
\usage{
plot_moments(
model,
int_var,
pred_data = NULL,
rug = FALSE,
samples = FALSE,
uncertainty = FALSE,
ex_fun = NULL,
palette = "viridis",
vary_by = NULL
)
}
\arguments{
\item{model}{A fitted model on which the plots are based.}
\item{int_var}{The variable for which influences of the moments shall be
graphically displayed. Has to be in character form.}
\item{pred_data}{Combinations of covariate data, sometimes also known as
"newdata", including the variable of interest, which will be ignored in
later processing.}
\item{rug}{Should the resulting plot be a rug plot?}
\item{samples}{If the provided model is a bamlss model, should the moment
values be "correctly" calculated, using the transformed samples? See
details for details.}
\item{uncertainty}{If \code{TRUE}, displays uncertainty measures about the
covariate influences. Can only be \code{TRUE} if samples is also
\code{TRUE}.}
\item{ex_fun}{An external function \code{function(par) {...}} which
calculates a measure, whose dependency from a certain variable is of
interest. Has to be specified in character form. See examples for an
example.}
\item{palette}{See \code{\link{plot_dist}}.}
\item{vary_by}{Variable name in character form over which to vary the
mean/reference values of explanatory variables. It is passed to
\link{set_mean}. See that documentation for further details.}
}
\description{
This function takes a dataframe of predictions with one row per prediction
and one column for every explanatory variable. Then, those predictions are
held constant while one specific variable is varied over its whole range
(min-max). Then, the constant variables with the varied interest variables
are predicted and plotted against the expected value and the variance of the
underlying distribution.
}
\details{
The target of this function is to display the influence of a selected effect
on the predicted moments of the modeled distribution. The motivation for
computing influences on the moments of a distribution is its
interpretability: In most cases, the parameters of a distribution do not
equate the moments and as such are only indirectly location, scale or shape
properties, making the computed effects hard to understand.
Navigating through the disarray of link functions, non-parametric effects and
transformations to moments, \code{plot_moments()} supports a wide range of
target distributions. See \link{dists} for details.
Whether a distribution is supported or not depends on whether the underlying
\code{R} object possesses functions to calculate the moments of the
distribution from the predicted parameters. To achieve this for as many
distributional families as possible, we worked together with both the authors
of \link{gamlss} (Rigby and Stasinopoulos 2005) and \link{bamlss} (Umlauf et
al. 2018) and implemented the moment functions for almost all available
distributions in the respective packages. The \link{betareg} family was
implemented in \link{distreg.vis} as well.
}
\examples{
# Generating some data
dat <- model_fam_data(fam_name = "LOGNO")
# Estimating the model
library("gamlss")
model <- gamlss(LOGNO ~ ps(norm2) + binomial1,
~ ps(norm2) + binomial1,
data = dat, family = "LOGNO")
# Get newdata by either specifying an own data.frame, or using set_mean()
# for obtaining mean vals of explanatory variables
ndata_user <- dat[1:5, c("norm2", "binomial1")]
ndata_auto <- set_mean(model_data(model))
# Influence graphs
plot_moments(model, int_var = "norm2", pred_data = ndata_user) # cont. var
plot_moments(model, int_var = "binomial1", pred_data = ndata_user) # discrete var
plot_moments(model, int_var = "norm2", pred_data = ndata_auto) # with new ndata
# If pred_data argument is omitted plot_moments uses mean explanatory
# variables for prediction (using set_mean)
plot_moments(model, int_var = "norm2")
# Rug Plot
plot_moments(model, int_var = "norm2", rug = TRUE)
# Different colour palette
plot_moments(model, int_var = "binomial1", palette = "Dark2")
# Using an external function
ineq <- function(par) {
2 * pnorm((par[["sigma"]] / 2) * sqrt(2)) - 1
}
plot_moments(model, int_var = "norm2", pred_data = ndata_user, ex_fun = "ineq")
}
\references{
Rigby RA, Stasinopoulos DM (2005). "Generalized Additive Models
for Location, Scale and Shape." Journal of the Royal Statistical Society C,
54(3), 507-554.
Umlauf, N, Klein N, Zeileis A (2018). "BAMLSS: Bayesian
Additive Models for Location, Scale and Shape (and Beyond)." Journal of
Computational and Graphical Statistics, 27(3), 612-627.
}
| /man/plot_moments.Rd | no_license | Stan125/distreg.vis | R | false | true | 4,775 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_moments.R
\name{plot_moments}
\alias{plot_moments}
\title{Plot function: Display the influence of a covariate}
\usage{
plot_moments(
model,
int_var,
pred_data = NULL,
rug = FALSE,
samples = FALSE,
uncertainty = FALSE,
ex_fun = NULL,
palette = "viridis",
vary_by = NULL
)
}
\arguments{
\item{model}{A fitted model on which the plots are based.}
\item{int_var}{The variable for which influences of the moments shall be
graphically displayed. Has to be in character form.}
\item{pred_data}{Combinations of covariate data, sometimes also known as
"newdata", including the variable of interest, which will be ignored in
later processing.}
\item{rug}{Should the resulting plot be a rug plot?}
\item{samples}{If the provided model is a bamlss model, should the moment
values be "correctly" calculated, using the transformed samples? See
details for details.}
\item{uncertainty}{If \code{TRUE}, displays uncertainty measures about the
covariate influences. Can only be \code{TRUE} if samples is also
\code{TRUE}.}
\item{ex_fun}{An external function \code{function(par) {...}} which
calculates a measure, whose dependency from a certain variable is of
interest. Has to be specified in character form. See examples for an
example.}
\item{palette}{See \code{\link{plot_dist}}.}
\item{vary_by}{Variable name in character form over which to vary the
mean/reference values of explanatory variables. It is passed to
\link{set_mean}. See that documentation for further details.}
}
\description{
This function takes a dataframe of predictions with one row per prediction
and one column for every explanatory variable. Then, those predictions are
held constant while one specific variable is varied over it's whole range
(min-max). Then, the constant variables with the varied interest variables
are predicted and plotted against the expected value and the variance of the
underlying distribution.
}
\details{
The target of this function is to display the influence of a selected effect
on the predicted moments of the modeled distribution. The motivation for
computing influences on the moments of a distribution is its
interpretability: In most cases, the parameters of a distribution do not
equate the moments and as such are only indirectly location, scale or shape
properties, making the computed effects hard to understand.
Navigating through the disarray of link functions, non-parametric effects and
transformations to moments, \code{plot_moments()} supports a wide range of
target distributions. See \link{dists} for details.
Whether a distribution is supported or not depends on whether the underlying
\code{R} object possesses functions to calculate the moments of the
distribution from the predicted parameters. To achieve this for as many
distributional families as possible, we worked together with both the authors
of \link{gamlss} (Rigby and Stasinopoulos 2005) and \link{bamlss} (Umlauf et
al. 2018) and implemented the moment functions for almost all available
distributions in the respective packages. The \link{betareg} family was
implemented in \link{distreg.vis} as well.
}
\examples{
# Generating some data
dat <- model_fam_data(fam_name = "LOGNO")
# Estimating the model
library("gamlss")
model <- gamlss(LOGNO ~ ps(norm2) + binomial1,
~ ps(norm2) + binomial1,
data = dat, family = "LOGNO")
# Get newdata by either specifying an own data.frame, or using set_mean()
# for obtaining mean vals of explanatory variables
ndata_user <- dat[1:5, c("norm2", "binomial1")]
ndata_auto <- set_mean(model_data(model))
# Influence graphs
plot_moments(model, int_var = "norm2", pred_data = ndata_user) # cont. var
plot_moments(model, int_var = "binomial1", pred_data = ndata_user) # discrete var
plot_moments(model, int_var = "norm2", pred_data = ndata_auto) # with new ndata
# If pred_data argument is omitted plot_moments uses mean explanatory
# variables for prediction (using set_mean)
plot_moments(model, int_var = "norm2")
# Rug Plot
plot_moments(model, int_var = "norm2", rug = TRUE)
# Different colour palette
plot_moments(model, int_var = "binomial1", palette = "Dark2")
# Using an external function
ineq <- function(par) {
2 * pnorm((par[["sigma"]] / 2) * sqrt(2)) - 1
}
plot_moments(model, int_var = "norm2", pred_data = ndata_user, ex_fun = "ineq")
}
\references{
Rigby RA, Stasinopoulos DM (2005). "Generalized Additive Models
for Location, Scale and Shape." Journal of the Royal Statistical Society C,
54(3), 507-554.
Umlauf, N, Klein N, Zeileis A (2018). "BAMLSS: Bayesian
Additive Models for Location, Scale and Shape (and Beyond)." Journal of
Computational and Graphical Statistics, 27(3), 612-627.
}
|
# English UI strings for the magnetic-field plotting GUI. `msg` is the
# active message table looked up by key throughout the app; a Russian table
# (msgRu) may be swapped in at load time. Keep the key sets identical.
# NOTE: the misspelled '...Lable' keys are part of the lookup interface --
# do not rename them without updating every call site.
msg <- list(
winTitle = 'Magnetic field plots',
samplesLbl = 'Samples:',
openBtn = 'Open',
openSamplesDialog = 'Open file with samples data', # fixed typo "wiht"
variationLbl = 'Variation:',
openVariationDialog = 'Open file with variation data',
decDelimLbl = 'Use comma as decimal separator',
drawBtn = 'Draw plots',
readError = "Can't plot data for specified files",
samplesPlotTitle = 'Samples',
samplesPlotYLable = 'Value',
samplesPlotXLable = 'Picket',
variationPlotTitle = 'Variation',
variationPlotYLable = 'Value',
variationPlotXLable = 'Time',
diffPlotTitle = 'Output data',
diffPlotYLable = 'Anomaly values',
diffPlotXLable = 'Pickets',
readyStatus = 'Select samples and variation files to draw a plot',
noSamples = 'Samples file is not set',
noVariation = 'Variation file is not set'
)
# UI strings, Russian locale. Key set mirrors `msg`; key names are the
# interface and must stay unchanged. Fixed several spelling errors
# ("Выбирете" -> "Выберите", "указаных" -> "указанных",
# "постороения" -> "построения", "Измеренные данных" -> "Измеренные данные")
# and a mistranslation of decDelimLbl (the English string says "decimal
# separator", not digit grouping).
msgRu <- list(
  winTitle = 'Графики магнитного поля',
  samplesLbl = 'Измеренные данные:',
  openBtn = 'Открыть',
  openSamplesDialog = 'Выберите файл с измеренными данными',
  variationLbl = 'Вариация:',
  openVariationDialog = 'Выберите файл со значениями вариации',
  decDelimLbl = 'Использовать запятую как десятичный разделитель',
  drawBtn = 'Построить графики',
  readError = 'Не могу построить графики для указанных файлов, (см. run.log)',
  samplesPlotTitle = 'Измеренные данные',
  samplesPlotYLable = 'Значение',
  samplesPlotXLable = 'Пикет',
  variationPlotTitle = 'Вариация',
  variationPlotYLable = 'Значение',
  variationPlotXLable = 'Время',
  diffPlotTitle = 'Выходные данные',
  diffPlotYLable = 'Аномальные значения',
  diffPlotXLable = 'Пикет',
  readyStatus = 'Выберите файлы с измеренными данными и вариацией для построения графиков',
  noSamples = 'Файл с отсчетами не выбран',
  noVariation = 'Файл с вариацией не выбран'
)
# XXX: labels with Russian text render poorly on Windows, so the Windows
# locale switch below is disabled on purpose; kept commented out for
# reference.
# if (.Platform$OS.type == 'windows') {
# tryCatch({
# Sys.setlocale('LC_ALL', 'rus')
# msg <- msgRu
# },
# warning = function(e) {
# print(e)
# })
# } else
# Switch to the Russian message catalogue on Unix systems whose LANG is the
# UTF-8 Russian locale; everywhere else the English `msg` stays in effect.
if (.Platform$OS.type == 'unix' && Sys.getenv('LANG') == 'ru_RU.UTF-8') {
msg <- msgRu
}
| /src/locale.r | no_license | rkuchumov/magnetic_plot | R | false | false | 2,687 | r | msg <- list(
winTitle = 'Magnetic field plots',
samplesLbl = 'Samples:',
openBtn = 'Open',
openSamplesDialog = 'Open file wiht samples data',
variationLbl = 'Variation:',
openVariationDialog = 'Open file with variation data',
decDelimLbl = 'Use comma as decimal separator',
drawBtn = 'Draw plots',
readError = "Can't plot data for specified files",
samplesPlotTitle = 'Samples',
samplesPlotYLable = 'Value',
samplesPlotXLable = 'Picket',
variationPlotTitle = 'Variation',
variationPlotYLable = 'Value',
variationPlotXLable = 'Time',
diffPlotTitle = 'Output data',
diffPlotYLable = 'Anomaly values',
diffPlotXLable = 'Pickets',
readyStatus = 'Select samples and variation files to draw a plot',
noSamples = 'Samples file is not set',
noVariation = 'Variation file is not set'
)
# UI strings, Russian locale. Key set mirrors `msg`; key names are the
# interface and must stay unchanged. Fixed several spelling errors
# ("Выбирете" -> "Выберите", "указаных" -> "указанных",
# "постороения" -> "построения", "Измеренные данных" -> "Измеренные данные")
# and a mistranslation of decDelimLbl (the English string says "decimal
# separator", not digit grouping).
msgRu <- list(
  winTitle = 'Графики магнитного поля',
  samplesLbl = 'Измеренные данные:',
  openBtn = 'Открыть',
  openSamplesDialog = 'Выберите файл с измеренными данными',
  variationLbl = 'Вариация:',
  openVariationDialog = 'Выберите файл со значениями вариации',
  decDelimLbl = 'Использовать запятую как десятичный разделитель',
  drawBtn = 'Построить графики',
  readError = 'Не могу построить графики для указанных файлов, (см. run.log)',
  samplesPlotTitle = 'Измеренные данные',
  samplesPlotYLable = 'Значение',
  samplesPlotXLable = 'Пикет',
  variationPlotTitle = 'Вариация',
  variationPlotYLable = 'Значение',
  variationPlotXLable = 'Время',
  diffPlotTitle = 'Выходные данные',
  diffPlotYLable = 'Аномальные значения',
  diffPlotXLable = 'Пикет',
  readyStatus = 'Выберите файлы с измеренными данными и вариацией для построения графиков',
  noSamples = 'Файл с отсчетами не выбран',
  noVariation = 'Файл с вариацией не выбран'
)
# XXX: labels with Russian text render poorly on Windows, so the Windows
# locale switch below is disabled on purpose; kept commented out for
# reference.
# if (.Platform$OS.type == 'windows') {
# tryCatch({
# Sys.setlocale('LC_ALL', 'rus')
# msg <- msgRu
# },
# warning = function(e) {
# print(e)
# })
# } else
# Switch to the Russian message catalogue on Unix systems whose LANG is the
# UTF-8 Russian locale; everywhere else the English `msg` stays in effect.
if (.Platform$OS.type == 'unix' && Sys.getenv('LANG') == 'ru_RU.UTF-8') {
msg <- msgRu
}
|
# Benchmark: how slow is lm() when refit once per predictor column?
# Karolina Sikorska and Paul Eilers, 2012
# First simulated model -- the slowest of the compared approaches.

# Simulate an n x m predictor matrix (uniform on [0, 2]) and a standard
# normal response; the fixed seed makes the data reproducible.
set.seed(2012)
n <- 10000
m <- 1000
S <- matrix(2 * runif(n * m), n, m)
y <- rnorm(n)

# One simple linear regression per column; keep only the slope estimate.
slope_of <- function(j) {
  fit <- lm(y ~ S[, j])
  coef(fit)[[2]]
}

# Time the m fits and report throughput; msip = 1e-6 * n * m / elapsed
# seconds (millions of data points processed per second).
t0 <- proc.time()[1]
beta <- vapply(seq_len(m), slope_of, numeric(1))
t1 <- proc.time()[1] - t0
msip <- 1e-06 * n * m / t1
cat(sprintf("Speed: %2.1f Msips\n", msip))
beta
| /Regression/Parallel Regression/Linear regression/speed_lm.r | no_license | tanu17/Genome-Wide-Association-Studies-and-R | R | false | false | 738 | r | # Check the speed of lm() in loop
# Benchmark: how slow is lm() when refit once per predictor column?
# Karolina Sikorska and Paul Eilers, 2012
# First simulated model -- the slowest of the compared approaches.

# Simulate an n x m predictor matrix (uniform on [0, 2]) and a standard
# normal response; the fixed seed makes the data reproducible.
set.seed(2012)
n <- 10000
m <- 1000
S <- matrix(2 * runif(n * m), n, m)
y <- rnorm(n)

# One simple linear regression per column; keep only the slope estimate.
slope_of <- function(j) {
  fit <- lm(y ~ S[, j])
  coef(fit)[[2]]
}

# Time the m fits and report throughput; msip = 1e-6 * n * m / elapsed
# seconds (millions of data points processed per second).
t0 <- proc.time()[1]
beta <- vapply(seq_len(m), slope_of, numeric(1))
t1 <- proc.time()[1] - t0
msip <- 1e-06 * n * m / t1
cat(sprintf("Speed: %2.1f Msips\n", msip))
beta
|
#################################################################
# Generate the figures comparing IMIS-Opt and IMIS-ShOpt on the
# FitzHugh-Nagumo (FhN) model.
#################################################################
# NOTE(review): interactive analysis script -- it wipes the workspace and
# hard-codes a machine-specific working directory, so it only runs as-is on
# the original author's machine.
#setwd('D:/Publications/IMIS-ShOpt/incremental-mixture-importance-submitted/codeSubmit/codeSubmit/FhN_One_IMIS_ShOpt_IMIS_Opt')
rm(list = ls(all = TRUE))
#setwd('E:/IMISCode/IMIS-ShOpt_bcp_VM_r7l_July_13/FhN_One_IMIS_ShOpt_IMIS_Opt/FhN_One_IMIS_ShOpt_IMIS_Opt')
setwd('E:/Publications/IMISCode_july_17_2018_submitted/IMIS_ShOpt/FhN_One_IMIS_ShOpt_IMIS_Opt')
# Project helpers: two-stage estimation, the IMIS sampler with three
# optimizers, and the FhN likelihood / model setup.
source("Two-stage-FhN-just-c-with-prior.R") # 2-stage functions
source("IMIS.opt.colloc.proc-3optimizers.general-no-touchups.R") # General IMIS 3 optimizers function
source("fhn-model-set-up-x0proc-just-c.R") # likelihood etc...
source("FhN-model-set-up-as-ode-x0proc-thetalik-justc.R") # basic FhN functions
source("makeSSElik.R")
source("makeSSEprocFHN.R")
library(doParallel)
library(CollocInfer)
# Previously saved sampler runs: full FhN model (IMIS-ShOpt), one-parameter
# model (IMIS-ShOpt), and one-parameter model (IMIS-Opt).
#output_fullModel=get(load('E:/IMISCode/IMIS-ShOpt_bcp_VM_r7l_July_13/FhN_fullModel_IMIS_ShOpt/IMIS_shopt_full_fhn_D10.RData'))
output_fullModel=get(load('E:/Publications/IMISCode_july_17_2018_submitted/IMIS_ShOpt/FhN_fullModel_IMIS_ShOpt/IMIS_shopt_full_fhn_D10.RData'))
output_1parModelIMIS_shOpt=get(load('FhN_1Param_IMIS_Shopt_D4.RData'))
output_IMIS_opt=get(load('FhN_1Param_IMIS_Opt_D12.RData'))
# Solve the FhN ODE at the IMIS-Opt posterior-mean value of c so the state
# trajectories can be compared against the observed data later on.
#get solution and the data around c=mean(output_IMIS_opt$resample)
times = seq(0,20,0.2)
print("Note that the parameter labelled 'sd' is actually a variance. will fix this eventually")
# Initial state of the two FhN components (V, R).
x0 = c(-1,1)
names(x0) = c("V","R")
pars=mean(output_IMIS_opt$resample)
parnames =names(pars)=c("c")
fhn=make.FHN()
y_c11 = lsoda(x0,times,fhn$fn.ode,pars)
y_c11 = y_c11[,2:3]
y=output_IMIS_opt$data
#data_c11 = y_c11 + matrix(rnorm(dim(y_c11)[1]*2,0,sqrt(.05^2)),length(times),2)
# Worker pool for parallel likelihood evaluation; each worker loads the model
# packages and receives every helper/object that likelihood()/prior() touch.
cl <- makeCluster(4)
registerDoParallel(cl)
clusterCall(cl,function(x) {library(deSolve);library(CollocInfer);library(numDeriv);library(lokern)})
clusterExport(cl,varlist=list('IMIS.opt.colloc.3optimizers.general.no.touch.ups','d2negnormdp2',"make.fhn","%dopar%","foreach",'make.SSEproc.FHN',"neglogprior","prior","likelihood",'times',"dnegnormdp",'make.SSElik',"dneglogpriordpar","lokerns","ksLqudratic",'simex.fun.justc','neq','der.fhn.justc','jac.fhn.justc','d2neglogpriordpar2'))
clusterExport(cl,varlist=ls())
#output_IMIS_opt$data=data
#save(output_IMIS_opt,file='FhN_1Param_IMIS_Opt_D3.RData')
# Evaluate log likelihood, unnormalized log posterior, and log prior of c on
# a grid over (0.2, 20]; these feed panels A-C of FIG1 below.
cgrid=seq(0.2,20,length=1000)
loglik=sapply(cgrid,function(x) {likelihood(x,logs=TRUE,data=output_IMIS_opt$data)})
logpost=sapply(cgrid,function(x) {prior(x,logs=TRUE)+likelihood(x,logs=TRUE,data=output_IMIS_opt$data)})
log_prior=sapply(cgrid,function(x) {prior(x,logs=TRUE)})
#stopCluster(cl)
# FIG1: 3x2 panel figure (EPS). Panels A-C show the grid evaluations of the
# posterior/likelihood/prior; panels D-E show the kernel density of each
# sampler's resampled draws; two small inset panels zoom in on the densities.
setEPS()
postscript("FIG1.eps",horizontal=FALSE, paper="special",height=24,width=24, colormodel = "cmyk",
family = "Helvetica")
#png('loglikPriorIMIS.png',height = 450,width=600)
#par(mfrow=c(2,2),oma=c(3,2,rep(0,2))+2,mar=c(1,2,3,1))
#par(mfrow=c(2,2),oma=c(3,2,rep(1,2))+0.005,mar=c(8,8,8,8)+0.5)
par(mfrow=c(3,2),oma=c(3,2,rep(1,2))+0.005,mar=c(8,8,8,8)+0.5)
#unnormalized log posterior
plot(cgrid,(logpost),cex.lab=4,cex.axis=4,cex.main=5,mgp=c(6,2.5,0),xlab='c',ylab='density',main='A. Unnormalized log posterior', lwd=3)
#Likelihood over a coarse grid
plot(cgrid,loglik,cex.lab=4,cex.axis=4,cex.main=5,mgp=c(6,2.5,0),xlab='c',ylab='density',main='B.Log likelihood', lwd=3)
#log prior
plot(cgrid,log_prior,cex.lab=4,cex.axis=4,cex.main=5,mgp=c(6,2.5,0),xlab='c',ylab='density',main='C.Log prior c~N(14,2)', lwd=3)
#IMIS-Opt posterior estimate
plot(density(output_IMIS_opt$resample),xlab='c',ylab='density',mgp=c(6,2.5,0),xlim=range(cgrid),main='D.IMIS-Opt posterior density',cex.axis=4,cex.main=5,cex.lab=4, lwd=3)
##IMIS-Opt posterior estimate zoomed in
##par(new=TRUE, oma=c(5,6,0,0.005))
plot(density(output_1parModelIMIS_shOpt$resample),xlab='c',ylab='density',mgp=c(6,2.5,0),xlim=range(cgrid),main='E.IMIS-ShOpt posterior density',cex.axis=4,cex.main=5,cex.lab=4, lwd=3)
# Inset 1: overlay a zoomed-in IMIS-Opt density in a small layout cell placed
# via par(new=TRUE) + layout().
par(new=TRUE, oma=c(9,13,1,0))
# ##par(new=TRUE, oma=c(5,6,0,0.005))
#
matLayout=matrix(0,6, 4, byrow = TRUE)
#matLayout[2,1]=1; matLayout[2,2]=1
#matLayout[3,1]=1; matLayout[3,2]=1
#matLayout[3,3]=1
matLayout[4,3]=1; #matLayout[4,2]=1
layout(matLayout)
plot(density(output_IMIS_opt$resample,adj=6), col='red',main='IMIS-Opt:zoom in',cex.axis=2.5,cex.main=4,cex.lab=3,xlab='',lwd=3,mgp=c(1.5,1,0),ylab='')
#dev.off()
# setEPS()
# postscript("FIG11.eps",horizontal=FALSE, paper="special",height=9,width=12, colormodel = "cmyk",
# family = "Helvetica")
# par(mfrow=c(1,1))
# par(mar=c(8,8,8,8)+0.5)#c(7,9,5,5))
#plot(density(output_1parModelIMIS_shOpt$resample),xlab='c',ylab='density',mgp=c(6,2.5,0),xlim=range(cgrid),main='E.IMIS-ShOpt posterior density',cex.axis=4,cex.main=4,cex.lab=4)
# Inset 2: same zoom-in trick for the IMIS-ShOpt density.
par(new=TRUE, oma=c(9,4,0,12))
#
# # matLayout=matrix(0,3, 3, byrow = TRUE)
# # matLayout[2,2]=1; matLayout[2,3]=1
# # matLayout[3,2]=1; matLayout[3,3]=1
#
matLayout=matrix(0,6, 4, byrow = TRUE)
matLayout[6,2]=1
layout(matLayout)
plot(density(output_1parModelIMIS_shOpt$resample,adj=6), col='red',main='IMIS-ShOpt:zoom in',cex.axis=3,cex.main=4,cex.lab=3,xlab='',lwd=3,mgp=c(1.5,1,0),ylab='')
dev.off()
# Side-by-side PNG: FhN state trajectories (lines) against the observations
# (points). Panel A uses the IMIS-Opt posterior-mean c; panel B uses the
# stored data's solution `y` (labelled c=3 in the title).
png('StateVariablesData.png',width=700,height = 400)
par(mfrow=c(1,2),oma=c(3,2,rep(0,2))+0.05,mar=c(2,1,3,1)+2)
plot(times,y_c11[,1],col='blue',main=paste('A.State variables and obs., \n c=',round(mean(output_IMIS_opt$resample),2),sep=''),cex.axis=2,cex.main=2,cex.lab=2,xlab='times',ylab='',lwd=3,lty=1)
lines(times,y_c11[,2],col='green',lwd=2)
points(times,output_IMIS_opt$data[,1],col='red',lwd=3)
points(times,output_IMIS_opt$data[,2],col='orange',lwd=2)
# xpd=TRUE lets the legend be drawn outside the plot region, below the axis.
par(xpd=TRUE)
legend(-0.00015,-0.95,c('V','R',expression(Y[V]),expression(Y[R])),cex=0.85,lty = c(1, 1, NA,NA), pch = c(NA, NA,1,1),col=c('blue',"green","red",'orange'),lwd=c(3,2,3,2))
plot(times,y[,1],col='blue',main='B. State variables and obs., \n c=3',cex.axis=2,cex.main=2,cex.lab=2,xlab='times',ylab='',type='l',lwd=3)
lines(times,y[,2],col='green',lwd=2)
points(times,output_IMIS_opt$data[,1],col='red',lwd=3)
points(times,output_IMIS_opt$data[,2],col='orange',lwd=2)
par(xpd=TRUE)
legend(-0.00015,-1,c('V','R',expression(Y[V]),expression(Y[R])),cex=0.85,lty = c(1, 1, NA,NA), pch = c(NA, NA,1,1),col=c('blue',"green","red",'orange'),lwd=c(3,2,3,2))
dev.off()
# Kullback-Leibler divergence between each sampler's resampled posterior and
# the numerically normalized target posterior of c on [2.5, 3.5]. The two
# original copy-pasted computations are factored into one helper.
library(flexmix)

# Compare a sampler's output against the target posterior on [lo, hi].
# `output$resample` supplies the draws; exp(output$stat[2, 1]) is the
# sampler's own marginal-likelihood estimate, used to normalize the kernel
# density. The target posterior is prior * likelihood, normalized by
# numerical integration. Returns the KLdiv() matrix (auto-printed at top
# level, matching the original script's output).
kl_vs_target <- function(output, lo = 2.5, hi = 3.5) {
  dsamples <- density(output$resample, from = lo, to = hi)
  normalizedsamples <- dsamples$y / exp(output$stat[2, 1])
  # Unnormalized log posterior on the same grid as the density estimate.
  cgrid <- seq(lo, hi, length = length(dsamples$y))
  logpost <- vapply(cgrid, function(x) {
    prior(x, logs = TRUE) + likelihood(x, logs = TRUE, data = output_IMIS_opt$data)
  }, numeric(1))
  # Normalizing constant of the target posterior by numerical integration.
  integrand <- function(x) {
    exp(prior(x, logs = TRUE) + likelihood(x, logs = TRUE, data = output_IMIS_opt$data))
  }
  normconst <- integrate(integrand, lower = lo, upper = hi)
  norm_post <- exp(logpost) / normconst$value
  KLdiv(cbind(norm_post, normalizedsamples))
}
#stopCluster(cl)

# IMIS-ShOpt
kl_vs_target(output_1parModelIMIS_shOpt)
# norm_post normalizedsamples
# norm_post 0.000000000 0.001636068
# normalizedsamples 0.000962198 0.000000000

# IMIS-Opt
kl_vs_target(output_IMIS_opt)
# norm_post normalizedsamples
# norm_post 0.000000 3.381397
# normalizedsamples 8.473557 0.000000
# Draw every resampled parameter draw's FhN trajectory (grey), the trajectory
# at the posterior mean (V in blue, R in dark green), and the observations
# (red points), then write the figure to an EPS file.
# Relies on the globals x0 (initial state) and fhn (model definition), and on
# deSolve::lsoda. `output$resample` may be a vector (one free parameter) or a
# matrix (one row per draw).
PlotsResampledTraj <- function(times = seq(0, 20, 0.2), output, title, filename) {
  # Solve the ODE once per resampled draw and once at the posterior mean.
  if (is.vector(output$resample)) {
    trajs <- lapply(seq_along(output$resample),
                    function(k) lsoda(x0, times, fhn$fn.ode, output$resample[k]))
    mean_traj <- lsoda(x0, times, fhn$fn.ode, mean(output$resample))
  } else {
    trajs <- lapply(seq_len(nrow(output$resample)),
                    function(k) lsoda(x0, times, fhn$fn.ode, output$resample[k, ]))
    mean_traj <- lsoda(x0, times, fhn$fn.ode, colMeans(output$resample))
  }
  setEPS()
  postscript(filename, horizontal = FALSE, paper = "special", height = 14, width = 18,
             colormodel = "cmyk", family = "Helvetica")
  par(mfrow = c(1, 1), mar = c(9, 9, 9, 9))
  # V component: all resampled trajectories in grey, posterior mean in blue.
  plot(times, trajs[[1]][, 'V'], col = 'grey', main = title, type = 'l',
       mgp = c(7, 2.5, 0), ylab = '', xlab = 'time',
       cex.axis = 3.5, cex.main = 4.5, cex.lab = 4, lwd = 2)
  box(lty = "solid", col = 'black')
  for (k in 2:length(trajs)) {
    lines(times, trajs[[k]][, 'V'], col = 'grey', lwd = 5)
  }
  lines(times, mean_traj[, 'V'], col = 'blue', lwd = 5)
  # Observations are on the fixed 0.2 grid (matches output$data's sampling).
  points(seq(0, 20, 0.2), output$data[, 1], pch = 16, col = 'red', cex = 2)
  # R component overlaid on the same axes.
  lines(times, trajs[[1]][, 'R'], col = 'grey', main = '', type = 'l', lwd = 5)
  for (k in 2:length(trajs)) {
    lines(times, trajs[[k]][, 'R'], col = 'grey', lwd = 5)
  }
  lines(times, mean_traj[, 'R'], col = 'darkgreen', lwd = 2)
  points(seq(0, 20, 0.2), output$data[, 2], pch = 19, col = 'red', cex = 2)
  legend(-0.09, -1.05, c("V", "R", 'data'), cex = 3.5,
         lty = c(1, 1, NA), pch = c(NA, NA, 19),
         col = c("blue", "darkgreen", 'red'), lwd = c(5, 2, 2))
  dev.off()
}
# FIG2-FIG4: trajectory overlays for each sampler run.
par(mfrow=c(1,1))
PlotsResampledTraj(times=seq(0,20,0.2),output=output_IMIS_opt,title='A.IMIS-Opt, one parameter FhN',filename="FIG2.eps")
PlotsResampledTraj(times=seq(0,20,0.2),output=output_1parModelIMIS_shOpt,title='B.IMIS-ShOpt, one parameter FhN',filename="FIG3.eps")
PlotsResampledTraj(times=seq(0,20,0.2),output=output_fullModel,title='C.IMIS-ShOpt, full FhN',filename="FIG4.eps")
#PlotsResampledTraj(times=seq(0,20,0.2),output=output_1parModelIMIS_shOpt,title='IMIS-ShOpt samples, FhN-ODE model',filename='Oneparam_IMIS_shOpt_Splunk.png')
# FIG9: importance-sampling distribution at the end of the shotgun
# optimization stage -- two overlaid relative-frequency histograms of the
# first 1000 and the next 200 proposal points, the discovered optimizer
# centers as points on the x-axis, and the (rescaled) resampled density.
setEPS()
postscript("FIG9.eps",horizontal=FALSE, paper="special",height=14,width=18, colormodel = "cmyk",
family = "Helvetica")
par(mfrow=c(1,1),mar=c(6,8,8.5,6))
h1=hist(output_1parModelIMIS_shOpt$X_all[1:1000],breaks=35,plot=F)
h1$counts=h1$counts/sum(h1$counts)    # normalize counts to relative frequencies
h2=hist(output_1parModelIMIS_shOpt$X_all[1001:1200],breaks=15,plot=F)
h2$counts=h2$counts/sum(h2$counts)
rangey=max(range(h1$counts)[2],range(h2$counts)[2])
plot(h1,main='Importance sampling distribution \nat the end of the Shotgun optimization stage',cex.main=4.5,xlab='c',ylab='Density', xlim=c(0,20),ylim=c(0,rangey),col='grey20',cex.lab=4,cex.axis=3.5,mgp=c(5,2,0))
#title('Importance sampling distribution \nat the end of the Shotgun optimization stage',cex=15)
par(new=T)
plot(h2,main='',xlab='',ylab='', xlim=c(0,20),ylim=c(0,rangey),cex.lab=4,cex.axis=3.5,col='grey80',mgp=c(5,2,0))
points(output_1parModelIMIS_shOpt$center[2],0,col='grey10',pch=19,cex=5)
points(output_1parModelIMIS_shOpt$center[3],0,col='grey10',pch=19,cex=5)
points(output_1parModelIMIS_shOpt$center[1],0,col='grey10',pch=19,cex=5)
points(output_1parModelIMIS_shOpt$center[7],0,col='grey10',pch=19,cex=5)
# NOTE(review): the /245 rescale of the density appears hand-tuned to fit the
# histogram's y-range -- confirm before reusing with other data.
lines(density(output_1parModelIMIS_shOpt$resample)$x,density(output_1parModelIMIS_shOpt$resample)$y/245,col='grey10',lwd=10);
dev.off()
# plot(density(output_1parModelIMIS_shOpt$center[1:30]),cex.axis=4,cex.main=4,cex.lab=4,main='D. Shotgun opimization: discovered modes', xlim=c(0,20), xlab='c',mgp=c(6,1.5,0))
# text(12,0.12,'NLS, GP',cex=2.5)
# text(3,0.07,'Two-Stage',cex=2.5)
| /FhN_One_IMIS_ShOpt_IMIS_Opt/plots.R | no_license | BiljanaJSJ/IMIS-ShOpt | R | false | false | 11,998 | r | #################################################################
#Generate plots
#################################################################
#setwd('D:/Publications/IMIS-ShOpt/incremental-mixture-importance-submitted/codeSubmit/codeSubmit/FhN_One_IMIS_ShOpt_IMIS_Opt')
rm(list = ls(all = TRUE))
#setwd('E:/IMISCode/IMIS-ShOpt_bcp_VM_r7l_July_13/FhN_One_IMIS_ShOpt_IMIS_Opt/FhN_One_IMIS_ShOpt_IMIS_Opt')
setwd('E:/Publications/IMISCode_july_17_2018_submitted/IMIS_ShOpt/FhN_One_IMIS_ShOpt_IMIS_Opt')
source("Two-stage-FhN-just-c-with-prior.R") # 2-stage functions
source("IMIS.opt.colloc.proc-3optimizers.general-no-touchups.R") # General IMIS 3 optimizers function
source("fhn-model-set-up-x0proc-just-c.R") # likelihood etc...
source("FhN-model-set-up-as-ode-x0proc-thetalik-justc.R") # basic FhN functions
source("makeSSElik.R")
source("makeSSEprocFHN.R")
library(doParallel)
library(CollocInfer)
#output_fullModel=get(load('E:/IMISCode/IMIS-ShOpt_bcp_VM_r7l_July_13/FhN_fullModel_IMIS_ShOpt/IMIS_shopt_full_fhn_D10.RData'))
output_fullModel=get(load('E:/Publications/IMISCode_july_17_2018_submitted/IMIS_ShOpt/FhN_fullModel_IMIS_ShOpt/IMIS_shopt_full_fhn_D10.RData'))
output_1parModelIMIS_shOpt=get(load('FhN_1Param_IMIS_Shopt_D4.RData'))
output_IMIS_opt=get(load('FhN_1Param_IMIS_Opt_D12.RData'))
#get solution and the data around c=mean(output_IMIS_opt$resample)
times = seq(0,20,0.2)
print("Note that the parameter labelled 'sd' is actually a variance. will fix this eventually")
x0 = c(-1,1)
names(x0) = c("V","R")
pars=mean(output_IMIS_opt$resample)
parnames =names(pars)=c("c")
fhn=make.FHN()
y_c11 = lsoda(x0,times,fhn$fn.ode,pars)
y_c11 = y_c11[,2:3]
y=output_IMIS_opt$data
#data_c11 = y_c11 + matrix(rnorm(dim(y_c11)[1]*2,0,sqrt(.05^2)),length(times),2)
cl <- makeCluster(4)
registerDoParallel(cl)
clusterCall(cl,function(x) {library(deSolve);library(CollocInfer);library(numDeriv);library(lokern)})
clusterExport(cl,varlist=list('IMIS.opt.colloc.3optimizers.general.no.touch.ups','d2negnormdp2',"make.fhn","%dopar%","foreach",'make.SSEproc.FHN',"neglogprior","prior","likelihood",'times',"dnegnormdp",'make.SSElik',"dneglogpriordpar","lokerns","ksLqudratic",'simex.fun.justc','neq','der.fhn.justc','jac.fhn.justc','d2neglogpriordpar2'))
clusterExport(cl,varlist=ls())
#output_IMIS_opt$data=data
#save(output_IMIS_opt,file='FhN_1Param_IMIS_Opt_D3.RData')
cgrid=seq(0.2,20,length=1000)
loglik=sapply(cgrid,function(x) {likelihood(x,logs=TRUE,data=output_IMIS_opt$data)})
logpost=sapply(cgrid,function(x) {prior(x,logs=TRUE)+likelihood(x,logs=TRUE,data=output_IMIS_opt$data)})
log_prior=sapply(cgrid,function(x) {prior(x,logs=TRUE)})
#stopCluster(cl)
setEPS()
postscript("FIG1.eps",horizontal=FALSE, paper="special",height=24,width=24, colormodel = "cmyk",
family = "Helvetica")
#png('loglikPriorIMIS.png',height = 450,width=600)
#par(mfrow=c(2,2),oma=c(3,2,rep(0,2))+2,mar=c(1,2,3,1))
#par(mfrow=c(2,2),oma=c(3,2,rep(1,2))+0.005,mar=c(8,8,8,8)+0.5)
par(mfrow=c(3,2),oma=c(3,2,rep(1,2))+0.005,mar=c(8,8,8,8)+0.5)
#unnormalized log posterior
plot(cgrid,(logpost),cex.lab=4,cex.axis=4,cex.main=5,mgp=c(6,2.5,0),xlab='c',ylab='density',main='A. Unnormalized log posterior', lwd=3)
#Likelihood over a coarse grid
plot(cgrid,loglik,cex.lab=4,cex.axis=4,cex.main=5,mgp=c(6,2.5,0),xlab='c',ylab='density',main='B.Log likelihood', lwd=3)
#log prior
plot(cgrid,log_prior,cex.lab=4,cex.axis=4,cex.main=5,mgp=c(6,2.5,0),xlab='c',ylab='density',main='C.Log prior c~N(14,2)', lwd=3)
#IMIS-Opt posterior estimate
plot(density(output_IMIS_opt$resample),xlab='c',ylab='density',mgp=c(6,2.5,0),xlim=range(cgrid),main='D.IMIS-Opt posterior density',cex.axis=4,cex.main=5,cex.lab=4, lwd=3)
##IMIS-Opt posterior estimate zoomed in
##par(new=TRUE, oma=c(5,6,0,0.005))
plot(density(output_1parModelIMIS_shOpt$resample),xlab='c',ylab='density',mgp=c(6,2.5,0),xlim=range(cgrid),main='E.IMIS-ShOpt posterior density',cex.axis=4,cex.main=5,cex.lab=4, lwd=3)
par(new=TRUE, oma=c(9,13,1,0))
# ##par(new=TRUE, oma=c(5,6,0,0.005))
#
matLayout=matrix(0,6, 4, byrow = TRUE)
#matLayout[2,1]=1; matLayout[2,2]=1
#matLayout[3,1]=1; matLayout[3,2]=1
#matLayout[3,3]=1
matLayout[4,3]=1; #matLayout[4,2]=1
layout(matLayout)
plot(density(output_IMIS_opt$resample,adj=6), col='red',main='IMIS-Opt:zoom in',cex.axis=2.5,cex.main=4,cex.lab=3,xlab='',lwd=3,mgp=c(1.5,1,0),ylab='')
#dev.off()
# setEPS()
# postscript("FIG11.eps",horizontal=FALSE, paper="special",height=9,width=12, colormodel = "cmyk",
# family = "Helvetica")
# par(mfrow=c(1,1))
# par(mar=c(8,8,8,8)+0.5)#c(7,9,5,5))
#plot(density(output_1parModelIMIS_shOpt$resample),xlab='c',ylab='density',mgp=c(6,2.5,0),xlim=range(cgrid),main='E.IMIS-ShOpt posterior density',cex.axis=4,cex.main=4,cex.lab=4)
par(new=TRUE, oma=c(9,4,0,12))
#
# # matLayout=matrix(0,3, 3, byrow = TRUE)
# # matLayout[2,2]=1; matLayout[2,3]=1
# # matLayout[3,2]=1; matLayout[3,3]=1
#
matLayout=matrix(0,6, 4, byrow = TRUE)
matLayout[6,2]=1
layout(matLayout)
plot(density(output_1parModelIMIS_shOpt$resample,adj=6), col='red',main='IMIS-ShOpt:zoom in',cex.axis=3,cex.main=4,cex.lab=3,xlab='',lwd=3,mgp=c(1.5,1,0),ylab='')
dev.off()
png('StateVariablesData.png',width=700,height = 400)
par(mfrow=c(1,2),oma=c(3,2,rep(0,2))+0.05,mar=c(2,1,3,1)+2)
plot(times,y_c11[,1],col='blue',main=paste('A.State variables and obs., \n c=',round(mean(output_IMIS_opt$resample),2),sep=''),cex.axis=2,cex.main=2,cex.lab=2,xlab='times',ylab='',lwd=3,lty=1)
lines(times,y_c11[,2],col='green',lwd=2)
points(times,output_IMIS_opt$data[,1],col='red',lwd=3)
points(times,output_IMIS_opt$data[,2],col='orange',lwd=2)
par(xpd=TRUE)
legend(-0.00015,-0.95,c('V','R',expression(Y[V]),expression(Y[R])),cex=0.85,lty = c(1, 1, NA,NA), pch = c(NA, NA,1,1),col=c('blue',"green","red",'orange'),lwd=c(3,2,3,2))
plot(times,y[,1],col='blue',main='B. State variables and obs., \n c=3',cex.axis=2,cex.main=2,cex.lab=2,xlab='times',ylab='',type='l',lwd=3)
lines(times,y[,2],col='green',lwd=2)
points(times,output_IMIS_opt$data[,1],col='red',lwd=3)
points(times,output_IMIS_opt$data[,2],col='orange',lwd=2)
par(xpd=TRUE)
legend(-0.00015,-1,c('V','R',expression(Y[V]),expression(Y[R])),cex=0.85,lty = c(1, 1, NA,NA), pch = c(NA, NA,1,1),col=c('blue',"green","red",'orange'),lwd=c(3,2,3,2))
dev.off()
#caclulate KL divergence
library(flexmix)
##evaluate the density of IMIS-ShOpt samples over the interval [2.5,3.5]
##use marginal likelihood obtained from the IMIS-ShOpt as normalizing constant
dsamples=density(output_1parModelIMIS_shOpt$resample, from=2.5, to=3.5)
normalizedsamples=dsamples$y/exp(output_1parModelIMIS_shOpt$stat[2,1])
##evaluate the theoretical density over the same interval
cgrid=seq(2.5,3.5,length=length(dsamples$y))
logpost=sapply(cgrid,function(x) {prior(x,logs=TRUE)+likelihood(x,logs=TRUE,data=output_IMIS_opt$data)})
##numerically integrate the target posterior to obtain the normalizing constant
normconstintegrand <- function(x) {exp(prior(x,logs=TRUE)+likelihood(x,logs=TRUE,data=output_IMIS_opt$data))}
normconst=integrate(normconstintegrand,lower=2.5,upper=3.5)
norm_post=exp(logpost)/normconst$value
#stopCluster(cl)
#plot(cgrid,norm_post)
#lines(dsamples$x,normalizedsamples,col='red')
#plot(dsamples$x,normalizedsamples)
#plot(cgrid,norm_post)
KLdiv(cbind(norm_post,normalizedsamples))
# norm_post normalizedsamples
# norm_post 0.000000000 0.001636068
# normalizedsamples 0.000962198 0.000000000
#IMIS-Opt
dsamples=density(output_IMIS_opt$resample, from=2.5, to=3.5)
normalizedsamples=dsamples$y/exp(output_IMIS_opt$stat[2,1])
##evaluate the theoretical density over the same interval
cgrid=seq(2.5,3.5,length=length(dsamples$y))
logpost=sapply(cgrid,function(x) {prior(x,logs=TRUE)+likelihood(x,logs=TRUE,data=output_IMIS_opt$data)})
##numerically integrate the target posterior to obtain the normalizing constant
normconstintegrand <- function(x) {exp(prior(x,logs=TRUE)+likelihood(x,logs=TRUE,data=output_IMIS_opt$data))}
normconst=integrate(normconstintegrand,lower=2.5,upper=3.5)
norm_post=exp(logpost)/normconst$value
KLdiv(cbind(norm_post,normalizedsamples))
# norm_post normalizedsamples
# norm_post 0.000000 3.381397
# normalizedsamples 8.473557 0.000000
PlotsResampledTraj=function(times=seq(0,20,0.2),output,title,filename){
if (is.vector(output$resample)){
getTraj=lapply(1:length(output$resample), function(x) lsoda(x0,times,fhn$fn.ode, output$resample[x] ))
meanSol=lsoda(x0,times,fhn$fn.ode, mean(output$resample) )
}else{
getTraj=lapply(1:nrow(output$resample), function(x) lsoda(x0,times,fhn$fn.ode, output$resample[x,] ))
meanSol=lsoda(x0,times,fhn$fn.ode, colMeans(output$resample) )
}
#png(filename)
setEPS()
postscript(filename,horizontal=FALSE, paper="special",height=14,width=18, colormodel = "cmyk",
family = "Helvetica")
par(mfrow=c(1,1),mar=c(9,9,9,9))
plot(times,getTraj[[1]][,'V'], col='grey',main=title,type='l',mgp=c(7,2.5,0),ylab='',xlab='time',cex.axis=3.5,cex.main=4.5,cex.lab=4,lwd=2)
box(lty = "solid",col='black')
for (i in (2:length(getTraj))){
lines(times,getTraj[[i]][,'V'],col='grey',lwd=5)
}
lines(times,meanSol[,'V'],col='blue',lwd=5)
points(seq(0,20,0.2),output$data[,1],pch=16,col='red', cex = 2)
lines(times,getTraj[[1]][,'R'], col='grey',main='',type='l',lwd=5)
for (i in (2:length(getTraj))){
lines(times,getTraj[[i]][,'R'],col='grey',lwd=5)
}
lines(times,meanSol[,'R'],col='darkgreen',lwd=2)
points(seq(0,20,0.2),output$data[,2],pch=19,col='red', cex = 2)
legend(-0.09,-1.05,c("V","R",'data'),cex=3.5,lty=c(1,1,NA),pch=c(NA,NA,19),col=c("blue","darkgreen",'red'),lwd=c(5,2,2))
dev.off()
}
par(mfrow=c(1,1))
PlotsResampledTraj(times=seq(0,20,0.2),output=output_IMIS_opt,title='A.IMIS-Opt, one parameter FhN',filename="FIG2.eps")
PlotsResampledTraj(times=seq(0,20,0.2),output=output_1parModelIMIS_shOpt,title='B.IMIS-ShOpt, one parameter FhN',filename="FIG3.eps")
PlotsResampledTraj(times=seq(0,20,0.2),output=output_fullModel,title='C.IMIS-ShOpt, full FhN',filename="FIG4.eps")
#PlotsResampledTraj(times=seq(0,20,0.2),output=output_1parModelIMIS_shOpt,title='IMIS-ShOpt samples, FhN-ODE model',filename='Oneparam_IMIS_shOpt_Splunk.png')
# FIG9: importance sampling distribution at the end of the Shotgun
# optimization stage, drawn as two overlaid relative-frequency histograms.
setEPS()
postscript("FIG9.eps",horizontal=FALSE, paper="special",height=14,width=18, colormodel = "cmyk",
family = "Helvetica")
par(mfrow=c(1,1),mar=c(6,8,8.5,6))
# First 1000 draws of X_all, normalized to relative frequencies.
h1=hist(output_1parModelIMIS_shOpt$X_all[1:1000],breaks=35,plot=F)
h1$counts=h1$counts/sum(h1$counts)
# Draws 1001-1200 (later-stage samples), normalized the same way.
h2=hist(output_1parModelIMIS_shOpt$X_all[1001:1200],breaks=15,plot=F)
h2$counts=h2$counts/sum(h2$counts)
# Common y-axis upper limit so the two histograms are comparable.
rangey=max(range(h1$counts)[2],range(h2$counts)[2])
plot(h1,main='Importance sampling distribution \nat the end of the Shotgun optimization stage',cex.main=4.5,xlab='c',ylab='Density', xlim=c(0,20),ylim=c(0,rangey),col='grey20',cex.lab=4,cex.axis=3.5,mgp=c(5,2,0))
#title('Importance sampling distribution \nat the end of the Shotgun optimization stage',cex=15)
par(new=T)
plot(h2,main='',xlab='',ylab='', xlim=c(0,20),ylim=c(0,rangey),cex.lab=4,cex.axis=3.5,col='grey80',mgp=c(5,2,0))
# Mark selected optimizer centers on the x-axis.
# NOTE(review): indices 1, 2, 3 and 7 look hand-picked -- confirm against the
# layout of output_1parModelIMIS_shOpt$center.
points(output_1parModelIMIS_shOpt$center[2],0,col='grey10',pch=19,cex=5)
points(output_1parModelIMIS_shOpt$center[3],0,col='grey10',pch=19,cex=5)
points(output_1parModelIMIS_shOpt$center[1],0,col='grey10',pch=19,cex=5)
points(output_1parModelIMIS_shOpt$center[7],0,col='grey10',pch=19,cex=5)
# Kernel density of the resampled draws, rescaled by 245 to fit the histogram
# scale -- NOTE(review): magic constant, confirm intent.
lines(density(output_1parModelIMIS_shOpt$resample)$x,density(output_1parModelIMIS_shOpt$resample)$y/245,col='grey10',lwd=10);
dev.off()
# plot(density(output_1parModelIMIS_shOpt$center[1:30]),cex.axis=4,cex.main=4,cex.lab=4,main='D. Shotgun opimization: discovered modes', xlim=c(0,20), xlab='c',mgp=c(6,1.5,0))
# text(12,0.12,'NLS, GP',cex=2.5)
# text(3,0.07,'Two-Stage',cex=2.5)
# ---- end of source record ----
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1941121239L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615939032-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 826 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1941121239L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result)
############################################################################
cat("Load basemap for Middle East and North Africa with Egypt as center\n")
############################################################################
## this code is in an extra script because basemaps are sometimes not loaded
## loading must be repeated manually then (sometimes 2-3 times)
# NOTE(review): get_map() is presumably ggmap's tile fetcher (loaded
# elsewhere); each call hits the network and can fail transiently, hence
# the manual-retry note above.
basemap <- get_map(location = "Egypt", zoom=3, maptype="terrain")
basemap.mena <- get_map(location = "Jerusalem", zoom=5, maptype="terrain")
basemap.yem <- get_map(location = "Yemen", zoom=6, maptype="terrain")
basemap.egy <- get_map(location = "Asyut", zoom=6, maptype="terrain")
basemap.lbn <- get_map(location = "Amman", zoom=7, maptype="terrain")
basemap.irq <- get_map(location = "Iraq", zoom=6, maptype="terrain")
basemap.syr <- get_map(location = "Syria", zoom=7, maptype="terrain")
# ---- /code/02_data_exploration/00_get_basemaps.R (repo: elenase/pattern_analysis) ----
############################################################################
cat("Load basemap for Middle East and North Africa with Egypt as center\n")
############################################################################
## this code is in an extra script because basemaps are sometimes not loaded
## loading must be repeated manually then (sometimes 2-3 times)
basemap <- get_map(location = "Egypt", zoom=3, maptype="terrain")
basemap.mena <- get_map(location = "Jerusalem", zoom=5, maptype="terrain")
basemap.yem <- get_map(location = "Yemen", zoom=6, maptype="terrain")
basemap.egy <- get_map(location = "Asyut", zoom=6, maptype="terrain")
basemap.lbn <- get_map(location = "Amman", zoom=7, maptype="terrain")
basemap.irq <- get_map(location = "Iraq", zoom=6, maptype="terrain")
basemap.syr <- get_map(location = "Syria", zoom=7, maptype="terrain")
# ---- end of source record ----
library(RSQLite)
library(dbplyr)
# Set up: open the FPA-FOD wildfire SQLite database.
drv <- dbDriver('SQLite')
dir <- './'
dbFilename <- 'FPA_FOD_20170508.sqlite'
db <- dbConnect(drv, dbname = file.path(dir, dbFilename))
# Pull the whole "Fires" table into memory.
# NOTE(review): tbl() %>% collect() needs dplyr attached; presumably loaded
# elsewhere, otherwise %>% is undefined.
data <- tbl(db, "Fires") %>% collect()
# Close the connection (previously leaked).
dbDisconnect(db)
# Export data frame.
write.csv(data, 'data.csv')
# ---- /datapip.R (repo: feichengqi/dataforsocialgood) ----
library(RSQLite)
library(dbplyr)
# Set up
drv = dbDriver('SQLite')
dir = './'
dbFilename = 'FPA_FOD_20170508.sqlite'
db = dbConnect(drv, dbname = file.path(dir, dbFilename))
data = tbl(db, "Fires") %>% collect()
# Export data frames
write.csv(data, 'data.csv')
# ---- end of source record ----
#' Weighted variance estimation
#'
#' @param X The numeric data vector.
#' @param wt The non-negative weight vector.
#' @param na.rm Logical indicating whether missing values in \code{X} (and
#'   their weights) should be removed before computation. The default is FALSE.
#' @return The weighted, population-type variance: the weighted mean of the
#'   squared deviations from the weighted mean, with the total weight as divisor.
#' @keywords internal
wvar <- function(X, wt, na.rm = FALSE) {
  if (na.rm) {
    # Drop missing observations together with their weights.
    keep <- !is.na(X)
    wt <- wt[keep]
    X <- X[keep]
  }
  wsum <- sum(wt)
  wmean <- sum(wt * X) / wsum
  # Divisor is the total weight (population form), not wsum - 1.
  sum(wt * (X - wmean) ^ 2) / wsum
}
#' Weighted quantile estimation
#'
#' Computes a weighted quantile by linearly interpolating the inverse of the
#' normalized cumulative weight function.
#'
#' @param X The numeric data vector.
#' @param wt The non-negative weight vector.
#' @param p The probability level. The default is 0.5 (weighted median).
#' @keywords internal
wquantile <- function(X, wt, p = 0.5)
{
  if (!is.numeric(wt) || length(X) != length(wt))
    stop("X and wt must be numeric and equal-length vectors")
  if (!is.numeric(p) || any(p < 0 | p > 1))
    stop("Quartiles must be 0<=p<=1")
  if (min(wt) < 0)
    stop("Weights must be non-negative numbers")
  idx <- order(X)
  xs <- X[idx]
  # Weighted empirical CDF evaluated at the sorted data points.
  cdf <- cumsum(wt[idx]) / sum(wt)
  # Invert the CDF by linear interpolation; muffle approx()'s warnings
  # (e.g. about collapsing tied abscissae).
  withCallingHandlers(
    approx(cdf, xs, p)$y,
    warning = function(w) invokeRestart("muffleWarning")
  )
}
#' Weighted inter-quartile range estimation
#'
#' @param X The numeric data vector.
#' @param wt The non-negative weight vector.
#' @return The difference between the weighted 75th and 25th percentiles.
#' @keywords internal
wIQR <- function(X, wt) {
  q3 <- wquantile(X = X, wt = wt, p = 0.75)
  q1 <- wquantile(X = X, wt = wt, p = 0.25)
  q3 - q1
}
#' Numerical integration using the trapezoidal or Simpson's rule
#'
#' @param x The numeric vector of abscissa values.
#' @param fx The integrand: a numeric vector of the same length as \code{x},
#'   or (for \code{method = "simps"}) a function to be evaluated at \code{x}.
#' @param method The character string specifying the method of numerical
#'   integration: \code{"trap"} for the trapezoidal rule or \code{"simps"}
#'   for Simpson's rule.
#' @param n.pts Number of Simpson sub-intervals (forced to at least 64).
#' @return The scalar value of the numerical integral.
#' @importFrom methods is
#' @keywords internal
integ <- function(x, fx, method, n.pts = 256) {
  n <- length(x)
  if (method == "simps") {
    if (is.function(fx))
      fx <- fx(x)
    if (n != length(fx))
      stop("Unequal input vector lengths")
    if (n.pts < 64)
      n.pts <- 64
    # Simpson's rule needs a fine, evenly spaced grid, so the integrand is
    # first linearly interpolated onto 2 * n.pts + 1 points.
    ap <- approx(x, fx, n = 2 * n.pts + 1)
    h <- diff(ap$x)[1]
    integral <- h * (ap$y[2 * (1:n.pts) - 1] + 4 * ap$y[2 * (1:n.pts)] + ap$y[2 * (1:n.pts) + 1]) / 3
    value <- sum(integral)
  } else if (method == "trap") {
    if (!is.numeric(x) | !is.numeric(fx))
    {
      stop('The variable of integration "x" or "fx" is not numeric.')
    }
    if (length(x) != length(fx))
    {
      stop("The lengths of the variable of integration and the integrand do not match.")
    }
    # Integrate using the trapezoidal rule.
    value <- 0.5 * sum((x[2:(n)] - x[1:(n - 1)]) * (fx[1:(n - 1)] + fx[2:n]))
  } else {
    # Fail loudly; previously an unknown method produced the obscure
    # error "object 'value' not found".
    stop("method must be either \"trap\" or \"simps\".")
  }
  return(value)
}
#' Even-order derivatives of the standard normal density
#'
#' @param ord The (even) order of the derivative: 2, 4, 6 or 8.
#' @param X The numeric vector at which the derivative is evaluated.
#' @return The derivative of order \code{ord} of the standard normal
#'   density, evaluated elementwise at \code{X}.
#' @keywords internal
dnorkernel <- function(ord, X)
{
  # phi(x) multiplied by the corresponding (Hermite-type) polynomial.
  phi <- (1 / (sqrt(2 * pi))) * exp(-(X ^ 2) / 2)
  if (ord == 2)
    result <- phi * ((X ^ 2) - 1)
  else if (ord == 4)
    result <- phi * (3 - (6 * (X ^ 2)) + X ^ 4)
  else if (ord == 6)
    result <- phi * (X ^ 6 - (15 * (X ^ 4)) + (45 * (X ^ 2)) - 15)
  else if (ord == 8)
    result <- phi * (X ^ 8 - (28 * (X ^ 6)) + (210 * (X ^ 4)) - (420 * (X ^ 2)) + 105)
  else
    # Previously an unsupported order produced "object 'result' not found".
    stop("ord must be one of 2, 4, 6 or 8.")
  return(result)
}
#' Distribution function without the ith observation
#'
#' Leave-one-out weighted kernel estimate of the distribution function, used
#' for cross-validation bandwidth selection: row i of the result is the
#' estimate computed with observation i excluded.
#'
#' @param X The numeric data vector.
#' @param y The vector where the kernel estimation is computed.
#' @param wt The non-negative weight vector.
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".
#' @param bw A numeric bandwidth value.
#' @return A \code{length(X)} x \code{length(y)} matrix of leave-one-out estimates.
#' @author Kassu Mehari Beyene and Anouar El Ghouch
#' @keywords internal
ker_dis_i <- function(X, y, wt, ktype, bw)
{
  n <- length(X);
  AUX <- matrix(0, n, n);
  zero <- rep(0, n);
  # ww[i, j] = wt[i]; zeroing the diagonal removes each observation's own
  # weight from its column.
  ww <- outer(wt, zero, "-");
  diag(ww) <- 0;
  # den[i] = total weight excluding observation i (leave-one-out divisor).
  den <- apply(ww, 2, sum);
  resu <- matrix(0, n, length(y));
  for (j in 1:length(y))
  {
    # Scaled differences (y[j] - X) / bw replicated across the n rows.
    AUX <- matrix(rep.int(outer(y[j], X, "-"), n), nrow = n, byrow = TRUE) / bw;
    aux <- kfunc(ktype = ktype, difmat = AUX );
    # aux1[i, k] = wt[k] * K((y[j] - X[k]) / bw); the diagonal (own term)
    # is dropped before summing over k.
    aux1 <- t(wt * t(aux));
    diag(aux1) <- 0;
    resu[, j] <- (apply(aux1, 1, sum)) / den;
  }
  return(resu)
}
#' Kernel constants used in bandwidth selection
#'
#' Returns \code{ro} (twice the value of the integral of x k(x) K(x) dx) and
#' \code{mu2} (the second-moment integral of x^2 k(x) dx) for each supported
#' kernel.
#'
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".
#' @return A list with components \code{ro} and \code{mu2}.
#' @keywords internal
muro <- function(ktype)
{
  if (ktype == "normal") {
    ro <- 2 * 0.28209
    mu2 <- 1
  } else if (ktype == "epanechnikov") {
    ro <- 2 * 0.12857
    mu2 <- 1 / 5
  } else if (ktype == "biweight") {
    ro <- 2 * 0.10823
    mu2 <- 1 / 7
  } else if (ktype == "triweight") {
    ro <- 2 * 0.095183
    mu2 <- 1 / 9
  } else {
    # Previously an unknown kernel produced "object 'ro' not found".
    stop("Unsupported ktype: must be \"normal\", \"epanechnikov\", \"biweight\" or \"triweight\".")
  }
  return(list(ro = ro, mu2 = mu2))
}
#' Kernel distribution function
#'
#' Evaluates the integrated (cumulative) kernel at the supplied points.
#'
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".
#' @param X A numeric vector of sample data.
#' @return A vector with the kernel CDF evaluated at \code{X}.
#' @keywords internal
kfunction <- function(ktype, X) {
  if (ktype == "normal") {
    result <- pnorm(X)
  }
  else if (ktype == "epanechnikov") {
    result <- (0.75 * X * (1 - (X ^ 2) / 3) + 0.5)
  }
  else if (ktype == "biweight") {
    result <- ((15 / 16) * X - (5 / 8) * X ^ 3 + (3 / 16) * X ^ 5 + 0.5)
  }
  else if (ktype == "triweight") {
    result <- ((35 / 32) * X - (35 / 32) * X ^ 3 + (21 / 32) * X ^ 5 - (5 / 32) * X ^ 7 + 0.5)
  }
  else {
    # Previously an unknown kernel produced "object 'result' not found";
    # consistent with the explicit error in muro().
    stop("Unsupported ktype: must be \"normal\", \"epanechnikov\", \"biweight\" or \"triweight\".")
  }
  return(result)
}
#' Evaluate the kernel CDF on a matrix of scaled differences
#'
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}". By default, the "\code{normal}" kernel is used.
#' @param difmat A numeric matrix of sample data (X) minus evaluation points (x0) divided by bandwidth value (bw).
#' @return The matrix resulting from evaluating \code{difmat}.
#' @keywords internal
kfunc <- function(ktype = "normal", difmat)
{
  if (ktype == "normal") {
    # The normal kernel has unbounded support: no clipping needed.
    return(kfunction(ktype = "normal", X = difmat))
  }
  # All remaining kernels share the support [-1, 1]: the CDF is 0 below,
  # 1 above, and the polynomial kernel CDF in between.  This single code
  # path replaces three copy-pasted branches in the original.
  estim <- difmat
  low <- (difmat <= -1)
  up <- (difmat >= 1)
  btwn <- (difmat > -1 & difmat < 1)
  estim[low] <- 0
  estim[up] <- 1
  estim[btwn] <- kfunction(ktype = ktype, X = difmat[btwn])
  return(estim)
}
#' ROC estimation function
#'
#' @param U The vector of grid points where the ROC curve is estimated.
#' @param D The event indicator (used as case weights).
#' @param M The numeric vector of marker values for which the time-dependent ROC curves is computed.
#' @param bw The bandwidth parameter for smoothing the ROC function. The possible options are \code{NR} normal reference method; \code{PI} plug-in method and \code{CV} cross-validation method. The default is the \code{NR} normal reference method.
#' @param method is the method of ROC curve estimation. The possible options are \code{emp} empirical method; \code{untra} smooth without boundary correction and \code{tra} is smooth ROC curve estimation with boundary correction.
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".
#' @return A list with the ROC values at \code{U}, the empirical AUC, and the
#'   selected bandwidth (\code{NA} for the empirical method).
#'
#' @author Beyene K. Mehari and El Ghouch Anouar
#'
#' @references Beyene, K. M. and El Ghouch A. (2020). Smoothed time-dependent receiver operating characteristic curve for right censored survival data. \emph{Statistics in Medicine}. 39: 3373– 3396.
#' @keywords internal
RocFun <- function(U, D, M, bw = "NR", method, ktype) {
  # Order the case weights by increasing marker value.
  oM <- order(M)
  D <- (D[oM])
  nD <- length(D)
  sumD <- sum(D)
  # Placement values: Z[i] is one minus the (weighted) proportion of
  # controls at or below the i-th ordered marker value.
  Z <- 1 - cumsum(1 - D) / (nD - sumD)
  # Weighted empirical AUC.
  AUC <- sum(D * Z) / sumD
  if (method == "emp") {
    # Empirical (step-function) ROC: case-weighted indicator of Z <= U.
    difmat <- (outer(U, Z, "-"))
    resul <- (difmat >= 0)
    roc1 <- sweep(resul, 2, D, "*")
    roc <- apply(roc1, 1, sum) / sumD
    bw1 <- NA
  }
  else if (method == "untra") {
    # Smoothed ROC without boundary correction: kernel-smooth the
    # case-weighted placement values directly on [0, 1].
    Zt <- Z
    Ut <- U
    Ztt <- Zt[D != 0]
    wt <- D[D != 0]
    # wbw() (defined elsewhere in this package) selects the bandwidth
    # according to the requested rule (NR / PI / CV).
    bw1 <- wbw(X = Ztt, wt = wt, bw = bw, ktype = ktype)$bw
    difmat <- (outer(Ut, Ztt, "-")) / bw1
    resul <- kfunc(ktype = ktype, difmat = difmat)
    w <- wt / sum(wt)
    roc1 <- sweep(resul, 2, w, "*")
    roc <- apply(roc1, 1, sum)
  }
  else if (method == "tra") {
    # Boundary-corrected smoothing: map (0, 1) to the real line with a
    # probit transform before smoothing (Beyene & El Ghouch, 2020).
    mul <- nD / (nD + 1)
    Zt <- qnorm(mul * Z + (1 / nD ^ 2))
    Ut <- qnorm(mul * U + (1 / nD ^ 2))
    Ztt <- Zt[D != 0]
    wt <- D[D != 0]
    bw1 <- wbw(X = Ztt, wt = wt, bw = bw, ktype = ktype)$bw
    difmat <- (outer(Ut, Ztt, "-")) / bw1
    resul <- kfunc(ktype = ktype, difmat = difmat)
    w <- wt / sum(wt)
    roc1 <- sweep(resul, 2, w, "*")
    roc <- apply(roc1, 1, sum)
  }
  else{
    stop("The specified method is not correct.")
  }
  return(list(roc = roc, auc = AUC, bw = bw1))
}
#' Survival probability conditional to the observed data estimation for right censored data.
#'
#' @param Y The numeric vector of event-times or observed times.
#' @param M The numeric vector of marker values for which we want to compute the time-dependent ROC curves.
#' @param censor The censoring indicator, \code{1} if event, \code{0} otherwise.
#' @param t A scalar time point at which we want to compute the time-dependent ROC curve.
#' @param h A scalar bandwidth for the Beran weight calculations. The default is the method of Sheather and Jones (1991).
#' @param kernel A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{tricube}", "\code{boxcar}", "\code{triangular}", or "\code{quartic}". The default is the "\code{normal}" kernel density.
#' @return Return a list of two vectors:
#' @return \code{positive } \code{P(T<t|Y,censor,M)}.
#' @return \code{negative } \code{P(T>t|Y,censor,M)}.
#' @references Beyene, K. M. and El Ghouch A. (2020). Smoothed time-dependent receiver operating characteristic curve for right censored survival data. \emph{Statistics in Medicine}. 39: 3373– 3396.
#' @references Li, Liang, Bo Hu and Tom Greene (2018). A simple method to estimate the time-dependent receiver operating characteristic curve and the area under the curve with right censored data, \emph{Statistical Methods in Medical Research}, 27(8): 2264-2278.
#' @references Pablo Martínez-Camblor and Gustavo F. Bayón and Sonia Pérez-Fernández (2016). Cumulative/dynamic roc curve estimation, \emph{Journal of Statistical Computation and Simulation}, 86(17): 3582-3594.
#' @keywords internal
Csurv <- function(Y, M, censor, t, h = NULL, kernel="normal") {
  # Default bandwidth: Sheather-Jones direct plug-in on the marker values.
  if (is.null(h)) {
    h <- bw.SJ(M, method = "dpi")
  }
  # Beran() expects base-R kernel naming ("gaussian" instead of "normal").
  if(kernel=="normal"){
    kernel <- "gaussian"
  }
  n <- length(M)
  positive <- rep(NA, n)
  for (i in 1:n) {
    if (Y[i] > t) {
      # Still under observation at t: certainly a "negative".
      positive[i] <- 0
    } else {
      if (censor[i] == 1) {
        # Event observed before t: certainly a "positive".
        positive[i] <- 1
      } else {
        # Censored before t: probability of being a case given survival past
        # Y[i], i.e. 1 - S(t | M) / S(Y | M) via the Beran (conditional
        # Kaplan-Meier) estimator.
        St <- Beran(time = Y, status = censor, covariate = M, x = M[i], y = t, kernel = kernel, bw = h)
        Sy <- Beran(time = Y, status = censor, covariate = M, x = M[i], y = Y[i], kernel = kernel, bw = h)
        if (Sy == 0) {
          # Degenerate denominator: treat as a certain positive.
          positive[i] <- 1
        } else {
          positive[i] <- 1 - St / Sy
        }
      }
    }
  }
  negative <- 1 - positive
  return(list(positive = positive, negative = negative))
}
# Functions to compute the spline knot sequences.
# These functions are based on the R package intcensROC.
#' Knot sequence in the time direction
#'
#' Builds a clamped knot vector from quantiles of the interval endpoints
#' that are informative for each censoring type.
#'
#' @param U Left endpoints of the observation intervals.
#' @param V Right endpoints of the observation intervals.
#' @param delta Censoring-type indicator (1 = left, 2 = interval, 3 = right).
#' @param dim Spline dimension; the number of interior intervals is dim - 2.
#' @return The clamped knot vector (boundary knots repeated three times).
#' @keywords internal
.knotT <- function(U, V, delta, dim) {
  size <- dim - 2
  # Pool the endpoints that carry information for each censoring type.
  knot_pre_t <- c(U[delta != 1], U[delta != 2], V[delta != 2], V[delta != 3])
  qt <- numeric(size + 1)
  qt[1] <- 0
  for (i in 2:(size + 1)) {
    qt[i] <- quantile(knot_pre_t, (i - 1) / size, name = FALSE, na.rm = TRUE)
  }
  # Clamp the boundary knots (previously returned invisibly via a trailing
  # assignment; now returned explicitly).
  c(qt[1], qt[1], qt, qt[size + 1], qt[size + 1])
}
#' Knot sequence in the marker direction
#'
#' @param marker Observed marker values.
#' @param dim Spline dimension; the number of interior intervals is dim - 2.
#' @return The clamped knot vector (boundary knots repeated three times).
#' @keywords internal
.knotM <- function(marker, dim) {
  size <- dim - 2
  knot_pre_m <- marker
  qt <- numeric(size + 1)
  qt[1] <- 0
  for (i in 2:(size + 1)) {
    qt[i] <- quantile(knot_pre_m, (i - 1) / size, name = FALSE, na.rm = TRUE)
  }
  # Push the upper boundary slightly beyond the largest marker so the
  # maximum observation lies strictly inside the support.
  # (max(marker + 0.1) is equivalent to max(marker) + 0.1.)
  qt[size + 1] <- max(marker + 0.1)
  # Clamp the boundary knots (previously returned invisibly via a trailing
  # assignment; now returned explicitly).
  c(qt[1], qt[1], qt, qt[size + 1], qt[size + 1])
}
#' Compute the conditional survival function for Interval Censored Survival Data
#'
#' @description A method to compute the survival function for the
#' interval censored survival data based on a spline function based constrained
#' maximum likelihood estimator. The maximization process of likelihood is
#' carried out by generalized gradient projection method.
#' @usage condS(L, R, M, Delta, t, m)
#' @param L The numeric vector of left limits of the observed times. For left censored observations \code{L == 0}.
#' @param R The numeric vector of right limits of the observed times. For right censored observations \code{R == Inf}.
#' @param M An array containing the marker levels for the samples.
#' @param Delta An array of indicators for the censoring type: 1, 2, 3 for
#' event before the left bound, within the observed interval, and after the
#' right bound respectively. Derived from \code{L} and \code{R} when NULL.
#' @param t A scalar indicating the prediction time.
#' @param m A scalar for the cutoff of the marker variable.
#' @references Wu, Yuan; Zhang, Ying. Partially monotone tensor spline estimation
#' of the joint distribution function with bivariate current status data.
#' Ann. Statist. 40, 2012, 1609-1636 <doi:10.1214/12-AOS1016>
#' @keywords internal
condS <- function(L, R, M, Delta=NULL, t, m) {
  n <- length(L)
  U <- L
  V <- R
  Marker <- M
  PredictTime <- t
  # Flags for left-censored (L <= 0) and right-censored (R == Inf) intervals.
  ind <- (U<=0)
  ind1 <- (V==Inf)
  if (any(ind1 == TRUE)){
    # Replace Inf with a large finite value so the spline knots stay finite.
    V[ind1] <- 10000000
  }
  if(is.null(Delta)){
    # Derive censoring-type codes when not supplied:
    # 1 = left-censored, 2 = interval-censored, 3 = right-censored.
    Delta <- rep(2, n)
    Delta[ind] <- 1
    Delta[ind1] <- 3
  }
  if (any(Marker < 0))
    stop(paste0("Negative marker value found!"))
  # Determine the spline dimension: roughly n^(1/3) rounded to the nearer
  # integer, plus 2.
  size <- length(U)
  cadSize <- size^(1/3)
  if (cadSize - floor(cadSize) < ceiling(cadSize) - cadSize) {
    Dim <- floor(cadSize) + 2
  } else {
    Dim <- ceiling(cadSize) + 2
  }
  # Knot sequences for the time and marker directions.
  knotT <- .knotT(U, V, Delta, Dim)
  knotM <- .knotM(Marker, Dim)
  # Sieve MLE of the tensor-spline coefficients (compiled cenROC code).
  theta = .Call("_cenROC_sieve", PACKAGE = "cenROC", U, V, Marker, Delta,
                knotT, knotM, Dim)
  # Evaluate the fitted distribution in a small band around the cutoff m.
  # NOTE(review): the exact semantics of _cenROC_surva live in the C++
  # source -- confirm the (m + eps, m - eps) convention there.
  m2 <- m - 0.0001
  m <- m + 0.0001
  Fm = .Call("_cenROC_surva", PACKAGE = "cenROC", theta, m,
             m2, PredictTime, knotT, knotM)
  Fest <- 1 - Fm
  return(Fest)
}
#' Survival probability conditional on the observed data estimation for interval censored data
#'
#' @param L The numeric vector of left limits of the observed times. For left censored observations \code{L == 0}.
#' @param R The numeric vector of right limits of the observed times. For right censored observations \code{R == Inf}.
#' @param M The numeric vector of marker values.
#' @param t A scalar time point used to calculate the ROC curve.
#' @param method A character indicating the type of modeling: nonparametric \code{"np"}, parametric \code{"pa"} or semiparametric \code{"sp"}.
#' @param dist A character indicating the distribution for the parametric model: \code{"exponential"}, \code{"weibull"}, \code{"gamma"}, \code{"lnorm"}, \code{"loglogistic"} or \code{"generalgamma"}.
#' @return Return a list of two vectors:
#' @return \code{positive } \code{P(T<t|L,R,M)}.
#' @return \code{negative } \code{P(T>t|L,R,M)}.
#' @references Beyene, K. M. and El Ghouch A. (2022). Time-dependent ROC curve estimation for interval-censored data. \emph{Biometrical Journal}, 64, 1056– 1074.
#' @keywords internal
ICsur <- function( L, R, M, t, method, dist) {
  data <- data.frame(L=L, R=R, M=M)
  n <- length(M) ;
  positive <- rep(NA, n);
  for (i in 1:n) {
    if (R[i] <= t) {
      # Event known to have occurred by t: certain positive.
      positive[i] <- 1;
    } else {
      if (L[i] >= t) {
        # Event known to occur after t: certain negative.
        positive[i] <- 0;
      } else {
        # t falls inside the observation interval (L[i], R[i]):
        # estimate P(T <= t | L, R, M) from a conditional survival model.
        if (method=="np"){
          # Nonparametric tensor-spline conditional survival (condS),
          # evaluated at t and at both interval endpoints.
          tmp1 <- condS(L=L, R=R, M=M, t=t, m=M[i])
          tmp2 <- condS(L=L, R=R, M=M, t=L[i], m=M[i])
          tmp3 <- condS(L=L, R=R, M=M, t=R[i], m=M[i])
          tmp <- c(tmp1, tmp2, tmp3)
        } else if (method=="pa"){
          # Parametric AFT model -- NOTE(review): ic_par/getFitEsts come
          # from the icenReg package, Surv from survival.
          formula <- Surv(time=L, time2=R, type="interval2") ~ M
          fit <- ic_par(formula, model = "aft", dist = dist, data=data, weights = NULL)
          newdat <- data.frame(M=c(M[i]));
          tmp <- 1 - (getFitEsts(fit, newdat, q=c(t, L[i], R[i])));
        } else if (method=="sp"){
          # Semiparametric proportional-hazards model (icenReg::ic_sp).
          formula <- Surv(time=L, time2=R, type="interval2") ~ M
          fit <- ic_sp(formula, model = "ph", data=data, weights = NULL)
          newdat <- data.frame(M=c(M[i]));
          tmp <- 1 - (getFitEsts(fit, newdat, q=c(t, L[i], R[i])));
        }
        # Conditional probability of being positive given the interval;
        # right-censored (R = Inf) and left-censored (L = 0) cases use
        # the appropriate one-sided ratio.
        positive[i] <- ifelse(R[i]==Inf, 1-(tmp[1]/tmp[2]), ifelse(L[i]==0, (1-tmp[1])/(1-tmp[3]), (tmp[2]-tmp[1])/(tmp[2]-tmp[3])))
      }
    }
  }
  negative <- 1 - positive;
  return(list(positive = positive, negative = negative));
}
}
| /R/zzzz.R | no_license | cran/cenROC | R | false | false | 18,190 | r | #' Weighted variance estimation
#'
#' @param X The numeric data vector.
#' @param wt The non-negative weight vector.
#' @param na.rm The character indicator wether to consider missing value(s) or not. The defult is FALSE.
#' @keywords internal
wvar <- function(X, wt, na.rm = FALSE) {
if (na.rm) {
wt <- wt[i <- !is.na(X)]
X <- X[i]
}
wsum <- sum(wt)
wmean = sum(wt * X) / wsum
varr = sum(wt * (X - wmean) ^ 2) / (wsum)
return(varr)
}
#' Weighted quantile estimation
#'
#' Computes a weighted quantile by linearly interpolating the inverse of the
#' normalized cumulative weight function.
#'
#' @param X The numeric data vector.
#' @param wt The non-negative weight vector.
#' @param p The probability level. The default is 0.5 (weighted median).
#' @keywords internal
wquantile <- function(X, wt, p = 0.5)
{
  if (!is.numeric(wt) || length(X) != length(wt))
    stop("X and wt must be numeric and equal-length vectors")
  if (!is.numeric(p) || any(p < 0 | p > 1))
    stop("Quartiles must be 0<=p<=1")
  if (min(wt) < 0)
    stop("Weights must be non-negative numbers")
  idx <- order(X)
  xs <- X[idx]
  # Weighted empirical CDF evaluated at the sorted data points.
  cdf <- cumsum(wt[idx]) / sum(wt)
  # Invert the CDF by linear interpolation; muffle approx()'s warnings
  # (e.g. about collapsing tied abscissae).
  withCallingHandlers(
    approx(cdf, xs, p)$y,
    warning = function(w) invokeRestart("muffleWarning")
  )
}
#' Weighted inter-quartile range estimation
#'
#' @param X The numeric data vector.
#' @param wt The non-negative weight vector.
#' @return The difference between the weighted 75th and 25th percentiles.
#' @keywords internal
wIQR <- function(X, wt) {
  q3 <- wquantile(X = X, wt = wt, p = 0.75)
  q1 <- wquantile(X = X, wt = wt, p = 0.25)
  q3 - q1
}
#' Numerical integration using the trapezoidal or Simpson's rule
#'
#' @param x The numeric vector of abscissa values.
#' @param fx The integrand: a numeric vector of the same length as \code{x},
#'   or (for \code{method = "simps"}) a function to be evaluated at \code{x}.
#' @param method The character string specifying the method of numerical
#'   integration: \code{"trap"} for the trapezoidal rule or \code{"simps"}
#'   for Simpson's rule.
#' @param n.pts Number of Simpson sub-intervals (forced to at least 64).
#' @return The scalar value of the numerical integral.
#' @importFrom methods is
#' @keywords internal
integ <- function(x, fx, method, n.pts = 256) {
  n <- length(x)
  if (method == "simps") {
    if (is.function(fx))
      fx <- fx(x)
    if (n != length(fx))
      stop("Unequal input vector lengths")
    if (n.pts < 64)
      n.pts <- 64
    # Simpson's rule needs a fine, evenly spaced grid, so the integrand is
    # first linearly interpolated onto 2 * n.pts + 1 points.
    ap <- approx(x, fx, n = 2 * n.pts + 1)
    h <- diff(ap$x)[1]
    integral <- h * (ap$y[2 * (1:n.pts) - 1] + 4 * ap$y[2 * (1:n.pts)] + ap$y[2 * (1:n.pts) + 1]) / 3
    value <- sum(integral)
  } else if (method == "trap") {
    if (!is.numeric(x) | !is.numeric(fx))
    {
      stop('The variable of integration "x" or "fx" is not numeric.')
    }
    if (length(x) != length(fx))
    {
      stop("The lengths of the variable of integration and the integrand do not match.")
    }
    # Integrate using the trapezoidal rule.
    value <- 0.5 * sum((x[2:(n)] - x[1:(n - 1)]) * (fx[1:(n - 1)] + fx[2:n]))
  } else {
    # Fail loudly; previously an unknown method produced the obscure
    # error "object 'value' not found".
    stop("method must be either \"trap\" or \"simps\".")
  }
  return(value)
}
#' Even-order derivatives of the standard normal density
#'
#' @param ord The (even) order of the derivative: 2, 4, 6 or 8.
#' @param X The numeric vector at which the derivative is evaluated.
#' @return The derivative of order \code{ord} of the standard normal
#'   density, evaluated elementwise at \code{X}.
#' @keywords internal
dnorkernel <- function(ord, X)
{
  # phi(x) multiplied by the corresponding (Hermite-type) polynomial.
  phi <- (1 / (sqrt(2 * pi))) * exp(-(X ^ 2) / 2)
  if (ord == 2)
    result <- phi * ((X ^ 2) - 1)
  else if (ord == 4)
    result <- phi * (3 - (6 * (X ^ 2)) + X ^ 4)
  else if (ord == 6)
    result <- phi * (X ^ 6 - (15 * (X ^ 4)) + (45 * (X ^ 2)) - 15)
  else if (ord == 8)
    result <- phi * (X ^ 8 - (28 * (X ^ 6)) + (210 * (X ^ 4)) - (420 * (X ^ 2)) + 105)
  else
    # Previously an unsupported order produced "object 'result' not found".
    stop("ord must be one of 2, 4, 6 or 8.")
  return(result)
}
#' Distribution function without the ith observation
#'
#' Leave-one-out weighted kernel estimate of the distribution function, used
#' for cross-validation bandwidth selection: row i of the result is the
#' estimate computed with observation i excluded.
#'
#' @param X The numeric data vector.
#' @param y The vector where the kernel estimation is computed.
#' @param wt The non-negative weight vector.
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".
#' @param bw A numeric bandwidth value.
#' @return A \code{length(X)} x \code{length(y)} matrix of leave-one-out estimates.
#' @author Kassu Mehari Beyene and Anouar El Ghouch
#' @keywords internal
ker_dis_i <- function(X, y, wt, ktype, bw)
{
  n <- length(X);
  AUX <- matrix(0, n, n);
  zero <- rep(0, n);
  # ww[i, j] = wt[i]; zeroing the diagonal removes each observation's own
  # weight from its column.
  ww <- outer(wt, zero, "-");
  diag(ww) <- 0;
  # den[i] = total weight excluding observation i (leave-one-out divisor).
  den <- apply(ww, 2, sum);
  resu <- matrix(0, n, length(y));
  for (j in 1:length(y))
  {
    # Scaled differences (y[j] - X) / bw replicated across the n rows.
    AUX <- matrix(rep.int(outer(y[j], X, "-"), n), nrow = n, byrow = TRUE) / bw;
    aux <- kfunc(ktype = ktype, difmat = AUX );
    # aux1[i, k] = wt[k] * K((y[j] - X[k]) / bw); the diagonal (own term)
    # is dropped before summing over k.
    aux1 <- t(wt * t(aux));
    diag(aux1) <- 0;
    resu[, j] <- (apply(aux1, 1, sum)) / den;
  }
  return(resu)
}
#' Kernel constants used in bandwidth selection
#'
#' Returns \code{ro} (twice the value of the integral of x k(x) K(x) dx) and
#' \code{mu2} (the second-moment integral of x^2 k(x) dx) for each supported
#' kernel.
#'
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".
#' @return A list with components \code{ro} and \code{mu2}.
#' @keywords internal
muro <- function(ktype)
{
  if (ktype == "normal") {
    ro <- 2 * 0.28209
    mu2 <- 1
  } else if (ktype == "epanechnikov") {
    ro <- 2 * 0.12857
    mu2 <- 1 / 5
  } else if (ktype == "biweight") {
    ro <- 2 * 0.10823
    mu2 <- 1 / 7
  } else if (ktype == "triweight") {
    ro <- 2 * 0.095183
    mu2 <- 1 / 9
  } else {
    # Previously an unknown kernel produced "object 'ro' not found".
    stop("Unsupported ktype: must be \"normal\", \"epanechnikov\", \"biweight\" or \"triweight\".")
  }
  return(list(ro = ro, mu2 = mu2))
}
#' Kernel distribution function
#'
#' Evaluates the integrated (cumulative) kernel at the supplied points.
#'
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".
#' @param X A numeric vector of sample data.
#' @return A vector with the kernel CDF evaluated at \code{X}.
#' @keywords internal
kfunction <- function(ktype, X) {
  if (ktype == "normal") {
    result <- pnorm(X)
  }
  else if (ktype == "epanechnikov") {
    result <- (0.75 * X * (1 - (X ^ 2) / 3) + 0.5)
  }
  else if (ktype == "biweight") {
    result <- ((15 / 16) * X - (5 / 8) * X ^ 3 + (3 / 16) * X ^ 5 + 0.5)
  }
  else if (ktype == "triweight") {
    result <- ((35 / 32) * X - (35 / 32) * X ^ 3 + (21 / 32) * X ^ 5 - (5 / 32) * X ^ 7 + 0.5)
  }
  else {
    # Previously an unknown kernel produced "object 'result' not found";
    # consistent with the explicit error in muro().
    stop("Unsupported ktype: must be \"normal\", \"epanechnikov\", \"biweight\" or \"triweight\".")
  }
  return(result)
}
#' Evaluate the kernel CDF on a matrix of scaled differences
#'
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}". By default, the "\code{normal}" kernel is used.
#' @param difmat A numeric matrix of sample data (X) minus evaluation points (x0) divided by bandwidth value (bw).
#' @return The matrix resulting from evaluating \code{difmat}.
#' @keywords internal
kfunc <- function(ktype = "normal", difmat)
{
  if (ktype == "normal") {
    # The normal kernel has unbounded support: no clipping needed.
    return(kfunction(ktype = "normal", X = difmat))
  }
  # All remaining kernels share the support [-1, 1]: the CDF is 0 below,
  # 1 above, and the polynomial kernel CDF in between.  This single code
  # path replaces three copy-pasted branches in the original.
  estim <- difmat
  low <- (difmat <= -1)
  up <- (difmat >= 1)
  btwn <- (difmat > -1 & difmat < 1)
  estim[low] <- 0
  estim[up] <- 1
  estim[btwn] <- kfunction(ktype = ktype, X = difmat[btwn])
  return(estim)
}
#' ROC estimation function
#'
#' @param U The vector of grid points where the ROC curve is estimated.
#' @param D The event indicator (used as case weights).
#' @param M The numeric vector of marker values for which the time-dependent ROC curves is computed.
#' @param bw The bandwidth parameter for smoothing the ROC function. The possible options are \code{NR} normal reference method; \code{PI} plug-in method and \code{CV} cross-validation method. The default is the \code{NR} normal reference method.
#' @param method is the method of ROC curve estimation. The possible options are \code{emp} empirical method; \code{untra} smooth without boundary correction and \code{tra} is smooth ROC curve estimation with boundary correction.
#' @param ktype A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".
#' @return A list with the ROC values at \code{U}, the empirical AUC, and the
#'   selected bandwidth (\code{NA} for the empirical method).
#'
#' @author Beyene K. Mehari and El Ghouch Anouar
#'
#' @references Beyene, K. M. and El Ghouch A. (2020). Smoothed time-dependent receiver operating characteristic curve for right censored survival data. \emph{Statistics in Medicine}. 39: 3373– 3396.
#' @keywords internal
RocFun <- function(U, D, M, bw = "NR", method, ktype) {
  # Order the case weights by increasing marker value.
  oM <- order(M)
  D <- (D[oM])
  nD <- length(D)
  sumD <- sum(D)
  # Placement values: Z[i] is one minus the (weighted) proportion of
  # controls at or below the i-th ordered marker value.
  Z <- 1 - cumsum(1 - D) / (nD - sumD)
  # Weighted empirical AUC.
  AUC <- sum(D * Z) / sumD
  if (method == "emp") {
    # Empirical (step-function) ROC: case-weighted indicator of Z <= U.
    difmat <- (outer(U, Z, "-"))
    resul <- (difmat >= 0)
    roc1 <- sweep(resul, 2, D, "*")
    roc <- apply(roc1, 1, sum) / sumD
    bw1 <- NA
  }
  else if (method == "untra") {
    # Smoothed ROC without boundary correction: kernel-smooth the
    # case-weighted placement values directly on [0, 1].
    Zt <- Z
    Ut <- U
    Ztt <- Zt[D != 0]
    wt <- D[D != 0]
    # wbw() (defined elsewhere in this package) selects the bandwidth
    # according to the requested rule (NR / PI / CV).
    bw1 <- wbw(X = Ztt, wt = wt, bw = bw, ktype = ktype)$bw
    difmat <- (outer(Ut, Ztt, "-")) / bw1
    resul <- kfunc(ktype = ktype, difmat = difmat)
    w <- wt / sum(wt)
    roc1 <- sweep(resul, 2, w, "*")
    roc <- apply(roc1, 1, sum)
  }
  else if (method == "tra") {
    # Boundary-corrected smoothing: map (0, 1) to the real line with a
    # probit transform before smoothing (Beyene & El Ghouch, 2020).
    mul <- nD / (nD + 1)
    Zt <- qnorm(mul * Z + (1 / nD ^ 2))
    Ut <- qnorm(mul * U + (1 / nD ^ 2))
    Ztt <- Zt[D != 0]
    wt <- D[D != 0]
    bw1 <- wbw(X = Ztt, wt = wt, bw = bw, ktype = ktype)$bw
    difmat <- (outer(Ut, Ztt, "-")) / bw1
    resul <- kfunc(ktype = ktype, difmat = difmat)
    w <- wt / sum(wt)
    roc1 <- sweep(resul, 2, w, "*")
    roc <- apply(roc1, 1, sum)
  }
  else{
    stop("The specified method is not correct.")
  }
  return(list(roc = roc, auc = AUC, bw = bw1))
}
#' Survival probability conditional to the observed data estimation for right censored data.
#'
#' @param Y The numeric vector of event-times or observed times.
#' @param M The numeric vector of marker values for which we want to compute the time-dependent ROC curves.
#' @param censor The censoring indicator, \code{1} if event, \code{0} otherwise.
#' @param t A scaler time point at which we want to compute the time-dependent ROC curve.
#' @param h A scaler for the bandwidth of Beran's weight calculaions. The defualt is using the method of Sheather and Jones (1991).
#' @param kernel A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", , "\code{tricube}", "\code{boxcar}", "\code{triangular}", or "\code{quartic}". The defaults is "\code{normal}" kernel density.
#' @return Return a vectors:
#' @return \code{positive } \code{P(T<t|Y,censor,M)}.
#' @return \code{negative } \code{P(T>t|Y,censor,M)}.
#' @references Beyene, K. M. and El Ghouch A. (2020). Smoothed time-dependent receiver operating characteristic curve for right censored survival data. \emph{Statistics in Medicine}. 39: 3373– 3396.
#' @references Li, Liang, Bo Hu and Tom Greene (2018). A simple method to estimate the time-dependent receiver operating characteristic curve and the area under the curve with right censored data, \emph{Statistical Methods in Medical Research}, 27(8): 2264-2278.
#' @references Pablo Martínez-Camblor and Gustavo F. Bayón and Sonia Pérez-Fernández (2016). Cumulative/dynamic roc curve estimation, \emph{Journal of Statistical Computation and Simulation}, 86(17): 3582-3594.
#' @keywords internal
# Estimate P(T < t | data) for every subject under right censoring.
# Subjects still event-free at t get probability 0; events observed by t
# get probability 1; observations censored before t are imputed from the
# Beran conditional survival estimator given the marker value.
Csurv <- function(Y, M, censor, t, h = NULL, kernel = "normal") {
  # Default bandwidth: Sheather-Jones direct plug-in on the marker values.
  if (is.null(h)) {
    h <- bw.SJ(M, method = "dpi")
  }
  # Beran() names the Gaussian kernel "gaussian".
  if (kernel == "normal") {
    kernel <- "gaussian"
  }
  n <- length(M)
  positive <- rep(NA, n)
  for (k in seq_len(n)) {
    if (Y[k] > t) {
      # Still at risk at t: event cannot have happened yet.
      positive[k] <- 0
    } else if (censor[k] == 1) {
      # Event observed no later than t.
      positive[k] <- 1
    } else {
      # Censored before t: impute 1 - S(t | M) / S(Y | M) with Beran weights.
      surv_t <- Beran(time = Y, status = censor, covariate = M, x = M[k],
                      y = t, kernel = kernel, bw = h)
      surv_y <- Beran(time = Y, status = censor, covariate = M, x = M[k],
                      y = Y[k], kernel = kernel, bw = h)
      # Guard against a zero denominator in the survival ratio.
      positive[k] <- if (surv_y == 0) 1 else 1 - surv_t / surv_y
    }
  }
  list(positive = positive, negative = 1 - positive)
}
# Function to compute the knots.
# These functions are based on the R package intcensROC.
# Knot sequence for the time dimension: interior knots are equally spaced
# sample quantiles of the time endpoints that are informative for each
# censoring type (delta: 1 = left, 2 = interval, 3 = right), with the
# boundary knots tripled for a cubic B-spline basis.
# Fixes: `name = F` relied on partial argument matching and the T/F
# shorthand (now `names = FALSE`); the knot vector is now returned by an
# explicit last expression instead of a trailing assignment.
.knotT <- function(U, V, delta, dim) {
  size <- dim - 2
  # Pool the endpoints that carry information under each censoring pattern.
  knot_pre_t <- c(U[delta != 1], U[delta != 2], V[delta != 2], V[delta != 3])
  qt <- numeric(size + 1)
  # Lower boundary knot fixed at 0; remaining knots are sample quantiles.
  for (i in 2:(size + 1)) {
    qt[i] <- quantile(knot_pre_t, (i - 1) / size, names = FALSE, na.rm = TRUE)
  }
  # Triple the boundary knots (cubic-spline multiplicity).
  c(qt[1], qt[1], qt, qt[size + 1], qt[size + 1])
}
# Knot sequence for the marker dimension: interior knots are equally spaced
# sample quantiles of the marker; the upper boundary knot is pushed just
# above the largest marker value so max(marker) stays strictly interior,
# and boundary knots are tripled for a cubic B-spline basis.
# Fixes: `name = F` partial matching / T-F shorthand (now `names = FALSE`);
# explicit last-expression return instead of a trailing assignment.
.knotM <- function(marker, dim) {
  size <- dim - 2
  qt <- numeric(size + 1)
  # Lower boundary knot fixed at 0; interior knots are marker quantiles.
  for (i in 2:(size + 1)) {
    qt[i] <- quantile(marker, (i - 1) / size, names = FALSE, na.rm = TRUE)
  }
  # Upper boundary strictly above the data (max(marker + 0.1) == max + 0.1).
  qt[size + 1] <- max(marker + 0.1)
  c(qt[1], qt[1], qt, qt[size + 1], qt[size + 1])
}
#' Compute the conditional survival function for Interval Censored Survival Data
#'
#' @description A method to compute the survival function for the
#' interval censored survival data based on a spline function based constrained
#' maximum likelihood estimator. The maximization process of likelihood is
#' carried out by generalized gradient projection method.
#' @usage condS(L, R, M, Delta, t, m)
#' @param L The numeric vector of left limits of the observed times. For left censored observations \code{L == 0}.
#' @param R The numeric vector of right limits of the observed times. For right censored observations \code{R == Inf}.
#' @param M An array contains marker levels for the samples.
#' @param Delta An array of indicator for the censored type, use 1, 2, 3 for
#' event happened before the left bound time, within the defined time range, and
#' after.
#' @param t A scalar indicates the predict time.
#' @param m A scalar for the cutoff of the marker variable.
#' @references Wu, Yuan; Zhang, Ying. Partially monotone tensor spline estimation
#' of the joint distribution function with bivariate current status data.
#' Ann. Statist. 40, 2012, 1609-1636 <doi:10.1214/12-AOS1016>
#' @keywords internal
condS <- function(L, R, M, Delta = NULL, t, m) {
  n <- length(L)
  U <- L
  V <- R
  Marker <- M
  PredictTime <- t
  # Classify observations: left censored (L <= 0), right censored (R == Inf).
  ind <- (U <= 0)
  ind1 <- (V == Inf)
  # The compiled sieve routine needs finite right endpoints; cap Inf.
  if (any(ind1)) {
    V[ind1] <- 10000000
  }
  # Build the censoring-type indicator when not supplied:
  # 1 = left censored, 2 = interval censored, 3 = right censored.
  if (is.null(Delta)) {
    Delta <- rep(2, n)
    Delta[ind] <- 1
    Delta[ind1] <- 3
  }
  if (any(Marker < 0))
    stop("Negative marker value found!")
  # Determine the spline dimension: round n^(1/3) to the nearest integer,
  # then add 2 for the boundary terms.
  size <- length(U)
  cadSize <- size^(1 / 3)
  if (cadSize - floor(cadSize) < ceiling(cadSize) - cadSize) {
    Dim <- floor(cadSize) + 2
  } else {
    Dim <- ceiling(cadSize) + 2
  }
  # Knot sequences for the time and marker dimensions.
  knotT <- .knotT(U, V, Delta, Dim)
  knotM <- .knotM(Marker, Dim)
  # Fit the constrained tensor-spline MLE (compiled code in cenROC).
  theta <- .Call("_cenROC_sieve", PACKAGE = "cenROC", U, V, Marker, Delta,
                 knotT, knotM, Dim)
  # Evaluate the fitted joint CDF in a small window around the cutoff m.
  m2 <- m - 0.0001
  m <- m + 0.0001
  Fm <- .Call("_cenROC_surva", PACKAGE = "cenROC", theta, m,
              m2, PredictTime, knotT, knotM)
  # Return the conditional survival probability.
  Fest <- 1 - Fm
  return(Fest)
}
#' Survival probability conditional on the observed data estimation for interval censored data
#'
#' @param L The numeric vector of left limits of the observed times. For left censored observations \code{L == 0}.
#' @param R The numeric vector of right limits of the observed times. For right censored observations \code{R == Inf}.
#' @param M The numeric vector of marker value.
#' @param t A scalar time point used to calculate the ROC curve.
#' @param method A character indicating the type of modeling. This includes nonparametric \code{"np"}, parametric \code{"pa"} and semiparametric \code{"sp"}.
#' @param dist A character indicating the type of distribution for the parametric model. Choices are \code{"exponential"}, \code{"weibull"}, \code{"gamma"}, \code{"lnorm"}, \code{"loglogistic"} and \code{"generalgamma"}.
#' @return Return a vectors:
#' @return \code{positive } \code{P(T<t|L,R,M)}.
#' @return \code{negative } \code{P(T>t|L,R,M)}.
#' @references Beyene, K. M. and El Ghouch A. (2022). Time-dependent ROC curve estimation for interval-censored data. \emph{Biometrical Journal}, 64, 1056– 1074.
#' @keywords internal
# Estimate P(T < t | L, R, M) per subject under interval censoring.
# Intervals fully resolved by t (R <= t, or L >= t) yield probability 1 or 0
# directly; otherwise the conditional probability is derived from survival
# estimates at t, L and R under the chosen model: nonparametric ("np"),
# parametric ("pa") or semiparametric ("sp").
ICsur <- function(L, R, M, t, method, dist) {
  data <- data.frame(L = L, R = R, M = M)
  n <- length(M)
  positive <- rep(NA, n)
  for (j in seq_len(n)) {
    if (R[j] <= t) {
      # Event known to have occurred by t.
      positive[j] <- 1
    } else if (L[j] >= t) {
      # Event known not to have occurred by t.
      positive[j] <- 0
    } else {
      # Interval straddles t: survival at (t, L[j], R[j]) for this marker.
      if (method == "np") {
        surv <- c(condS(L = L, R = R, M = M, t = t, m = M[j]),
                  condS(L = L, R = R, M = M, t = L[j], m = M[j]),
                  condS(L = L, R = R, M = M, t = R[j], m = M[j]))
      } else if (method == "pa") {
        fml <- Surv(time = L, time2 = R, type = "interval2") ~ M
        fit <- ic_par(fml, model = "aft", dist = dist, data = data, weights = NULL)
        surv <- 1 - (getFitEsts(fit, data.frame(M = c(M[j])), q = c(t, L[j], R[j])))
      } else if (method == "sp") {
        fml <- Surv(time = L, time2 = R, type = "interval2") ~ M
        fit <- ic_sp(fml, model = "ph", data = data, weights = NULL)
        surv <- 1 - (getFitEsts(fit, data.frame(M = c(M[j])), q = c(t, L[j], R[j])))
      }
      # Conditional probability, with special forms for one-sided intervals.
      if (R[j] == Inf) {
        positive[j] <- 1 - (surv[1] / surv[2])
      } else if (L[j] == 0) {
        positive[j] <- (1 - surv[1]) / (1 - surv[3])
      } else {
        positive[j] <- (surv[2] - surv[1]) / (surv[2] - surv[3])
      }
    }
  }
  list(positive = positive, negative = 1 - positive)
}
|
\name{ContactWorker}
\alias{ContactWorker}
\alias{ContactWorkers}
\alias{contact}
\title{Contact Worker(s)}
\description{Contact one or more workers. This sends an email with specified subject line and body text to one or more workers. This can be used to recontact workers in panel/longitudinal research or to send follow-up work. Most likely will need to be used in tandem with \code{\link{GrantBonus}} to implement panels.}
\usage{
ContactWorker( subjects, msgs, workers, batch = FALSE, keypair = credentials(),
print = FALSE, browser = FALSE, log.requests = TRUE, sandbox = FALSE)
}
\arguments{
\item{subjects}{A character string containing subject line of an email, or a vector of character strings of length equal to the number of workers to be contacted containing the subject line of the email for each worker. Maximum of 200 characters.}
\item{msgs}{A character string containing body text of an email, or a vector of character strings of length equal to the number of workers to be contacted containing the body text of the email for each worker. Maximum of 4096 characters.}
\item{workers}{A character string containing a WorkerId, or a vector of character strings containing multiple WorkerIds.}
\item{batch}{A logical (default is \code{FALSE}), indicating whether workers should be contacted in batches of 100 (the maximum allowed by the API). This significantly reduces the time required to contact workers, but eliminates the ability to send customized messages to each worker.}
\item{keypair}{A two-item character vector containing an AWS Access Key ID in the first position and the corresponding Secret Access Key in the second position. Set default with \code{\link{credentials}}.}
\item{print}{Optionally print the results of the API request to the standard output. Default is \code{FALSE}.}
\item{browser}{Optionally open the request in the default web browser, rather than opening in R. Default is \code{FALSE}.}
\item{log.requests}{A logical specifying whether API requests should be logged. Default is \code{TRUE}. See \code{\link{readlogfile}} for details.}
\item{sandbox}{Optionally execute the request in the MTurk sandbox rather than the live server. Default is \code{FALSE}.}
}
\details{
Send an email to one or more workers, either with a common subject and body text or subject and body customized for each worker.
In batch mode, workers are contacted in batches of 100. If one email fails (e.g., for one worker) the other emails should be sent successfully. That is to say, the request as a whole will be valid but will return additional information about which workers were not contacted. This information can be found in the MTurkR log file, or by calling the request with \code{browser=TRUE} and viewing the XML responses directly.
\code{ContactWorkers()} and \code{contact()} are aliases.
}
\value{A dataframe containing the list of workers, subjects, and messages, and whether the request to contact each of them was valid.}
\references{
\href{http://docs.amazonwebservices.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_NotifyWorkersOperation.html}{API Reference}
}
\author{Thomas J. Leeper}
%\note{}
%\seealso{}
\examples{
\dontrun{
a <- "Complete a follow-up survey for $.50"
b <- "Thanks for completing my HIT!
I will pay a $.50 bonus if you complete a follow-up survey by Friday at 5:00pm.
The survey can be completed at
http://www.surveymonkey.com/s/pssurvey?c=A1RO9UEXAMPLE."
c1 <- "A1RO9UEXAMPLE"
d <- ContactWorker(subjects=a,msgs=b,workers=c1)
c2 <- c("A1RO9EXAMPLE1","A1RO9EXAMPLE2","A1RO9EXAMPLE3")
d2 <- ContactWorker(subjects=a,msgs=b,workers=c2)
}
}
\keyword{Workers} | /man/ContactWorker.Rd | no_license | SolomonMg/MTurkR | R | false | false | 3,698 | rd | \name{ContactWorker}
\alias{ContactWorker}
\alias{ContactWorkers}
\alias{contact}
\title{Contact Worker(s)}
\description{Contact one or more workers. This sends an email with specified subject line and body text to one or more workers. This can be used to recontact workers in panel/longitudinal research or to send follow-up work. Most likely will need to be used in tandem with \code{\link{GrantBonus}} to implement panels.}
\usage{
ContactWorker( subjects, msgs, workers, batch = FALSE, keypair = credentials(),
print = FALSE, browser = FALSE, log.requests = TRUE, sandbox = FALSE)
}
\arguments{
\item{subjects}{A character string containing subject line of an email, or a vector of character strings of of length equal to the number of workers to be contacted containing the subject line of the email for each worker. Maximum of 200 characters.}
\item{msgs}{A character string containing body text of an email, or a vector of character strings of of length equal to the number of workers to be contacted containing the body text of the email for each worker. Maximum of 4096 characters.}
\item{workers}{A character string containing a WorkerId, or a vector of character strings containing multiple WorkerIds.}
\item{batch}{A logical (default is \code{FALSE}), indicating whether workers should be contacted in batches of 100 (the maximum allowed by the API). This significantly reduces the time required to contact workers, but eliminates the ability to send customized messages to each worker.}
\item{keypair}{A two-item character vector containing an AWS Access Key ID in the first position and the corresponding Secret Access Key in the second position. Set default with \code{\link{credentials}}.}
\item{print}{Optionally print the results of the API request to the standard output. Default is \code{TRUE}.}
\item{browser}{Optionally open the request in the default web browser, rather than opening in R. Default is \code{FALSE}.}
\item{log.requests}{A logical specifying whether API requests should be logged. Default is \code{TRUE}. See \code{\link{readlogfile}} for details.}
\item{sandbox}{Optionally execute the request in the MTurk sandbox rather than the live server. Default is \code{FALSE}.}
}
\details{
Send an email to one or more workers, either with a common subject and body text or subject and body customized for each worker.
In batch mode, workers are contacted in batches of 100. If one email fails (e.g., for one worker) the other emails should be sent successfully. That is to say, the request as a whole will be valid but will return additional information about which workers were not contacted. This information can be found in the MTurkR log file, or by calling the request with \code{browser=TRUE} and viewing the XML responses directly.
\code{ContactWorkers()} and \code{contact()} are aliases.
}
\value{A dataframe containing the list of workers, subjects, and messages, and whether the request to contact each of them was valid.}
\references{
\href{http://docs.amazonwebservices.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_NotifyWorkersOperation.html}{API Reference}
}
\author{Thomas J. Leeper}
%\note{}
%\seealso{}
\examples{
\dontrun{
a <- "Complete a follow-up survey for $.50"
b <- "Thanks for completing my HIT!
I will pay a $.50 bonus if you complete a follow-up survey by Friday at 5:00pm.
The survey can be completed at
http://www.surveymonkey.com/s/pssurvey?c=A1RO9UEXAMPLE."
c1 <- "A1RO9UEXAMPLE"
d <- ContactWorker(subjects=a,msgs=b,workers=c1)
c2 <- c("A1RO9EXAMPLE1","A1RO9EXAMPLE2","A1RO9EXAMPLE3")
d2 <- ContactWorker(subjects=a,msgs=b,workers=c2)
}
}
\keyword{Workers} |
#source("C:/Users/TB/Documents/COURSERA DATA SCIENCE test/COURSE4WK1/plot3.r")
#This function creates plot3.png in a local specified directory
# Create PLOT3.png: household energy sub-metering on 2007-02-01/02.
# Generalized: the input data file and the output PNG path are now
# parameters whose defaults reproduce the original hard-coded behavior,
# so plot3() with no arguments works exactly as before.
#
# datafile - path to household_power_consumption.txt (";"-separated, no header)
# pngfile  - path of the PNG file to write (480x480, the png() default size)
plot3 <- function(datafile = "C:/Users/TB/Documents/COURSERA DATA SCIENCE test/COURSE4WK1/household_power_consumption.txt",
                  pngfile = "C:/Users/TB/Documents/COURSERA DATA SCIENCE test/COURSE4WK1/PLOT3.png") {
  library(sqldf)
  # Only data from 2007-02-01 and 2007-02-02 are used; filtering in SQL
  # avoids loading the whole (large) file into memory.
  data <- read.csv.sql(datafile, sql = "select * from file where V1 = '1/2/2007' ", header = FALSE, sep = ";")
  data1 <- read.csv.sql(datafile, sql = "select * from file where V1 = '2/2/2007' ", header = FALSE, sep = ";")
  # read.csv.sql leaves a connection registered for the file; close it.
  con <- file(datafile)
  close(con)
  data <- rbind(data, data1)
  # Concatenate Date and Time columns to obtain a POSIXct timestamp.
  dt <- paste(data$V1, data$V2)
  t <- strptime(dt, format = "%d/%m/%Y %T", tz = "UTC")
  data <- cbind(data, t)
  # Restore the original header names (file was read with header = FALSE).
  colnam <- c('Date', 'Time', 'Global_active_power', 'Global_reactive_power', 'Voltage', 'Global_intensity', 'Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3', "datetime")
  colnames(data) <- colnam
  # PLOT 3 - 480x480 PNG
  par(mfrow = c(1, 1))
  plot(data$datetime, data$Sub_metering_1, type = 'l', ylab = "Energy sub metering", xlab = "", col = "black")
  lines(data$datetime, data$Sub_metering_2, col = "red")
  lines(data$datetime, data$Sub_metering_3, col = "blue")
  # Dry-run the legend (plot = FALSE) to measure its bounding box, then
  # redraw it shifted/widened so the output does not get cut off.
  leg <- legend("topright", lty = 1,
                legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
                col = c("black", "red", "blue"),
                plot = FALSE)
  leftlegendx <- (leg$rect$left - 10000)
  rightlegendx <- (leftlegendx + 90000)
  toplegendy <- leg$rect$top
  bottomlegendy <- (leg$rect$top - leg$rect$h)
  legend(x = c(leftlegendx, rightlegendx), y = c(toplegendy, bottomlegendy), lty = 1,
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"))
  # Copy the screen device to the PNG and close it.
  dev.copy(png, file = pngfile)
  dev.off()
} | /plot3.r | no_license | susmitabiswas/ExData_Plotting1 | R | false | false | 2,208 | r |
#source("C:/Users/TB/Documents/COURSERA DATA SCIENCE test/COURSE4WK1/plot3.r")
#This function creates plot3.png in a local specified directory
# Creates PLOT3.png (480x480): household energy sub-metering over
# 2007-02-01/02. Reads a hard-coded local data file and writes a
# hard-coded local PNG path; no arguments, no return value.
plot3<-function() {
library(sqldf)
#We will only be using data from the dates 2007-02-01 and 2007-02-02
# Filtering in SQL avoids loading the whole (large) file into memory.
data<-read.csv.sql("C:/Users/TB/Documents/COURSERA DATA SCIENCE test/COURSE4WK1/household_power_consumption.txt", sql = "select * from file where V1 = '1/2/2007' ", header=FALSE, sep=";")
data1<-read.csv.sql("C:/Users/TB/Documents/COURSERA DATA SCIENCE test/COURSE4WK1/household_power_consumption.txt", sql = "select * from file where V1 = '2/2/2007' ", header=FALSE, sep=";")
# read.csv.sql leaves a connection registered for the file; this closes it.
con<-file("C:/Users/TB/Documents/COURSERA DATA SCIENCE test/COURSE4WK1/household_power_consumption.txt")
close(con)
# Stack the two days into one data frame.
data<-rbind(data, data1)
#Need to concat Date and Time columns together to acquire POSIXct class
dt<-paste(data$V1, data$V2)
t<-strptime(dt, format="%d/%m/%Y %T", tz="UTC")
data<-cbind(data,t)
# Restore the original header names (file was read with header=FALSE).
colnam<-c('Date','Time','Global_active_power','Global_reactive_power','Voltage','Global_intensity','Sub_metering_1','Sub_metering_2','Sub_metering_3', "datetime")
colnames(data)<-colnam
#PLOT 3 - 480X480 as PNG
par(mfrow=c(1,1))
plot(data$datetime, data$Sub_metering_1, type='l', ylab="Energy sub metering", xlab="", col="black")
lines(data$datetime, data$Sub_metering_2, col="red")
lines(data$datetime, data$Sub_metering_3, col="blue")
#Adjusting legend border parameters so the output doesn't get cut off
# First draw the legend invisibly (plot = FALSE) to measure its bounding
# box, then redraw it shifted/widened so it is not clipped.
leg <- legend("topright", lty = 1,
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col = c("black","red","blue"),
plot = FALSE)
leftlegendx <- (leg$rect$left - 10000)
rightlegendx <- (leftlegendx + 90000)
toplegendy <- leg$rect$top
bottomlegendy <- (leg$rect$top - leg$rect$h)
legend(x = c(leftlegendx, rightlegendx), y = c(toplegendy, bottomlegendy), lty = 1,
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col = c("black","red","blue"))
# Copy the screen device to a PNG (480x480, the png() default) and close it.
dev.copy(png, file="C:/Users/TB/Documents/COURSERA DATA SCIENCE test/COURSE4WK1/PLOT3.png")
dev.off()
}
#call rm() function to remove all objects
# NOTE(review): rm(list = ls()) / setwd() in scripts are generally
# discouraged; kept as-is because downstream relative paths depend on them.
rm(list = ls())
#set working directory
setwd("C:/Users/...")
#get packages
library(mFilter) #for Baxter-King filter
library(tidyverse) #contains ggplot2, dplyr, tidyr, readr, purr, tibble, stringr, forcats, rlang, lubridate, pillar
library(data.table)
#get data
#SF10 uses run-0
run <- read_csv("C:/Users/.../run-0.csv")
# Build a firm-level panel: one row per firm (id) and period (time).
df <- as.data.frame(run$time)
df <- df %>% rename(time = "run$time")
df$stock <- run$Stock #stock of inventories
df$inv <- run$investmentConsumeUnit
df$con <- run$consumptionUnit
df$countBankruptcy <- run$countBankruptcy
df$id <- run$id
#get firm size
firmSize <- max(df$id) + 1 #id starts at 0
#stock change
# Shifting by firmSize rows presumably lags each firm by one period
# (rows stacked period-by-period, firmSize rows each) — verify row order.
setDT(df)[, stock_change := stock - shift(stock, n=firmSize)]
df[is.na(df)] <- 0
#generate real GDP
# Expenditure approach: investment + consumption + inventory change.
df$gdp_real <- df$inv + df$con + df$stock_change
#calculate mean
#mean of column production per group time
mtm <- aggregate(df[, "gdp_real"], list(df$time), mean) #real gdp due to Napoletano et al. (2006)
mtm$sdGDP <- aggregate(df[, "gdp_real"], list(df$time), sd)$gdp_real
mtm$firmBankruptcies <- aggregate(run[, "countBankruptcy"], list(df$time), sum)$countBankruptcy
#rename time
mtm <- mtm %>% rename(time = Group.1)
# Period-over-period change in bankruptcies; negative changes are replaced
# below by the level itself (presumably a counter reset — confirm).
setDT(mtm)[, change := firmBankruptcies - shift(firmBankruptcies)]
mtm[is.na(mtm)] <- 0
mtm <- transform(mtm, change = ifelse(change < 0, firmBankruptcies, change))
#delete first two rows, because of gdp_nom and consumption calculation
mtm <- mtm[-c(1:2),]
mtm$marginErrorGDP <- qnorm(.95)*(mtm$sdGDP/sqrt(firmSize)) #90% confidence interval half-width (two-sided, qnorm(.95))
mtm$lowerBoundGDP <- mtm$gdp_real - mtm$marginErrorGDP
mtm$upperBoundGDP <- mtm$gdp_real + mtm$marginErrorGDP
#generate log
mtm$log_gdp <- log(mtm$gdp_real)
mtm$log_change <- log(mtm$change)
# log(0) yields -Inf; zero out infinities, then remaining NAs.
mtm <- do.call(data.frame,lapply(mtm, function(log_prod) replace(log_prod, is.infinite(log_prod),0)))
mtm[is.na(mtm)] <- 0
#bandpass filter
#run Baxter-King filter
# Business-cycle component: periods between 6 and 32, 12-term lead/lag window.
mtm$bk_gdp <- bkfilter(mtm$log_gdp, pl = 6, pu = 32, nfix = 12)$cycle[, 1]
mtm$bk_change <- bkfilter(mtm$log_change, pl = 6, pu = 32, nfix = 12)$cycle[, 1]
mtm[is.na(mtm)] <- 0
################################################################################
#Generate plot: GDP (dashed line, left axis) vs. firm bankruptcies (bars,
#right axis). The bars are shifted up by 12 so both series share the left
#axis; the secondary axis below undoes the shift.
################################################################################
g_real <-
  ggplot(mtm, aes(x = time)) +
  geom_line(aes(y = gdp_real, linetype = "GDP (LHS)", colour = "#000000"), size = 0.8) +
  # FIX: "000000" was not a valid R colour (missing the leading "#") and
  # errors when the plot is rendered.
  geom_bar(aes(y=(change+12)),stat="identity", colour="#000000", width = 0.3) +
  scale_y_continuous(breaks = c(14,16,18,20,22), expand = c(0,0), sec.axis = sec_axis(~(.-12), name = "Firm bankruptcies (quantity)", breaks = c(0,1,2,3,4,5))) +
  scale_linetype_manual(values = c("GDP (LHS)" = "dashed", "Firm bankruptcies (RHS)" = "solid")) +
  scale_color_manual(values = c("#000000","#000000")) +
  # 90% confidence band around mean GDP (see marginErrorGDP above).
  geom_ribbon(aes(ymin = lowerBoundGDP, ymax = upperBoundGDP), alpha = 0.2) +
  labs(y = "GDP in production units",
       x = "Time in periods",
       linetype = "") +
  guides(colour = FALSE) +
  theme_bw() +
  theme(legend.position = c(0.8, 0.93),
        legend.background = element_rect(fill = "white"),
        legend.title = element_blank(),
        text = element_text(family = "Arial", size = 14),
        axis.text = element_text(size = 12),
        axis.title.y = element_text(vjust=2.5),
        axis.title.y.right = element_text(vjust=2.5)) +
  # NOTE: an earlier duplicate guides(linetype = ...) call was removed;
  # this later call overrode it anyway, so behavior is unchanged.
  guides(linetype = guide_legend(override.aes = list(size = 0.75), keywidth = 3)) +
  xlim(200,300) +
  coord_cartesian(ylim=c(12,24))
#save graph in working directory
cairo_pdf("SF10_firm_bankruptcies_counter-cyc_real.pdf", width=8, height=6)
print(g_real)
dev.off()
################################################################################
#Correlogram
################################################################################
# Helper: cross-correlation of x against y up to lag.max, printed as a
# one-row data frame with lags as column names and correlations rounded to
# 4 digits. Factors out the block that was previously copy-pasted twice.
# (ccf() also draws its default plot as a side effect, as before.)
print_ccf <- function(x, y, lag.max) {
  g <- ccf(x, y, lag.max = lag.max, type = "correlation")
  d <- do.call(rbind.data.frame, g)
  # Row 1 of the stacked ccf object holds the correlations, row 4 the lags.
  f <- d[1, ]
  colnames(f) <- (d[4, ])
  f[nrow(f), ] <- round(as.numeric(f[nrow(f), ]), 4)
  print(f)
}
#get cross correlation function tables
#gdp and gdp (autocorrelation of the band-pass filtered GDP series)
print_ccf(mtm$bk_gdp, mtm$bk_gdp, lag.max = 4)
#gdp and firmBankruptcies
print_ccf(mtm$bk_change, mtm$bk_gdp, lag.max = 8)
| /R-scripts/SF10_firm_bankruptcies.R | permissive | fhaegner/Mak-h-ro | R | false | false | 4,669 | r | #call rm() function to remove all objects
# NOTE(review): rm(list = ls()) / setwd() in scripts are generally
# discouraged; kept as-is because downstream relative paths depend on them.
rm(list = ls())
#set working directory
setwd("C:/Users/...")
#get packages
library(mFilter) #for Baxter-King filter
library(tidyverse) #contains ggplot2, dplyr, tidyr, readr, purr, tibble, stringr, forcats, rlang, lubridate, pillar
library(data.table)
#get data
#SF10 uses run-0
run <- read_csv("C:/Users/.../run-0.csv")
# Build a firm-level panel: one row per firm (id) and period (time).
df <- as.data.frame(run$time)
df <- df %>% rename(time = "run$time")
df$stock <- run$Stock #stock of inventories
df$inv <- run$investmentConsumeUnit
df$con <- run$consumptionUnit
df$countBankruptcy <- run$countBankruptcy
df$id <- run$id
#get firm size
firmSize <- max(df$id) + 1 #id starts at 0
#stock change
# Shifting by firmSize rows presumably lags each firm by one period
# (rows stacked period-by-period, firmSize rows each) — verify row order.
setDT(df)[, stock_change := stock - shift(stock, n=firmSize)]
df[is.na(df)] <- 0
#generate real GDP
# Expenditure approach: investment + consumption + inventory change.
df$gdp_real <- df$inv + df$con + df$stock_change
#calculate mean
#mean of column production per group time
mtm <- aggregate(df[, "gdp_real"], list(df$time), mean) #real gdp due to Napoletano et al. (2006)
mtm$sdGDP <- aggregate(df[, "gdp_real"], list(df$time), sd)$gdp_real
mtm$firmBankruptcies <- aggregate(run[, "countBankruptcy"], list(df$time), sum)$countBankruptcy
#rename time
mtm <- mtm %>% rename(time = Group.1)
# Period-over-period change in bankruptcies; negative changes are replaced
# below by the level itself (presumably a counter reset — confirm).
setDT(mtm)[, change := firmBankruptcies - shift(firmBankruptcies)]
mtm[is.na(mtm)] <- 0
mtm <- transform(mtm, change = ifelse(change < 0, firmBankruptcies, change))
#delete first two rows, because of gdp_nom and consumption calculation
mtm <- mtm[-c(1:2),]
mtm$marginErrorGDP <- qnorm(.95)*(mtm$sdGDP/sqrt(firmSize)) #90% confidence interval half-width (two-sided, qnorm(.95))
mtm$lowerBoundGDP <- mtm$gdp_real - mtm$marginErrorGDP
mtm$upperBoundGDP <- mtm$gdp_real + mtm$marginErrorGDP
#generate log
mtm$log_gdp <- log(mtm$gdp_real)
mtm$log_change <- log(mtm$change)
# log(0) yields -Inf; zero out infinities, then remaining NAs.
mtm <- do.call(data.frame,lapply(mtm, function(log_prod) replace(log_prod, is.infinite(log_prod),0)))
mtm[is.na(mtm)] <- 0
#bandpass filter
#run Baxter-King filter
# Business-cycle component: periods between 6 and 32, 12-term lead/lag window.
mtm$bk_gdp <- bkfilter(mtm$log_gdp, pl = 6, pu = 32, nfix = 12)$cycle[, 1]
mtm$bk_change <- bkfilter(mtm$log_change, pl = 6, pu = 32, nfix = 12)$cycle[, 1]
mtm[is.na(mtm)] <- 0
################################################################################
#Generate plot: GDP (dashed line, left axis) vs. firm bankruptcies (bars,
#right axis). The bars are shifted up by 12 so both series share the left
#axis; the secondary axis below undoes the shift.
################################################################################
g_real <-
  ggplot(mtm, aes(x = time)) +
  geom_line(aes(y = gdp_real, linetype = "GDP (LHS)", colour = "#000000"), size = 0.8) +
  # FIX: "000000" was not a valid R colour (missing the leading "#") and
  # errors when the plot is rendered.
  geom_bar(aes(y=(change+12)),stat="identity", colour="#000000", width = 0.3) +
  scale_y_continuous(breaks = c(14,16,18,20,22), expand = c(0,0), sec.axis = sec_axis(~(.-12), name = "Firm bankruptcies (quantity)", breaks = c(0,1,2,3,4,5))) +
  scale_linetype_manual(values = c("GDP (LHS)" = "dashed", "Firm bankruptcies (RHS)" = "solid")) +
  scale_color_manual(values = c("#000000","#000000")) +
  # 90% confidence band around mean GDP (see marginErrorGDP above).
  geom_ribbon(aes(ymin = lowerBoundGDP, ymax = upperBoundGDP), alpha = 0.2) +
  labs(y = "GDP in production units",
       x = "Time in periods",
       linetype = "") +
  guides(colour = FALSE) +
  theme_bw() +
  theme(legend.position = c(0.8, 0.93),
        legend.background = element_rect(fill = "white"),
        legend.title = element_blank(),
        text = element_text(family = "Arial", size = 14),
        axis.text = element_text(size = 12),
        axis.title.y = element_text(vjust=2.5),
        axis.title.y.right = element_text(vjust=2.5)) +
  # NOTE: an earlier duplicate guides(linetype = ...) call was removed;
  # this later call overrode it anyway, so behavior is unchanged.
  guides(linetype = guide_legend(override.aes = list(size = 0.75), keywidth = 3)) +
  xlim(200,300) +
  coord_cartesian(ylim=c(12,24))
#save graph in working directory
cairo_pdf("SF10_firm_bankruptcies_counter-cyc_real.pdf", width=8, height=6)
print(g_real)
dev.off()
################################################################################
#Correlogram
################################################################################
#get cross correlation function tables
#gdp and gdp
# Autocorrelation of the band-pass-filtered GDP series (ccf also draws its
# default plot as a side effect).
g <- ccf(mtm$bk_gdp, mtm$bk_gdp, lag.max = 4, type="correlation")
# Stack the ccf object's components row-wise; row 1 then holds the
# correlations and row 4 the lags.
d <- do.call(rbind.data.frame, g)
#get correlation coefficient
f <- d[1,]
#get lag coefficient and set as header
colnames(f) <- (d[4,])
#round to 4 digits
f[nrow(f),] <- round(as.numeric(f[nrow(f),]), 4)
print(f)
#gdp and firmBankruptcies
# Cross-correlation of bankruptcy changes with GDP at leads/lags up to 8.
g <- ccf(mtm$bk_change, mtm$bk_gdp, lag.max = 8, type="correlation")
d <- do.call(rbind.data.frame, g)
#get correlation coefficient
f <- d[1,]
#get lag coefficient and set as header
colnames(f) <- (d[4,])
#round to 4 digits
f[nrow(f),] <- round(as.numeric(f[nrow(f),]), 4)
print(f)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_regressions.R
\name{match_reg}
\alias{match_reg}
\title{Perform the traditional single equation regression approach under the
assumption that \code{x1, x2} are optimally chosen.}
\usage{
match_reg(dat, method = "traditional")
}
\arguments{
\item{dat}{A simulated dataset with 5 columns.}
\item{method}{The estimation method; defaults to \code{"traditional"}.}
}
\value{
A list with the regression object and the name of the method
}
\description{
Perform the traditional single equation regression approach under the
assumption that \code{x1, x2} are optimally chosen.
}
\seealso{
interaction_reg run_regression format_reg
Other tests: \code{\link{cond_reg}},
\code{\link{interaction_reg}},
\code{\link{run_regression}}, \code{\link{sur_reg}}
}
| /man/match_reg.Rd | permissive | stijnmasschelein/simcompl | R | false | true | 760 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_regressions.R
\name{match_reg}
\alias{match_reg}
\title{Perform the traditional single equation regression approach under the
assumption that \code{x1, x2} are optimally chosen.}
\usage{
match_reg(dat, method = "traditional")
}
\arguments{
\item{dat}{A simulated dataset with 5 columns.}
}
\value{
A list with the regression object and the name of the method
}
\description{
Perform the traditional single equation regression approach under the
assumption that \code{x1, x2} are optimally chosen.
}
\seealso{
interaction_reg run_regression format_reg
Other tests: \code{\link{cond_reg}},
\code{\link{interaction_reg}},
\code{\link{run_regression}}, \code{\link{sur_reg}}
}
|
library(linear.tools)
### Name: deleting_wrongeffect
### Title: check monotonicity of marginal impacts and re-estimate the
###   model.
### Aliases: deleting_wrongeffect
### ** Examples
##
# Auto-extracted example script for linear.tools::deleting_wrongeffect.
set.seed(413)
# Take a ~5% random subsample of the diamonds data as training data.
traing_data = ggplot2::diamonds[runif(nrow(ggplot2::diamonds))<0.05,]
nrow(traing_data)
diamond_lm3 = lm(formula = price ~ carat + I(carat^2) + I(carat^3) + cut +
I(carat * depth) , data = traing_data)
# Check monotonicity of carat's marginal impact over carat in [0.5, 6].
test = deleting_wrongeffect(model = diamond_lm3,
focus_var_raw = 'carat',
focus_var_model = c("I(carat^3)","I(carat*depth)",
"I(carat^2)","I(carat)"),
focus_value = list(carat=seq(0.5,6,0.1)),
data = traing_data,
PRINT = TRUE,STOP = FALSE,
Reverse = FALSE)
## two focus on vars
test =
deleting_wrongeffect(model = diamond_lm3 ,
focus_var_raw = c('carat',"cut"),
focus_var_model = c("I(carat*depth)","I(carat^3)"),
focus_value = list(carat=seq(0.5,6,0.1)),
data = traing_data,PRINT = TRUE,STOP =FALSE)
diamond_lm3 = lm(formula = price ~ cut + depth +
I(carat * depth) , data = ggplot2::diamonds)
## negative signs
# Monoton_to_Match = -1 requires a decreasing marginal impact.
deleting_wrongeffect(model = diamond_lm3 ,
focus_var_raw = c('depth',"cut"),
focus_var_model = c("depth"),Monoton_to_Match = -1,
data = ggplot2::diamonds,PRINT = TRUE,STOP =FALSE)
## wrong variables names
# NOTE(review): the two calls below intentionally pass misspelled names
# ("I(cara79t^3)", 'carat890') to demonstrate the function's error handling.
deleting_wrongeffect(diamond_lm3, focus_var_raw = 'carat',
focus_var_model = c("I(cara79t^3)"),
data = ggplot2::diamonds,PRINT = TRUE)
deleting_wrongeffect(diamond_lm3, focus_var_raw = 'carat890',
focus_var_model = c("I(carat^3)"),
data = ggplot2::diamonds, PRINT = TRUE)
| /data/genthat_extracted_code/linear.tools/examples/deleting_wrongeffect.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,008 | r | library(linear.tools)
### Name: deleting_wrongeffect
### Title: check monotonicity of marginal impacts and re-estimate the
###   model.
### Aliases: deleting_wrongeffect
### ** Examples
##
# Auto-extracted example script for linear.tools::deleting_wrongeffect.
set.seed(413)
# Take a ~5% random subsample of the diamonds data as training data.
traing_data = ggplot2::diamonds[runif(nrow(ggplot2::diamonds))<0.05,]
nrow(traing_data)
diamond_lm3 = lm(formula = price ~ carat + I(carat^2) + I(carat^3) + cut +
I(carat * depth) , data = traing_data)
test = deleting_wrongeffect(model = diamond_lm3,
focus_var_raw = 'carat',
focus_var_model = c("I(carat^3)","I(carat*depth)",
"I(carat^2)","I(carat)"),
focus_value = list(carat=seq(0.5,6,0.1)),
data = traing_data,
PRINT = TRUE,STOP = FALSE,
Reverse = FALSE)
## two focus on vars
test =
deleting_wrongeffect(model = diamond_lm3 ,
focus_var_raw = c('carat',"cut"),
focus_var_model = c("I(carat*depth)","I(carat^3)"),
focus_value = list(carat=seq(0.5,6,0.1)),
data = traing_data,PRINT = TRUE,STOP =FALSE)
diamond_lm3 = lm(formula = price ~ cut + depth +
I(carat * depth) , data = ggplot2::diamonds)
## negative signs
# Monoton_to_Match = -1 requires a decreasing marginal impact.
deleting_wrongeffect(model = diamond_lm3 ,
focus_var_raw = c('depth',"cut"),
focus_var_model = c("depth"),Monoton_to_Match = -1,
data = ggplot2::diamonds,PRINT = TRUE,STOP =FALSE)
## wrong variables names
# NOTE(review): the two calls below intentionally pass misspelled names
# to demonstrate the function's error handling.
deleting_wrongeffect(diamond_lm3, focus_var_raw = 'carat',
focus_var_model = c("I(cara79t^3)"),
data = ggplot2::diamonds,PRINT = TRUE)
deleting_wrongeffect(diamond_lm3, focus_var_raw = 'carat890',
focus_var_model = c("I(carat^3)"),
data = ggplot2::diamonds, PRINT = TRUE)
###DATA TRANSFORMATION AND OUTLIER DETECTION###
#1. Load MICE Imputed Data
library("dplyr", lib.loc="~/R/win-library/3.5")
setwd("C:/Users/sorel/Desktop/Paper ICA/Scripts")
MiceImputed=data.frame(read.csv("MiceImputed.csv", header = TRUE, sep = ","))
MiceImputed=MiceImputed[,3:dim(MiceImputed)[2]]
DataFeeders=MiceImputed[,2:dim(MiceImputed)[2]]
##########################################################2. OUTLIER FLAGGING PIPELINE##########################################################
#2.1 LOAD FACTOR OUTLIERS
library("lubridate", lib.loc="~/R/win-library/3.5")
library("reshape2", lib.loc="~/R/win-library/3.5")
library("ggplot2", lib.loc="~/R/win-library/3.5")
library("Amelia", lib.loc="~/R/win-library/3.5")
library("mice", lib.loc="~/R/win-library/3.5")
library("naniar", lib.loc="~/R/win-library/3.5")
DataFeeders.LF=DataFeeders
DataFeeders.LF$Date=as.Date(MiceImputed[,1])
DataFeeders.LF$Week=week(as.POSIXct(MiceImputed[,1]))
##############################################################2.1.1 DAILY LOAD FACTOR##########################################################
# Reshape to long format (one row per Date/Week/feeder/measurement) so that
# daily statistics can be computed per feeder with group_by/summarise.
#Long Format#
DataFeedersLong=melt(DataFeeders.LF,
# ID variables - all the variables to keep but not split apart on
id.vars=c("Date", "Week"),
# The source columns (the 355 feeder measurement columns)
measure.vars=colnames(DataFeeders.LF[,1:355]),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Alim_ID",
value.name="Measurement"
)
# Daily mean and daily max of |measurement| for every feeder.
DailyMeanDataFeeders=data.frame(DataFeedersLong %>% select(Date, Alim_ID, Measurement)
%>%group_by(Date, Alim_ID) %>% summarise(Mean = mean(abs(Measurement), na.rm=TRUE)))
DailyMaxDataFeeders=data.frame(DataFeedersLong %>% select(Date, Alim_ID, Measurement)
%>% group_by(Date, Alim_ID) %>% summarise(Max = max(abs(Measurement), na.rm=TRUE)))
# Daily load factor = mean/max; undefined (NaN) whenever the daily max is 0.
DailyLoadFactor=data.frame(DailyMeanDataFeeders$Date, DailyMeanDataFeeders$Alim_ID, (DailyMeanDataFeeders$Mean/DailyMaxDataFeeders$Max))
names(DailyLoadFactor)=c("Date", "Alim_ID", "Load.Factor")
#Wide Format#
LoadFactorDataFeeders=dcast(DailyLoadFactor, Date ~ Alim_ID, fun.aggregate=mean, value.var = "Load.Factor")
# Percentage of days with a missing/undefined load factor per feeder.
# funs() is deprecated since dplyr 0.8; a lambda list is used instead.
PctNA=data.frame(t(data.frame(LoadFactorDataFeeders[,2:length(LoadFactorDataFeeders)] %>% dplyr::select(everything())
%>% summarise_all(list(~ sum(is.na(.))))))*(1/dim(LoadFactorDataFeeders)[1])*100)
PctNA$Alim_ID=row.names(PctNA)
names(PctNA)=c("pct.NA", "Alim_ID")
# Flag every feeder with at least one undefined daily load factor.
Daily.LF.Outliers=PctNA%>%dplyr::filter(pct.NA>0)%>%dplyr::select(Alim_ID)
#2.1.2 WEEKLY LOAD FACTOR
#Long Format#
# Same computation as 2.1.1 but aggregated per week instead of per day.
WeeklyMeanDataFeeders=data.frame(DataFeedersLong %>% select(Week, Alim_ID, Measurement)
%>%group_by(Week, Alim_ID) %>% summarise(Mean = mean(abs(Measurement), na.rm=TRUE)))
WeeklyMaxDataFeeders=data.frame(DataFeedersLong %>% select(Week, Alim_ID, Measurement)
%>% group_by(Week, Alim_ID) %>% summarise(Max = max(abs(Measurement), na.rm=TRUE)))
WeeklyLoadFactor=data.frame(WeeklyMeanDataFeeders$Week, WeeklyMeanDataFeeders$Alim_ID, (WeeklyMeanDataFeeders$Mean/WeeklyMaxDataFeeders$Max))
names(WeeklyLoadFactor)=c("Week", "Alim_ID", "Load.Factor")
#Wide Format#
LoadFactorDataFeeders2=dcast(WeeklyLoadFactor, Week ~ Alim_ID, fun.aggregate=mean, value.var = "Load.Factor")
# Percentage of weeks with a missing/undefined load factor per feeder.
# BUGFIX: the denominator must be the row count of the *weekly* table
# (LoadFactorDataFeeders2); the original divided by the daily table's rows.
# funs() is deprecated since dplyr 0.8; a lambda list is used instead.
PctNA2=data.frame(t(data.frame(LoadFactorDataFeeders2[,2:length(LoadFactorDataFeeders2)] %>% dplyr::select(everything())
%>% summarise_all(list(~ sum(is.na(.))))))*(1/dim(LoadFactorDataFeeders2)[1])*100)
PctNA2$Alim_ID=row.names(PctNA2)
names(PctNA2)=c("pct.NA2", "Alim_ID")
Weekly.LF.Outliers=PctNA2%>%dplyr::filter(pct.NA2>0)%>%dplyr::select(Alim_ID)
######2.1.3 OUTPUT OF LOAD FACTOR OUTLIERS######
# NOTE(review): left_join keeps only the daily outliers; if the union of daily
# and weekly outliers is intended, a full_join/union would be needed -- confirm.
Load.Factor.Outliers=as.character(t(left_join(Daily.LF.Outliers, Weekly.LF.Outliers, by=c("Alim_ID", "Alim_ID"))))
# Drop the flagged feeders. Guard the no-outlier case: negative indexing with
# an empty/NA index would otherwise select zero columns instead of all of them.
idx=match(Load.Factor.Outliers, names(DataFeeders))
idx=idx[!is.na(idx)]
Output.LF.Outliers=if (length(idx) > 0) DataFeeders[,-idx] else DataFeeders
######################################################2.2 FREQUENCY/PERIOD OUTLIERS###############################################################
library("aTSA", lib.loc="~/R/win-library/3.5")
library("TSA", lib.loc="~/R/win-library/3.5")
#2.2.1 Generate periodograms using spec.pgram (taper=0.1, demeaned)
# Returns the frequencies of the two largest spectral peaks of a series.
# NOTE(review): ties in the spectrum can return more than two frequencies,
# which would break the Top1/Top2 naming below (also true of the original).
TopTwoFreqs <- function(series) {
  p <- spec.pgram(series, plot=FALSE, taper=0.1, demean=TRUE)
  p$freq[which(p$spec >= sort(p$spec, decreasing = TRUE)[2])]
}
# One row per feeder with its two dominant frequencies; built with lapply
# instead of growing a data.frame with rbind inside a loop.
M <- data.frame(do.call(rbind, lapply(seq_len(dim(Output.LF.Outliers)[2]),
                                      function(i) TopTwoFreqs(Output.LF.Outliers[,i]))))
names(M)=c("Top1", "Top2")
M=data.frame(colnames(Output.LF.Outliers), M)
names(M)=c("Alim_ID", "Top1", "Top2")
# Numeric feeder id extracted from the column name.
M$ID=as.numeric(gsub("[a-zA-Z_ ]", "", M$Alim_ID))
#2.2.2 Transform M into daily periods 1/f=Period
PM=data.frame(M[,1], M[,4], round(1/M[,2]), round(1/M[,3]))
names(PM)=c("Alim_ID", "ID", "Period.1", "Period.2")
#Hourly in a year: 1/(365*24)=0.0001141553
#Hourly in a week: 1/(7*24)=0.005952381
#2.2.3 Compute Period Anomalies: how many feeders share each dominant period.
Period.1.counts=data.frame(PM %>% count(Period.1, sort = TRUE))
Period.2.counts=data.frame(PM %>% count(Period.2, sort = TRUE))
#2.2.4 Flag Period Outliers: feeders whose two dominant periods are each
# shared by fewer than 10% of the feeders.
PM.1=left_join(PM, Period.1.counts, by = c("Period.1" = "Period.1"))
names(PM.1)=c("Alim_ID", "ID", "Period.1", "Period.2", "n.Period.1")
PM.2=left_join(PM.1, Period.2.counts, by=c("Period.2" = "Period.2"))
names(PM.2)=c("Alim_ID", "ID", "Period.1", "Period.2", "n.Period.1", "n.Period.2")
Period.outliers=PM.2%>%dplyr::filter(n.Period.1<(dim(Output.LF.Outliers)[2]*0.1) & n.Period.2<(dim(Output.LF.Outliers)[2]*0.1))%>%select(Alim_ID)
######2.2.5 OUTPUT OF PERIOD OUTLIERS######
Anomally.Period.Outliers=as.character(t(Period.outliers))
# Guard against the no-outlier case (see 2.1.3 for why).
idx=match(Anomally.Period.Outliers, names(Output.LF.Outliers))
idx=idx[!is.na(idx)]
Output.Period.Outliers=if (length(idx) > 0) Output.LF.Outliers[,-idx] else Output.LF.Outliers
######################################################################2.3 UN-BALANCED CLUSTER OUTLIERS############################################
library("dplyr", lib.loc="~/R/win-library/3.5")
library("tsoutliers", lib.loc="~/R/win-library/3.5")
library("TSclust", lib.loc="~/R/win-library/3.5")
library("factoextra", lib.loc="~/R/win-library/3.5")
library("dendextend", lib.loc="~/R/win-library/3.5")
library("cluster", lib.loc="C:/Program Files/R/R-3.5.1/library")
library("NbClust", lib.loc="~/R/win-library/3.5")
#2.3.1 Compute dissimilarity between feeders (integrated periodogram distance).
TSMatrix2=t(as.matrix(Output.Period.Outliers))
DissMat2=diss(TSMatrix2, "INT.PER")
# Number of feeders being clustered (one row per feeder in TSMatrix2).
n.feeders=dim(TSMatrix2)[1]
#2.3.2 Select the number of clusters via average silhouette width.
#DoParallel routine
require("doParallel")
# Create parallel workers
workers <- makeCluster(6L)
# Preload factoextra in each worker; not necessary but useful
invisible(clusterEvalQ(workers, library("factoextra")))
# Register the backend; this step MUST be done
registerDoParallel(workers)
set.seed(12345)
AvgSilhouette=fviz_nbclust(Output.Period.Outliers, FUN = hcut, hc_func=c("hclust"), hc_method=c("complete"), method = "silhouette")
# Stop parallel workers
stopCluster(workers)
# Go back to sequential computation
registerDoSEQ()
#Extract the cluster count that maximises the average silhouette width.
Cluster.Sil=AvgSilhouette$data
CLUS.NUM=as.numeric(Cluster.Sil[which.max(Cluster.Sil$y),1])
#2.3.2 Hierarchical agglomerative clustering, complete linkage.
hcPER.complete=hclust(DissMat2, method="complete")
plot(hcPER.complete)
rect.hclust(hcPER.complete, k=CLUS.NUM, border=2:12)
sub_grp.complete=cutree(hcPER.complete, k = CLUS.NUM)
# Descriptive name instead of `t`, which shadowed base::t() in the original.
cluster.assignments=data.frame(sub_grp.complete)
Output.HClust=data.frame(row.names(cluster.assignments), cluster.assignments, row.names=NULL)
names(Output.HClust)=c("Alim_ID", "Cluster")
#2.3.3 Construct summary data frame: number of feeders per cluster.
ClusterSum=data.frame(Output.HClust%>%dplyr::count(Cluster))
#2.3.4 Flag clusters holding fewer than 10% of the feeders.
# BUGFIX: `dim()` on a dist object (DissMat2) is NULL, so the original
# threshold `dim(DissMat2)[2]*0.1` was undefined; use the feeder count.
Outlier.Clust=ClusterSum%>%dplyr::filter(n<n.feeders*0.1)%>%dplyr::select(Cluster)
#2.3.5 Collect the Alim_IDs that belong to any outlier cluster.
H.Clust.Outliers=Output.HClust%>%filter(Cluster %in% Outlier.Clust$Cluster)%>%select(Alim_ID)
######2.3.5 OUTPUT OF UN-BALANCED CLUSTER OUTLIERS######
Un.Balanced.Cluster.Outliers=as.character(t(H.Clust.Outliers))
# Guard against the no-outlier case (see 2.1.3 for why).
idx=match(Un.Balanced.Cluster.Outliers, names(Output.Period.Outliers))
idx=idx[!is.na(idx)]
Output.Un.Balanced.Cluster.Outliers=if (length(idx) > 0) Output.Period.Outliers[,-idx] else Output.Period.Outliers
CleanedDataSet=data.frame(MiceImputed[,1],Output.Un.Balanced.Cluster.Outliers)
# write.csv() ignores `sep` (it always writes comma-separated output), so the
# invalid argument from the original was dropped.
write.csv(CleanedDataSet, file = "CleanedFeeder.csv")
###################################################3. Transform Series into MultipleSeasonal Time Series#########################################
#Multiple seasonal adjustment for daily (24h) and weekly (168h) cycles on
#hourly measurements.
library("aTSA", lib.loc="~/R/win-library/3.5")
library("tseries", lib.loc="~/R/win-library/3.5")
library("TSA", lib.loc="~/R/win-library/3.5")
library("forecast", lib.loc="~/R/win-library/3.5")
# Seasonally adjusted version of one hourly series (STL with periodic
# seasonal components of 24 and 7*24 hours).
SeasAdjust <- function(x) {
  seasadj(stl(msts(x, seasonal.periods=c(24,7*24)), "periodic"))
}
#3.1 Noise (season adjustment subtracting the seasonal pattern).
# Build all columns with lapply instead of growing a data.frame in a loop.
SDS=data.frame(lapply(seq_len(ncol(Output.Un.Balanced.Cluster.Outliers)),
                      function(i) SeasAdjust(Output.Un.Balanced.Cluster.Outliers[,i])))
SDS=data.frame(MiceImputed[,1], SDS)
names(SDS)=c("DateTime", paste0("Noise_",colnames(Output.Un.Balanced.Cluster.Outliers)))
# write.csv() ignores `sep`; the invalid argument from the original was dropped.
write.csv(SDS, file = "Noise_Term.csv")
#3.2 Seasonal pattern (original series minus its seasonally adjusted version).
SeasonD=data.frame(lapply(seq_len(ncol(Output.Un.Balanced.Cluster.Outliers)),
                          function(i) Output.Un.Balanced.Cluster.Outliers[,i] - SeasAdjust(Output.Un.Balanced.Cluster.Outliers[,i])))
SeasonD=data.frame(MiceImputed[,1], SeasonD)
names(SeasonD)=c("DateTime", paste0("Season_",colnames(Output.Un.Balanced.Cluster.Outliers)))
write.csv(SeasonD, file = "Seasonal_Term.csv")
#3.3 Combined seasonal + noise data set.
Noise_Season_df=cbind.data.frame(SeasonD, SDS[,-1])
write.csv(Noise_Season_df, file = "Noise_Season_Output.csv")
| /Outlier_Transformation.R | no_license | Naitsabes1990CL/Lu-Rajapkse-s-cICA-Algorithm-R-Implementation | R | false | false | 10,926 | r | ###DATA TRANSFORMATION AND OUTLIER DETECTION###
#1. Load MICE Imputed Data
library("dplyr", lib.loc="~/R/win-library/3.5")
setwd("C:/Users/sorel/Desktop/Paper ICA/Scripts")
MiceImputed=data.frame(read.csv("MiceImputed.csv", header = TRUE, sep = ","))
MiceImputed=MiceImputed[,3:dim(MiceImputed)[2]]
DataFeeders=MiceImputed[,2:dim(MiceImputed)[2]]
##########################################################2. OUTLIER FLAGGING PIPELINE##########################################################
#2.1 LOAD FACTOR OUTLIERS
library("lubridate", lib.loc="~/R/win-library/3.5")
library("reshape2", lib.loc="~/R/win-library/3.5")
library("ggplot2", lib.loc="~/R/win-library/3.5")
library("Amelia", lib.loc="~/R/win-library/3.5")
library("mice", lib.loc="~/R/win-library/3.5")
library("naniar", lib.loc="~/R/win-library/3.5")
DataFeeders.LF=DataFeeders
DataFeeders.LF$Date=as.Date(MiceImputed[,1])
DataFeeders.LF$Week=week(as.POSIXct(MiceImputed[,1]))
##############################################################2.1.1 DAILY LOAD FACTOR##########################################################
#Long Format#
DataFeedersLong=melt(DataFeeders.LF,
# ID variables - all the variables to keep but not split apart on
id.vars=c("Date", "Week"),
# The source columns
measure.vars=colnames(DataFeeders.LF[,1:355]),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Alim_ID",
value.name="Measurement"
)
DailyMeanDataFeeders=data.frame(DataFeedersLong %>% select(Date, Alim_ID, Measurement)
%>%group_by(Date, Alim_ID) %>% summarise(Mean = mean(abs(Measurement), na.rm=TRUE)))
DailyMaxDataFeeders=data.frame(DataFeedersLong %>% select(Date, Alim_ID, Measurement)
%>% group_by(Date, Alim_ID) %>% summarise(Max = max(abs(Measurement), na.rm=TRUE)))
DailyLoadFactor=data.frame(DailyMeanDataFeeders$Date, DailyMeanDataFeeders$Alim_ID, (DailyMeanDataFeeders$Mean/DailyMaxDataFeeders$Max))
names(DailyLoadFactor)=c("Date", "Alim_ID", "Load.Factor")
#Wide Format#
LoadFactorDataFeeders=dcast(DailyLoadFactor, Date ~ Alim_ID, fun.aggregate=mean, value.var = "Load.Factor")
PctNA=data.frame(t(data.frame(LoadFactorDataFeeders[,2:length(LoadFactorDataFeeders)] %>% dplyr::select(everything())
%>% summarise_all(funs(sum(is.na(.))))))*(1/dim(LoadFactorDataFeeders)[1])*100)
PctNA$Alim_ID=row.names(PctNA)
names(PctNA)=c("pct.NA", "Alim_ID")
Daily.LF.Outliers=PctNA%>%dplyr::filter(pct.NA>0)%>%dplyr::select(Alim_ID)
#2.1.2 WEEKLY LOAD FACTOR
#Long Format#
WeeklyMeanDataFeeders=data.frame(DataFeedersLong %>% select(Week, Alim_ID, Measurement)
%>%group_by(Week, Alim_ID) %>% summarise(Mean = mean(abs(Measurement), na.rm=TRUE)))
WeeklyMaxDataFeeders=data.frame(DataFeedersLong %>% select(Week, Alim_ID, Measurement)
%>% group_by(Week, Alim_ID) %>% summarise(Max = max(abs(Measurement), na.rm=TRUE)))
WeeklyLoadFactor=data.frame(WeeklyMeanDataFeeders$Week, WeeklyMeanDataFeeders$Alim_ID, (WeeklyMeanDataFeeders$Mean/WeeklyMaxDataFeeders$Max))
names(WeeklyLoadFactor)=c("Week", "Alim_ID", "Load.Factor")
#Wide Format#
LoadFactorDataFeeders2=dcast(WeeklyLoadFactor, Week ~ Alim_ID, fun.aggregate=mean, value.var = "Load.Factor")
PctNA2=data.frame(t(data.frame(LoadFactorDataFeeders2[,2:length(LoadFactorDataFeeders2)] %>% dplyr::select(everything())
%>% summarise_all(funs(sum(is.na(.))))))*(1/dim(LoadFactorDataFeeders)[1])*100)
PctNA2$Alim_ID=row.names(PctNA2)
names(PctNA2)=c("pct.NA2", "Alim_ID")
Weekly.LF.Outliers=PctNA2%>%dplyr::filter(pct.NA2>0)%>%dplyr::select(Alim_ID)
######2.1.3 OUTPUT OF LOAD FACTOR OUTLIERS######
Load.Factor.Outliers=as.character(t(left_join(Daily.LF.Outliers, Weekly.LF.Outliers, by=c("Alim_ID", "Alim_ID"))))
idx=match(Load.Factor.Outliers, names(DataFeeders))
Output.LF.Outliers=DataFeeders[,-idx]
######################################################2.2 FREQUENCY/PERIOD OUTLIERS###############################################################
library("aTSA", lib.loc="~/R/win-library/3.5")
library("TSA", lib.loc="~/R/win-library/3.5")
#2.2.1 Generate Periodgrams using spec.gram(unbiased with taper=0.1)
p=spec.pgram(Output.LF.Outliers[,1], plot=FALSE, taper=0.1, demean=TRUE)
freq=p$freq[which(p$spec>=sort(p$spec, decreasing = TRUE)[2], arr.ind = TRUE)]#Find top 3 values
M=rbind.data.frame(freq)
names(M)=c("Top1", "Top2")
#Iterate over the set of feeders
for (i in 2:dim(Output.LF.Outliers)[2]){
p=spec.pgram(Output.LF.Outliers[,i], plot=FALSE, taper=0.1, demean=TRUE)
freq=p$freq[which(p$spec>=sort(p$spec, decreasing = TRUE)[2], arr.ind = TRUE)]#Find top 3 values
M=rbind.data.frame(M, freq)
}
M=data.frame(colnames(Output.LF.Outliers), M)
names(M)=c("Alim_ID", "Top1", "Top2")
M$ID=as.numeric(gsub("[a-zA-Z_ ]", "", M$Alim_ID))
#2.2.2 Transform M into daily periods 1/f=Period
PM=data.frame(M[,1], M[,4], round(1/M[,2]), round(1/M[,3]))
names(PM)=c("Alim_ID", "ID", "Period.1", "Period.2")
#Hourly in a year: 1/(365*24)=0.0001141553
#Hourly in a weeek: 1/(7*24)=0.005952381
#2.2.3 Compute Period Anomalies
Period.1.counts=data.frame(PM %>% count(Period.1, sort = TRUE))
Period.2.counts=data.frame(PM %>% count(Period.2, sort = TRUE))
#2.2.4 Flag Period Outliers
PM.1=left_join(PM, Period.1.counts, by = c("Period.1" = "Period.1"))
names(PM.1)=c("Alim_ID", "ID", "Period.1", "Period.2", "n.Period.1")
PM.2=left_join(PM.1, Period.2.counts, by=c("Period.2" = "Period.2"))
names(PM.2)=c("Alim_ID", "ID", "Period.1", "Period.2", "n.Period.1", "n.Period.2")
Period.outliers=PM.2%>%dplyr::filter(n.Period.1<(dim(Output.LF.Outliers)[2]*0.1) & n.Period.2<(dim(Output.LF.Outliers)[2]*0.1))%>%select(Alim_ID)
######2.2.5 OUTPUT OF PERIOD OUTLIERS######
Anomally.Period.Outliers=as.character(t(Period.outliers))
idx=match(Anomally.Period.Outliers, names(Output.LF.Outliers))
Output.Period.Outliers=Output.LF.Outliers[,-idx]
######################################################################2.3 UN-BALANCED CLUSTER OUTLIERS############################################
library("dplyr", lib.loc="~/R/win-library/3.5")
library("tsoutliers", lib.loc="~/R/win-library/3.5")
library("TSclust", lib.loc="~/R/win-library/3.5")
library("factoextra", lib.loc="~/R/win-library/3.5")
library("dendextend", lib.loc="~/R/win-library/3.5")
library("cluster", lib.loc="C:/Program Files/R/R-3.5.1/library")
library("NbClust", lib.loc="~/R/win-library/3.5")
#2.3.1 Compute Dissimilarity Matrix
TSMatrix2=t(as.matrix(Output.Period.Outliers))
DissMat2=diss(TSMatrix2, "INT.PER")
#2.3.2 Select Number of Clusters
#DoParallel routine
require("doParallel")
# Create parallel workers
workers <- makeCluster(6L)
# Preload dtwclust in each worker; not necessary but useful
invisible(clusterEvalQ(workers, library("factoextra")))
# Register the backend; this step MUST be done
registerDoParallel(workers)
set.seed(12345)
AvgSilhouette=fviz_nbclust(Output.Period.Outliers, FUN = hcut, hc_func=c("hclust"), hc_method=c("complete"), method = "silhouette")
# Stop parallel workers
stopCluster(workers)
# Go back to sequential computation
registerDoSEQ()
#Extract Maximum Silhouette
Cluster.Sil=AvgSilhouette$data
CLUS.NUM=as.numeric(Cluster.Sil[which.max(Cluster.Sil$y),1])
#2.3.2 Hierarchical Aglomerative Clustering Complete Linkage
hcPER.complete=hclust(DissMat2, method="complete")
plot(hcPER.complete)
rect.hclust(hcPER.complete, k=CLUS.NUM, border=2:12)
sub_grp.complete=cutree(hcPER.complete, k = CLUS.NUM)
t=data.frame(sub_grp.complete)
Output.HClust=data.frame(row.names(t), t, row.names=NULL)
names(Output.HClust)=c("Alim_ID", "Cluster")
#2.3.3 Construct Summary DataFrame
ClusterSum=data.frame(Output.HClust%>%dplyr::count(Cluster))
#2.3.4 Filter by less than 10% of observations
Outlier.Clust=ClusterSum%>%dplyr::filter(n<dim(DissMat2)[2]*0.1)%>%dplyr::select(Cluster)
#2.3.5 Flag Alim_ID associated only with Outlier.Clust
H.Clust.Outliers=data.frame()
for (i in 1:dim(Outlier.Clust)[1]){
H.Clust.Outliers=rbind.data.frame(H.Clust.Outliers,Output.HClust%>%filter(Cluster==Outlier.Clust[i,1])%>%select(Alim_ID))
}
######2.3.5 OUTPUT OF UN-BALANCED CLUSTER OUTLIERS######
Un.Balanced.Cluster.Outliers=as.character(t(H.Clust.Outliers))
idx=match(Un.Balanced.Cluster.Outliers, names(Output.Period.Outliers))
Output.Un.Balanced.Cluster.Outliers=Output.Period.Outliers[,-idx]
CleanedDataSet=data.frame(MiceImputed[,1],Output.Un.Balanced.Cluster.Outliers)
write.csv(CleanedDataSet, file = "CleanedFeeder.csv", sep=",")
###################################################3. Transform Series into MultipleSeasonal Time Series#########################################
#Multiple Seasonal Adjustment for hourly and weekly seasonalities for TS with frequency=3600 (hourly measurements)
library("aTSA", lib.loc="~/R/win-library/3.5")
library("tseries", lib.loc="~/R/win-library/3.5")
library("TSA", lib.loc="~/R/win-library/3.5")
library("forecast", lib.loc="~/R/win-library/3.5")
#3.1 Noise (Season Adjustment substracting the seasonal pattern)
SDS=data.frame(seasadj(stl(msts(Output.Un.Balanced.Cluster.Outliers[,1], seasonal.periods=c(24,7*24)), "periodic")))
for (i in 2:dim(Output.Un.Balanced.Cluster.Outliers)[2]){
SDS=data.frame(SDS, seasadj(stl(msts(Output.Un.Balanced.Cluster.Outliers[,i], seasonal.periods=c(24,7*24)), "periodic")))
}
SDS=data.frame(MiceImputed[,1], SDS)
names(SDS)=c("DateTime", paste0("Noise_",colnames(Output.Un.Balanced.Cluster.Outliers)))
write.csv(SDS, file = "Noise_Term.csv", sep=",")
#3.2 Seasonal Pattern (Substracting the noice from the obtained TS)
SeasonD=data.frame(Output.Un.Balanced.Cluster.Outliers[,1]-seasadj(stl(msts(Output.Un.Balanced.Cluster.Outliers[,1],
seasonal.periods=c(24,7*24)), "periodic")))
for (i in 2:dim(Output.Un.Balanced.Cluster.Outliers)[2]){
SeasonD=data.frame(SeasonD, Output.Un.Balanced.Cluster.Outliers[,i]-seasadj(stl(msts(Output.Un.Balanced.Cluster.Outliers[,i],
seasonal.periods=c(24,7*24)), "periodic")))
}
SeasonD=data.frame(MiceImputed[,1], SeasonD)
names(SeasonD)=c("DateTime", paste0("Season_",colnames(Output.Un.Balanced.Cluster.Outliers)))
write.csv(SeasonD, file = "Seasonal_Term.csv", sep=",")
#3.3 Noise+Seasonal Data Set
Noise_Season_df=cbind.data.frame(SeasonD, SDS[,-1])
write.csv(Noise_Season_df, file = "Noise_Season_Output.csv", sep=",")
|
library(Lock5withR)
### Name: QuizPulse10
### Title: Quiz vs Lecture Pulse Rates
### Aliases: QuizPulse10
### Keywords: datasets
### ** Examples
# Load the QuizPulse10 dataset from the Lock5withR package into the workspace.
data(QuizPulse10)
| /data/genthat_extracted_code/Lock5withR/examples/QuizPulse10.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 172 | r | library(Lock5withR)
### Name: QuizPulse10
### Title: Quiz vs Lecture Pulse Rates
### Aliases: QuizPulse10
### Keywords: datasets
### ** Examples
data(QuizPulse10)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pareto.R
\name{pareto_steps}
\alias{pareto_steps}
\title{Generate weight combinations for running pareto function}
\usage{
pareto_steps(cnames, step = 0.1)
}
\arguments{
\item{cnames}{A vector of constraint names to optimize over e.g. c("Y", "BD")}
\item{step}{The step interval over which to search for optimal solutions.}
% NOTE: stray roxygen remnant removed from the item above:
% @param yblist A two element list giving yield modifications
}
\description{
Generate weight combinations for running pareto function
}
\keyword{internal}
| /man/pareto_steps.Rd | no_license | PrincetonUniversity/agroEcoTradeoff | R | false | true | 558 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pareto.R
\name{pareto_steps}
\alias{pareto_steps}
\title{Generate weight combinations for running pareto function}
\usage{
pareto_steps(cnames, step = 0.1)
}
\arguments{
\item{cnames}{A vector of constraint names to optimize over e.g. c("Y", "BD")}
\item{step}{The step interval over which to search for optimal solutions
#@param yblist A two element list giving yield modifications}
}
\description{
Generate weight combinations for running pareto function
}
\keyword{internal}
|
# Fit FLASH with a fixed correlation structure estimated by MASH on the GTEx
# eQTL summary statistics (z-scores).
devtools::load_all("/Users/willwerscheid/GitHub/flashr/")
devtools::load_all("/Users/willwerscheid/GitHub/ebnm/")
library(mashr)
# GTEx summary statistics: "strong" signals and "random" (null-like) tests.
gtex <- readRDS(gzcon(url("https://github.com/stephenslab/gtexresults/blob/master/data/MatrixEQTLSumStats.Portable.Z.rds?raw=TRUE")))
strong <- gtex$strong.z
random <- gtex$random.z
# Step 1. Estimate correlation structure using MASH (from the random tests).
m_random <- mash_set_data(random, Shat = 1)
Vhat <- estimate_null_correlation(m_random)
# Step 2. Estimate data-driven loadings using FLASH.
# Step 2a. Fit Vhat: decompose Vhat = W + lambda.min * I, fix the residual
# variance at lambda.min, and absorb W into fixed loadings.
n <- nrow(Vhat)
lambda.min <- min(eigen(Vhat, symmetric=TRUE, only.values=TRUE)$values)
# BUGFIX: the original passed an undefined object `Y` to flash_set_data();
# the strong z-scores are the matrix being decomposed here -- confirm.
data <- flash_set_data(strong, S = sqrt(lambda.min))
W.eigen <- eigen(Vhat - diag(rep(lambda.min, n)), symmetric=TRUE)
# The rank of W is at most n - 1, so we can drop the last eigenval/vec:
W.eigen$values <- W.eigen$values[-n]
W.eigen$vectors <- W.eigen$vectors[, -n, drop=FALSE]
# Fix the eigenvectors of W as loadings; their priors are held fixed
# (fixg = TRUE) with scale tied to the corresponding eigenvalues, so only
# the factors are learned during the backfit below.
fl <- flash_add_fixed_loadings(data,
                               LL=W.eigen$vectors,
                               init_fn="udv_svd",
                               backfit=FALSE)
ebnm_param_f <- lapply(as.list(W.eigen$values),
                       function(eigenval) {
                         list(g = list(a=1/eigenval, pi0=0), fixg = TRUE)
                       })
ebnm_param_l <- lapply(vector("list", n - 1),
                       function(k) {list()})
fl <- flash_backfit(data,
                    fl,
                    var_type="zero",
                    ebnm_fn="ebnm_pn",
                    ebnm_param=(list(f = ebnm_param_f, l = ebnm_param_l)),
                    nullcheck=FALSE)
# Step 2b. Add data-driven factors with nonnegative loadings
# (ash prior restricted to a +uniform mixture).
ebnm_fn = list(f = "ebnm_pn", l = "ebnm_ash")
ebnm_param = list(f = list(warmstart = TRUE),
                  l = list(mixcompdist="+uniform"))
fl <- flash_add_greedy(data,
                       Kmax=50,
                       f_init=fl,
                       var_type="zero",
                       init_fn="udv_svd",
                       ebnm_fn=ebnm_fn,
                       ebnm_param=ebnm_param)
saveRDS(fl, "./output/MASHvFLASHVhat/2bGreedy.rds")
# Step 2c (optional). Backfit only the factors added in step 2b; the first
# n - 1 fixed loadings from step 2a are left untouched (kset starts at n).
fl <- flash_backfit(data,
                    fl,
                    kset=n:fl$nfactors,
                    var_type="zero",
                    ebnm_fn=ebnm_fn,
                    ebnm_param=ebnm_param,
                    nullcheck=FALSE)
saveRDS(fl, "./output/MASHvFLASHVhat/2cBackfit.rds")
# Step 2d (optional). Repeat steps 2b and 2c as desired.
fl <- flash_add_greedy(data,
                       Kmax=50,
                       f_init=fl,
                       var_type="zero",
                       init_fn="udv_svd",
                       ebnm_fn=ebnm_fn,
                       ebnm_param=ebnm_param)
fl <- flash_backfit(data,
                    fl,
                    kset=n:fl$nfactors,
                    var_type="zero",
                    ebnm_fn=ebnm_fn,
                    ebnm_param=ebnm_param,
                    nullcheck=FALSE)
saveRDS(fl, "./output/MASHvFLASHVhat/2dRepeat3.rds")
| /code/MASHvFLASHnondiagonalV.R | no_license | willwerscheid/MASHvFLASH | R | false | false | 3,070 | r | devtools::load_all("/Users/willwerscheid/GitHub/flashr/")
devtools::load_all("/Users/willwerscheid/GitHub/ebnm/")
library(mashr)
gtex <- readRDS(gzcon(url("https://github.com/stephenslab/gtexresults/blob/master/data/MatrixEQTLSumStats.Portable.Z.rds?raw=TRUE")))
strong <- gtex$strong.z
random <- gtex$random.z
# Step 1. Estimate correlation structure using MASH.
m_random <- mash_set_data(random, Shat = 1)
Vhat <- estimate_null_correlation(m_random)
# Step 2. Estimate data-driven loadings using FLASH.
# Step 2a. Fit Vhat.
n <- nrow(Vhat)
lambda.min <- min(eigen(Vhat, symmetric=TRUE, only.values=TRUE)$values)
data <- flash_set_data(Y, S = sqrt(lambda.min))
W.eigen <- eigen(Vhat - diag(rep(lambda.min, n)), symmetric=TRUE)
# The rank of W is at most n - 1, so we can drop the last eigenval/vec:
W.eigen$values <- W.eigen$values[-n]
W.eigen$vectors <- W.eigen$vectors[, -n, drop=FALSE]
fl <- flash_add_fixed_loadings(data,
LL=W.eigen$vectors,
init_fn="udv_svd",
backfit=FALSE)
ebnm_param_f <- lapply(as.list(W.eigen$values),
function(eigenval) {
list(g = list(a=1/eigenval, pi0=0), fixg = TRUE)
})
ebnm_param_l <- lapply(vector("list", n - 1),
function(k) {list()})
fl <- flash_backfit(data,
fl,
var_type="zero",
ebnm_fn="ebnm_pn",
ebnm_param=(list(f = ebnm_param_f, l = ebnm_param_l)),
nullcheck=FALSE)
# Step 2b. Add nonnegative factors.
ebnm_fn = list(f = "ebnm_pn", l = "ebnm_ash")
ebnm_param = list(f = list(warmstart = TRUE),
l = list(mixcompdist="+uniform"))
fl <- flash_add_greedy(data,
Kmax=50,
f_init=fl,
var_type="zero",
init_fn="udv_svd",
ebnm_fn=ebnm_fn,
ebnm_param=ebnm_param)
saveRDS(fl, "./output/MASHvFLASHVhat/2bGreedy.rds")
# Step 2c (optional). Backfit factors from step 2b.
fl <- flash_backfit(data,
fl,
kset=n:fl$nfactors,
var_type="zero",
ebnm_fn=ebnm_fn,
ebnm_param=ebnm_param,
nullcheck=FALSE)
saveRDS(fl, "./output/MASHvFLASHVhat/2cBackfit.rds")
# Step 2d (optional). Repeat steps 2b and 2c as desired.
fl <- flash_add_greedy(data,
Kmax=50,
f_init=fl,
var_type="zero",
init_fn="udv_svd",
ebnm_fn=ebnm_fn,
ebnm_param=ebnm_param)
fl <- flash_backfit(data,
fl,
kset=n:fl$nfactors,
var_type="zero",
ebnm_fn=ebnm_fn,
ebnm_param=ebnm_param,
nullcheck=FALSE)
saveRDS(fl, "./output/MASHvFLASHVhat/2dRepeat3.rds")
|
# Split the migration array: hold out the years after the training cutoff
# as the test set and save it to out/migration_test.rds.
library(dembase)
library(dplyr)
library(docopt)
# Command-line interface string parsed by docopt() below.
# NOTE(review): docopt options that take a value normally need a placeholder,
# e.g. "--last_year_train=<year>  [default: 2008]" -- confirm this usage
# string parses as intended.
'
Usage:
migration_test.R [options]
Options:
--last_year_train [default: 2008]
' -> doc
opts <- docopt(doc)
# docopt returns strings; convert the cutoff year to an integer.
last_year_train <- opts$last_year_train %>% as.integer()
migration <- readRDS("out/migration.rds")
# Keep only the years strictly after the training cutoff.
migration_test <- migration %>%
subarray(time > last_year_train)
saveRDS(migration_test,
file = "out/migration_test.rds")
| /src/migration_test.R | permissive | bayesiandemography/iceland_migration | R | false | false | 399 | r |
library(dembase)
library(dplyr)
library(docopt)
'
Usage:
migration_test.R [options]
Options:
--last_year_train [default: 2008]
' -> doc
opts <- docopt(doc)
last_year_train <- opts$last_year_train %>% as.integer()
migration <- readRDS("out/migration.rds")
migration_test <- migration %>%
subarray(time > last_year_train)
saveRDS(migration_test,
file = "out/migration_test.rds")
|
##R Commands
#Question 1
# Fahrenheit temperatures to convert.
temp <- c(24, 15)
temp
# Convert Fahrenheit to Celsius: C = (F - 32) * 5/9. Vectorized over `temp`.
convert_fahr_to_cels <- function(temp) {
  (temp - 32) * 5 / 9
}
cel <- convert_fahr_to_cels(temp)
cel
#Question 2
# Build a length-200 vector with 1 at odd positions and 3 at even positions.
# The element-by-element loop is replaced by a single vectorized rep() call,
# which produces the same numeric vector.
vec200 <- rep(c(1, 3), times = 100)
print(vec200)
#Question 3: count the perfect squares between 1 and 2001.
# BUGFIX: the original initialised the counter as the vector 1:2001 and only
# incremented p when p*p == i, so the while loop never terminated for any
# non-square i. The counter now starts at 0 and p advances every iteration.
numPerfect <- 0
for (i in 1:2001) {
  p <- 1
  while (p * p <= i) {
    if (p * p == i) {
      numPerfect <- numPerfect + 1
    }
    p <- p + 1
  }
}
print(numPerfect)
#Cars and mileage
#Question 1
# NOTE(review): installing packages inside a script re-runs the install on
# every execution; kept for fidelity with the original homework script.
install.packages('ggplot2')
library(ggplot2)
summary(mpg)
head(mpg)
# Three highest highway-mpg values (ties included).
three <- sort(mpg$hwy, decreasing = TRUE)[1:3]
three
# Rows achieving those top values. Using `three` instead of the hard-coded
# c(41, 44) keeps the filter in sync with the computed top values.
top3 <- mpg[mpg$hwy %in% three, ]
top3
#Question 2
head(mpg)
numCompact <- length(which(mpg$class == "compact"))
numCompact
# BUGFIX: the original `mpg[mpg$class == c('compact')]` put a row-length
# logical vector in the column position of a tibble, which errors; row
# subsetting with a trailing comma is intended.
com <- mpg[mpg$class == "compact", ]
mpg$model
#Question 3
# BUGFIX: the original plot() calls ended with a trailing comma, which R
# rejects as an empty argument; the trailing commas are removed.
x <- mpg$hwy
y <- mpg$cty
plot(x, y, main = "hwy vs city", xlab = "hwy mpg", ylab = "city mpg")
#Question 4
cars2008 <- mpg[mpg$year == 2008, ]
cars2008
cars1999 <- mpg[mpg$year == 1999, ]
cars1999
summary(cars2008)
summary(cars1999)
summary(mpg)
x2 <- cars2008$hwy
y2 <- cars1999$hwy
plot(x2, y2, main = "2008 vs 1999 hwy mpg", xlab = "2008 hwy mpg", ylab = "1999 hwy mpg")
x3 <- cars2008$cty
y3 <- cars1999$cty
plot(x3, y3, main = "2008 vs 1999 cty mpg", xlab = "2008 cty mpg", ylab = "1999 cty mpg")
str(mpg)
| /hw2.R | no_license | sthomas20/ds202_hw2 | R | false | false | 1,356 | r | ##R Commands
#Question 1
temp<- c(24,15)
temp
convert_fahr_to_cels <- function(temp) {
celsius <- 5/9 * (temp - 32)
return(celsius)
}
cel<- convert_fahr_to_cels(temp)
cel
#Question 2
vec200<- c(1:200)
for (i in 1:200)
{
if((i %% 2) == 0) {
vec200[i]<- 3
}
else {
vec200[i]<- 1
}
}
print(vec200)
#Question 3?
numPerfect<- c(1:2001)
for(i in 1:2001){
p<- 1
while((p*p) <= i){
if((p*p) == i){
numPerfect <- numPerfect+1
p<- p+1
}
}
i<- i+1
}
print(numPerfect)
#Cars and mileage
#Question 1
install.packages('ggplot2')
library(ggplot2)
summary(mpg)
head(mpg)
three<- sort(mpg$hwy, decreasing = TRUE)[1:3]
three
top3 <- mpg[mpg$hwy %in% c(41, 44), ]
top3
#Question 2
head(mpg)
numCompact<-length(which(mpg$class == "compact"))
numCompact
com<- mpg[mpg$class == c('compact')]
mpg$model
#Question 3
x<-mpg$hwy
y<-mpg$cty
plot(x,y, main= "hwy vs city", xlab = "hwy mpg", ylab = "city mpg",)
#Question 4
cars2008<- mpg[mpg$year == 2008, ]
cars2008
cars1999<- mpg[mpg$year == 1999,]
cars1999
summary(cars2008)
summary(cars1999)
summary(mpg)
x2<-cars2008$hwy
y2<-cars1999$hwy
plot(x2,y2, main= "2008 vs 1999 hwy mpg", xlab = "2008 hwy mpg", ylab = "1999 hwy mpg",)
x3<-cars2008$cty
y3<-cars1999$cty
plot(x3,y3, main= "2008 vs 1999 cty mpg", xlab = "2008 cty mpg", ylab = "1999 cty mpg",)
str(mpg)
|
/1. Intro a R/Ejemplos/4. Machine Learning con R (primera parte).R | no_license | Logicus03/EOI_Artificial_Intelligence | R | false | false | 2,397 | r | ||
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of RadiologyFeatureExtraction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Build autoencoder
#' @param x An object
#' @param encoderSettings
#' @param imageProcessingSettings
#' @param outputFolder
#'
#' @export
buildEncoder <- function (trainData,
valData,
encoderSettings,
imageProcessingSettings,
outputFolder){
fun <- encoderSettings$model
args <- list(encoderParam = encoderSettings$param,
imageProcessingSettings = imageProcessingSettings)
encoderModel <- do.call(fun,args)
return (encoderModel)
}
| /R/buildEncoder.r | no_license | ABMI/RadiologyFeatureExtraction | R | false | false | 1,250 | r | # Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of RadiologyFeatureExtraction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Build autoencoder
#' @param x An object
#' @param encoderSettings
#' @param imageProcessingSettings
#' @param outputFolder
#'
#' @export
buildEncoder <- function (trainData,
valData,
encoderSettings,
imageProcessingSettings,
outputFolder){
fun <- encoderSettings$model
args <- list(encoderParam = encoderSettings$param,
imageProcessingSettings = imageProcessingSettings)
encoderModel <- do.call(fun,args)
return (encoderModel)
}
|
#' Hello World
#'
#' Basic hello world function to be called from the demo app
#'
#' @export
#' @param myname your name. Required.
require(Seurat)
hello <- function(myname = ""){
pbmc.data <- Read10X(data.dir = "C:\\hg19")
if(myname == ""){
stop("Tell me your name!")
}
list(
message = paste("hello", myname, "! This is", R.Version()$version.string)
)
}
| /R/hello.R | no_license | goodhen2/vue | R | false | false | 382 | r | #' Hello World
#'
#' Basic hello world function to be called from the demo app
#'
#' @export
#' @param myname your name. Required.
require(Seurat)
hello <- function(myname = ""){
pbmc.data <- Read10X(data.dir = "C:\\hg19")
if(myname == ""){
stop("Tell me your name!")
}
list(
message = paste("hello", myname, "! This is", R.Version()$version.string)
)
}
|
/WaterBalance_module/Functions/Water_Balance_funcs.R | permissive | shekharsg/MITERRA-PORTUGAL | R | false | false | 16,591 | r | ||
### Haplotyping pipeline for HLA given minION fastq reads ###
# ------------------------------------ MODULE ACTIVATION -------------------------
#module load samtools
#module load minimap2
#module load flye
#----------------------------------------------------------------------------------
# Stage switches: only the enabled stages of the pipeline run below.
minimap <- FALSE
samtools <- FALSE
pmd <- FALSE
flye <- TRUE
# Positional arguments: [1] input fastq, [2] optional output directory.
arg <- commandArgs(trailingOnly = TRUE)
FASTQ <- arg[1]
# File prefix: everything before the first dot of the fastq name.
prefix <- sub("\\..*", "", FASTQ)
#you have to discover how to move around
# Derived file names for the intermediate alignment products.
REFERENCE <- "chr6.fa"
SAM <- paste0(prefix, ".sam")
BAM <- paste0(prefix, ".bam")
SORTED_BAM <- paste0(prefix, ".sorted.bam")
# Fall back to a default output directory when no second argument is given.
if (is.na(arg[2])) {
  arg[2] <- "output"
}
OUTPUT_DIR <- arg[2]
VCF <- paste0(prefix, ".vcf")
# Number of CPUs passed to the downstream tools.
THREADS <- "12"
# Echo the resolved configuration (auto-printed when run via Rscript).
paste0('fastq file --> ', FASTQ)
paste0('prefix --> ', prefix)
paste0('SAM file --> ', SAM)
paste0('BAM file --> ', BAM)
paste0('output dir --> ', OUTPUT_DIR)
paste0('VCF file --> ',VCF)
# Stage 1: map raw ONT reads to the chr6 reference with minimap2.
if(minimap){
#mapping against the reference
system(paste0("echo ---------------- Mapping with minimap2 [1/4] ---------------- "))
system(paste0("minimap2 -a -z 600,200 -x map-ont ", REFERENCE , " ", FASTQ, " >",SAM))
}
# Stage 2: convert the SAM to a sorted, indexed BAM (removes the SAM).
if(samtools){
system(paste0("echo ---------------- Samtools indexing and sorting [2/4] ---------------- "))
#data conversion and indexinx with samtools
system(paste0("samtools view -bS ",SAM, " > ",BAM)) #convert .sam>.bam
system(paste0("rm ",SAM))
system(paste0("samtools sort ",BAM," -o ", SORTED_BAM)) #sort the .bam file
system(paste0("samtools index ",SORTED_BAM)) #index the sorted .bam file
}
# Stage 3: phased small-variant calling over the extended HLA region with
# PEPPER-Margin-DeepVariant (the singularity image must be present locally).
if(pmd){
  # NOTE: `prefix` is reassigned here, so file names created by later
  # stages gain a trailing "_" whenever this stage has run.
  prefix=paste0(prefix,'_')
  HLA.bam=paste0(prefix,"HLA.bam")
  # The pull command creates pepper_deepvariant_r0.4.sif file locally
  #system(paste0("singularity pull docker://kishwars/pepper_deepvariant:r0.4"))
  # FIX: a space was missing after "-bS", which produced an invalid command
  # such as "samtools view -bSsample.sorted.bam ...".
  system(paste0("samtools view -bS ", SORTED_BAM, " chr6:29940532-31355875 >", HLA.bam)) #select only the HLA genes
  system(paste0("echo ---------------- Executing Pepper-Margin-Deepvariant [3/4] ---------------- "))
  # FIX: the command previously embedded backslash-newline sequences inside
  # the string literal and passed "-t ${12}", which is not valid shell.
  # Build one single-line command and pass the thread count directly.
  system(paste0("singularity exec --bind /usr/lib/locale/ ",
                "pepper_deepvariant_r0.4.sif ",
                "run_pepper_margin_deepvariant call_variant ",
                "-b ", HLA.bam, " ",
                "--phased_output ",
                "-f ", REFERENCE, " ",
                "-o ", OUTPUT_DIR, " ",
                "-p ", prefix, " ",
                "-t ", THREADS, " ",
                "--ont"))
}
#From here haplotyping with de-novo assembly with flye
# Stage 4: split the phased (haplotagged) BAM per HLA gene and haplotype,
# then convert each haplotype to FASTA for downstream assembly.
if(flye){
  # FIX: `list.files(pattern=)` takes a REGEX, not a glob; the previous
  # pattern "*MARGIN_..." started with a dangling '*' quantifier, which is
  # an invalid regular expression. Use ".*" instead.
  HAPLOTAGGED.bam=list.files(paste0(OUTPUT_DIR, "/intermediate_files"), pattern=".*MARGIN_PHASED.PEPPER_SNP_MARGIN.haplotagged.bam", full.names=TRUE)[1]
  HLA_A.bam=paste0(prefix,"_HLA_A.bam")
  HLA_B.bam=paste0(prefix,"_HLA_B.bam")
  HLA_C.bam=paste0(prefix,"_HLA_C.bam")
  #here I create subset of the haplotagged bam for each gene
  system(paste0("samtools view -bS ", HAPLOTAGGED.bam," chr6:29941532-29946870 >",HLA_A.bam))
  system(paste0("samtools view -bS ", HAPLOTAGGED.bam," chr6:31352875-31358179 >",HLA_B.bam))
  system(paste0("samtools view -bS ", HAPLOTAGGED.bam," chr6:31267749-31273092 >",HLA_C.bam))
  #31353872-31357188
  #mica
  #then I execute flye for each gene
  system(paste0("echo ---------------- Executing Flye [4/4] ---------------- "))
  # Split each per-gene BAM by the HP tag (haplotype 1 vs haplotype 2).
  #split the haplotypes
  system(paste0("bamtools split -in ", HLA_A.bam, " -tag HP"))
  system(paste0("bamtools split -in ", HLA_B.bam, " -tag HP"))
  system(paste0("bamtools split -in ", HLA_C.bam, " -tag HP"))
  #extract prefixes
  flye_prefixA=sub("\\..*", "", HLA_A.bam)
  flye_prefixB=sub("\\..*", "", HLA_B.bam)
  flye_prefixC=sub("\\..*", "", HLA_C.bam)
  #names of .fa files
  A1=paste0(prefix,'A1.fa')
  A2=paste0(prefix,'A2.fa')
  B1=paste0(prefix,'B1.fa')
  B2=paste0(prefix,'B2.fa')
  C1=paste0(prefix,'C1.fa')
  C2=paste0(prefix,'C2.fa')
  #convert each haplotype from .bam to .fa
  system(paste0("samtools bam2fq ", flye_prefixA, ".TAG_HP_1.bam | seqtk seq -A > ", A1))
  system(paste0("samtools bam2fq ", flye_prefixA, ".TAG_HP_2.bam | seqtk seq -A > ", A2))
  system(paste0("samtools bam2fq ", flye_prefixB, ".TAG_HP_1.bam | seqtk seq -A > ", B1))
  system(paste0("samtools bam2fq ", flye_prefixB, ".TAG_HP_2.bam | seqtk seq -A > ", B2))
  system(paste0("samtools bam2fq ", flye_prefixC, ".TAG_HP_1.bam | seqtk seq -A > ", C1))
  system(paste0("samtools bam2fq ", flye_prefixC, ".TAG_HP_2.bam | seqtk seq -A > ", C2))
}
# De-novo assembly of each haplotype with Flye (currently disabled).
if(FALSE){
  #out dirs
  oA1=paste0(OUTPUT_DIR,"/flyeA1/")
  oA2=paste0(OUTPUT_DIR,"/flyeA2/")
  oB1=paste0(OUTPUT_DIR,"/flyeB1/")
  oB2=paste0(OUTPUT_DIR,"/flyeB2/")
  oC1=paste0(OUTPUT_DIR,"/flyeC1/")
  oC2=paste0(OUTPUT_DIR,"/flyeC2/")
  # FIX: a space was missing before "--threads", so the flag was glued onto
  # the output directory (e.g. ".../flyeA1/--threads 4 -m 1000").
  #execute de-novo assembly with flye
  system(paste0("flye --nano-raw ", A1, " --out-dir ", oA1, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", A2, " --out-dir ", oA2, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", B1, " --out-dir ", oB1, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", B2, " --out-dir ", oB2, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", C1, " --out-dir ", oC1, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", C2, " --out-dir ", oC2, " --threads 4 -m 1000"))
}
system(paste0("echo ---------------- Finished ---------------- "))
| /fullHaplo_HLA.R | no_license | davidecanevazzi/Haplotyping-HLA | R | false | false | 5,491 | r | ### Haplotyping pipeline for HLA given minION fastq reads ###
# ------------------------------------ MODULE ACTIVATION -------------------------
#module load samtools
#module load minimap2
#module load flye
#----------------------------------------------------------------------------------
# (Duplicate copy of the HLA haplotyping pipeline; same fixes applied as
# in the first copy: samtools -bS spacing, singularity command, flye flags,
# and the invalid list.files() regex.)
# Stage toggles: each if-block below runs only when its flag is TRUE.
minimap=FALSE
samtools=FALSE
pmd=FALSE
flye=TRUE
# arg[1] = input FASTQ file; arg[2] = optional output directory.
arg=commandArgs(trailingOnly = TRUE)
FASTQ=arg[1]
prefix=sub("\\..*", "", FASTQ)
#you have to discover how to move around
# Set up input data
REFERENCE= "chr6.fa"
SAM = paste0(prefix,".sam")
BAM = paste0(prefix,".bam")
SORTED_BAM = paste0(prefix,".sorted.bam")
if (is.na(arg[2])) {
arg[2]="output"
}
OUTPUT_DIR = arg[2]
VCF = paste0(prefix,".vcf")
# Set the number of CPUs to use
THREADS="12"
paste0('fastq file --> ', FASTQ)
paste0('prefix --> ', prefix)
paste0('SAM file --> ', SAM)
paste0('BAM file --> ', BAM)
paste0('output dir --> ', OUTPUT_DIR)
paste0('VCF file --> ',VCF)
# Stage 1: map raw ONT reads to the chr6 reference with minimap2.
if(minimap){
#mapping against the reference
system(paste0("echo ---------------- Mapping with minimap2 [1/4] ---------------- "))
system(paste0("minimap2 -a -z 600,200 -x map-ont ", REFERENCE , " ", FASTQ, " >",SAM))
}
# Stage 2: convert the SAM to a sorted, indexed BAM.
if(samtools){
system(paste0("echo ---------------- Samtools indexing and sorting [2/4] ---------------- "))
#data conversion and indexinx with samtools
system(paste0("samtools view -bS ",SAM, " > ",BAM)) #convert .sam>.bam
system(paste0("rm ",SAM))
system(paste0("samtools sort ",BAM," -o ", SORTED_BAM)) #sort the .bam file
system(paste0("samtools index ",SORTED_BAM)) #index the sorted .bam file
}
# Stage 3: phased variant calling with PEPPER-Margin-DeepVariant.
if(pmd){
  # NOTE: `prefix` is reassigned here, so later file names gain a trailing "_".
  prefix=paste0(prefix,'_')
  HLA.bam=paste0(prefix,"HLA.bam")
  # The pull command creates pepper_deepvariant_r0.4.sif file locally
  #system(paste0("singularity pull docker://kishwars/pepper_deepvariant:r0.4"))
  # FIX: a space was missing after "-bS" (invalid "samtools view -bS<file>").
  system(paste0("samtools view -bS ", SORTED_BAM, " chr6:29940532-31355875 >", HLA.bam)) #select only the HLA genes
  system(paste0("echo ---------------- Executing Pepper-Margin-Deepvariant [3/4] ---------------- "))
  # FIX: build a single-line command; "-t ${12}" was not valid shell.
  system(paste0("singularity exec --bind /usr/lib/locale/ ",
                "pepper_deepvariant_r0.4.sif ",
                "run_pepper_margin_deepvariant call_variant ",
                "-b ", HLA.bam, " ",
                "--phased_output ",
                "-f ", REFERENCE, " ",
                "-o ", OUTPUT_DIR, " ",
                "-p ", prefix, " ",
                "-t ", THREADS, " ",
                "--ont"))
}
#From here haplotyping with de-novo assembly with flye
if(flye){
  # FIX: list.files(pattern=) is a regex; the previous "*MARGIN..." pattern
  # started with an invalid dangling '*' quantifier.
  HAPLOTAGGED.bam=list.files(paste0(OUTPUT_DIR, "/intermediate_files"), pattern=".*MARGIN_PHASED.PEPPER_SNP_MARGIN.haplotagged.bam", full.names=TRUE)[1]
  HLA_A.bam=paste0(prefix,"_HLA_A.bam")
  HLA_B.bam=paste0(prefix,"_HLA_B.bam")
  HLA_C.bam=paste0(prefix,"_HLA_C.bam")
  #here I create subset of the haplotagged bam for each gene
  system(paste0("samtools view -bS ", HAPLOTAGGED.bam," chr6:29941532-29946870 >",HLA_A.bam))
  system(paste0("samtools view -bS ", HAPLOTAGGED.bam," chr6:31352875-31358179 >",HLA_B.bam))
  system(paste0("samtools view -bS ", HAPLOTAGGED.bam," chr6:31267749-31273092 >",HLA_C.bam))
  #31353872-31357188
  #mica
  #then I execute flye for each gene
  system(paste0("echo ---------------- Executing Flye [4/4] ---------------- "))
  #split the haplotypes (by the HP tag: haplotype 1 vs haplotype 2)
  system(paste0("bamtools split -in ", HLA_A.bam, " -tag HP"))
  system(paste0("bamtools split -in ", HLA_B.bam, " -tag HP"))
  system(paste0("bamtools split -in ", HLA_C.bam, " -tag HP"))
  #extract prefixes
  flye_prefixA=sub("\\..*", "", HLA_A.bam)
  flye_prefixB=sub("\\..*", "", HLA_B.bam)
  flye_prefixC=sub("\\..*", "", HLA_C.bam)
  #names of .fa files
  A1=paste0(prefix,'A1.fa')
  A2=paste0(prefix,'A2.fa')
  B1=paste0(prefix,'B1.fa')
  B2=paste0(prefix,'B2.fa')
  C1=paste0(prefix,'C1.fa')
  C2=paste0(prefix,'C2.fa')
  #convert each haplotype from .bam to .fa
  system(paste0("samtools bam2fq ", flye_prefixA, ".TAG_HP_1.bam | seqtk seq -A > ", A1))
  system(paste0("samtools bam2fq ", flye_prefixA, ".TAG_HP_2.bam | seqtk seq -A > ", A2))
  system(paste0("samtools bam2fq ", flye_prefixB, ".TAG_HP_1.bam | seqtk seq -A > ", B1))
  system(paste0("samtools bam2fq ", flye_prefixB, ".TAG_HP_2.bam | seqtk seq -A > ", B2))
  system(paste0("samtools bam2fq ", flye_prefixC, ".TAG_HP_1.bam | seqtk seq -A > ", C1))
  system(paste0("samtools bam2fq ", flye_prefixC, ".TAG_HP_2.bam | seqtk seq -A > ", C2))
}
# De-novo assembly of each haplotype with Flye (currently disabled).
if(FALSE){
  #out dirs
  oA1=paste0(OUTPUT_DIR,"/flyeA1/")
  oA2=paste0(OUTPUT_DIR,"/flyeA2/")
  oB1=paste0(OUTPUT_DIR,"/flyeB1/")
  oB2=paste0(OUTPUT_DIR,"/flyeB2/")
  oC1=paste0(OUTPUT_DIR,"/flyeC1/")
  oC2=paste0(OUTPUT_DIR,"/flyeC2/")
  # FIX: a space was missing before "--threads".
  #execute de-novo assembly with flye
  system(paste0("flye --nano-raw ", A1, " --out-dir ", oA1, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", A2, " --out-dir ", oA2, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", B1, " --out-dir ", oB1, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", B2, " --out-dir ", oB2, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", C1, " --out-dir ", oC1, " --threads 4 -m 1000"))
  system(paste0("flye --nano-raw ", C2, " --out-dir ", oC2, " --threads 4 -m 1000"))
}
system(paste0("echo ---------------- Finished ---------------- "))
|
# Pair up per-chromosome allele-frequency (.frq) files and 1000G GBR VCFs
# by chromosome number, then write the 22 path pairs to a TSV.
target_list <- list.files(path="/ebc_data/awwohns/selection/getting_1000g_refs/allele_freqs/allelefreqs", pattern = "\\.frq$", full.names=TRUE)
thousandg_list <- list.files(path="/ebc_data/awwohns/selection/getting_1000g_refs/gbr_vcfs/gbr_chrs", pattern = "\\.vcf$", full.names=TRUE)
file_list <- list()
# seq_len() instead of 1:22 (safe-sequence idiom).
for (i in seq_len(22)) {
  # "chr1\\." requires a literal dot after the number, so chr1 does not
  # also match chr11 etc.
  file_list[[i]] <- cbind(target_list[grep(paste0("chr",i,"\\."),target_list)], thousandg_list[grep(paste0("chr",i,"\\."),thousandg_list)])
}
print(target_list)
# FIX: use TRUE rather than T -- T is an ordinary variable and can be
# reassigned, silently changing byrow.
df <- data.frame(matrix(unlist(file_list), nrow=22, byrow=TRUE))
write.table(df, "rsidsandfreqs.txt", sep="\t",row.names=FALSE,col.names=FALSE)
| /list_of_files/preplist.R | no_license | awohns/withrsid | R | false | false | 879 | r | target_list <- list.files(path="/ebc_data/awwohns/selection/getting_1000g_refs/allele_freqs/allelefreqs", pattern = "\\.frq$", full.names=TRUE)
# (Duplicate copy of preplist.R; `target_list` is defined on the
# preceding line of this dump.)
thousandg_list <- list.files(path="/ebc_data/awwohns/selection/getting_1000g_refs/gbr_vcfs/gbr_chrs", pattern = "\\.vcf$", full.names=TRUE)
file_list <- list()
# Pair .frq and .vcf paths per chromosome; "chr1\\." cannot match chr11.
for (i in 1:22) {
#file_list[[i]] <- target_list[grep(paste0("/",i,"_"),target_list)]
file_list[[i]] <- cbind(target_list[grep(paste0("chr",i,"\\."),target_list)], thousandg_list[grep(paste0("chr",i,"\\."),thousandg_list)])
#print(target_list[grep(paste0("^",i,"_"),target_list)])
#print(thousandg_list[grep(paste0("chr",i,"\\."),thousandg_list)])
#print("onelinedone")
}
print(target_list)
# 22 rows, one per chromosome; columns = (frq path, vcf path).
df <- data.frame(matrix(unlist(file_list), nrow=22, byrow=T))
write.table(df, "rsidsandfreqs.txt", sep="\t",row.names=FALSE,col.names=FALSE)
|
#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
# Swallow any arguments and return nothing visibly (a no-op callback).
null <- function(...) invisible()
# Collapse an object's class vector into a single "a/b/c" string.
klass <- function(x) paste(class(x), collapse = "/")
# Tools for finding srcrefs -----------------------------------------------

# Walk the call stack from frame `start` upward and return the first
# "srcref" attribute found, or NULL when no call carries source
# information. `seq2()` (rlang) yields an empty sequence when
# start > length(calls), so the loop is safely skipped.
find_first_srcref <- function(start) {
  calls <- sys.calls()
  calls <- calls[seq2(start, length(calls))]

  for (call in calls) {
    srcref <- attr(call, "srcref")
    if (!is.null(srcref)) {
      return(srcref)
    }
  }
  NULL
}
# Escape every regex metacharacter in `x` so it can be embedded as a
# literal in a PCRE pattern.
escape_regex <- function(x) {
  specials <- c("*", ".", "?", "^", "+", "$", "|", "(", ")", "[", "]", "{", "}", "\\")
  # Build a character class of backslash-escaped metacharacters.
  class_body <- paste0("\\", specials, collapse = "")
  gsub(paste0("([", class_body, "])"), "\\\\\\1", x, perl = TRUE)
}
# For R 3.1
# Backport of base::dir.exists(): TRUE only for paths that exist AND are
# directories. A nonexistent path yields FALSE (FALSE & NA is FALSE).
dir.exists <- function(paths) {
  is_dir <- file.info(paths)$isdir
  file.exists(paths) & is_dir
}
# Invoke the named restart only if it is currently established; do
# nothing otherwise. (Calling invokeRestart() for a missing restart
# would raise an error.)
maybe_restart <- function(restart) {
  if (!is.null(findRestart(restart))) {
    invokeRestart(restart)
  }
}
# Backport for R 3.2
# Repeat each element of `x` `times` times, elementwise (arguments are
# recycled). NA in either input yields NA_character_; a non-positive
# count yields "".
strrep <- function(x, times) {
  x <- as.character(x)
  if (length(x) == 0L) {
    return(x)
  }
  repeat_one <- function(s, n) {
    if (is.na(s) || is.na(n)) {
      return(NA_character_)
    }
    if (n <= 0L) {
      return("")
    }
    paste(rep(s, n), collapse = "")
  }
  out <- mapply(repeat_one, x, times, USE.NAMES = FALSE)
  as.character(out)
}
# A condition can be given a backtrace unless it is a Throwable
# (e.g. a JVM-originated condition).
can_entrace <- function(cnd) {
  is_throwable <- inherits(cnd, "Throwable")
  !is_throwable
}
# Need to strip environment and source references to make lightweight
# function suitable to send to another process
# Rebinding the environment to .GlobalEnv means the function must not
# rely on closed-over variables; zap_srcref() (rlang) drops srcrefs.
transport_fun <- function(f) {
  environment(f) <- .GlobalEnv
  f <- zap_srcref(f)
  f
}
# TRUE only for a length-one vector whose single element is NA.
isNA <- function(x) {
  length(x) == 1L && is.na(x)
}

# Drop zero-length (e.g. NULL) elements from a list, keeping names.
compact <- function(x) {
  keep <- lengths(x) > 0
  x[keep]
}
# Handled specially in test_code so no backtrace
# Raise a classed warning ("testthat_warn") via rlang::warn() so the test
# runner can recognise warnings that originate from testthat itself.
testthat_warn <- function(message, ...) {
  warn(message, class = "testthat_warn", ...)
}
# Split each string into a character vector of its lines. A trailing
# newline contributes a final empty line (strsplit alone would drop it).
split_by_line <- function(x) {
  has_trailing_nl <- grepl("\n$", x)
  pieces <- strsplit(x, "\n")
  pieces[has_trailing_nl] <- lapply(pieces[has_trailing_nl], function(p) c(p, ""))
  pieces
}
# Ask RStudio (if present and new enough) to refresh its VCS and Files
# panes; silently a no-op outside RStudio or when rstudioapi lacks
# executeCommand().
rstudio_tickle <- function() {
  if (!is_installed("rstudioapi")) {
    return()
  }
  if (!rstudioapi::hasFun("executeCommand")) {
    return()
  }
  rstudioapi::executeCommand("vcsRefresh")
  rstudioapi::executeCommand("refreshFiles")
}
# Abort with an installation hint when `pkg` is not available.
# `fun` is the user-facing function name shown in the error message.
# Uses rlang's is_installed()/abort().
check_installed <- function(pkg, fun) {
  if (is_installed(pkg)) {
    return()
  }

  abort(c(
    paste0("The ", pkg, " package must be installed in order to use `", fun, "`"),
    i = paste0("Do you need to run `install.packages('", pkg, "')`?")
  ))
}
# Capitalise the first character of each element of `x`.
first_upper <- function(x) {
  lead <- substr(x, 1, 1)
  substr(x, 1, 1) <- toupper(lead)
  x
}

# TRUE when running under `R CMD check`, which sets this environment
# variable to the name of the package being checked.
in_rcmd_check <- function() {
  pkg <- Sys.getenv("_R_CHECK_PACKAGE_NAME_", "")
  nzchar(pkg)
}
| /R/utils.R | permissive | Tubbz-alt/testthat | R | false | false | 2,556 | r | #' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
# No-op callback: swallow arguments, return nothing visibly.
null <- function(...) invisible()
# Collapse an object's class vector into a single "a/b/c" string.
klass <- function(x) paste(class(x), collapse = "/")
# Tools for finding srcrefs -----------------------------------------------
# Walk the call stack from frame `start` upward and return the first
# srcref attribute found, or NULL.
find_first_srcref <- function(start) {
  calls <- sys.calls()
  calls <- calls[seq2(start, length(calls))]
  for (call in calls) {
    srcref <- attr(call, "srcref")
    if (!is.null(srcref)) {
      return(srcref)
    }
  }
  NULL
}
# Escape regex metacharacters so `x` can be used as a literal pattern.
escape_regex <- function(x) {
  chars <- c("*", ".", "?", "^", "+", "$", "|", "(", ")", "[", "]", "{", "}", "\\")
  gsub(paste0("([\\", paste0(collapse = "\\", chars), "])"), "\\\\\\1", x, perl = TRUE)
}
# For R 3.1
dir.exists <- function(paths) {
  file.exists(paths) & file.info(paths)$isdir
}
# Invoke the restart only when it is currently established.
maybe_restart <- function(restart) {
  if (!is.null(findRestart(restart))) {
    invokeRestart(restart)
  }
}
# Backport for R 3.2
strrep <- function(x, times) {
  x = as.character(x)
  if (length(x) == 0L)
    return(x)
  unlist(.mapply(function(x, times) {
    if (is.na(x) || is.na(times))
      return(NA_character_)
    if (times <= 0L)
      return("")
    paste0(replicate(times, x), collapse = "")
  }, list(x = x, times = times), MoreArgs = list()), use.names = FALSE)
}
# A condition can be given a backtrace unless it is a Throwable.
can_entrace <- function(cnd) {
  !inherits(cnd, "Throwable")
}
# Need to strip environment and source references to make lightweight
# function suitable to send to another process
transport_fun <- function(f) {
  environment(f) <- .GlobalEnv
  f <- zap_srcref(f)
  f
}
# TRUE only for a scalar NA.
isNA <- function(x) length(x) == 1 && is.na(x)
# Drop zero-length elements from a list.
compact <- function(x) {
  x[lengths(x) > 0]
}
# Handled specially in test_code so no backtrace
testthat_warn <- function(message, ...) {
  warn(message, class = "testthat_warn", ...)
}
# Split strings into lines; a trailing newline yields a final "" line.
split_by_line <- function(x) {
  trailing_nl <- grepl("\n$", x)
  x <- strsplit(x, "\n")
  x[trailing_nl] <- lapply(x[trailing_nl], c, "")
  x
}
# Refresh RStudio's VCS/Files panes; no-op outside RStudio.
rstudio_tickle <- function() {
  if (!is_installed("rstudioapi")) {
    return()
  }
  if (!rstudioapi::hasFun("executeCommand")) {
    return()
  }
  rstudioapi::executeCommand("vcsRefresh")
  rstudioapi::executeCommand("refreshFiles")
}
# Abort with an install hint when `pkg` is missing.
check_installed <- function(pkg, fun) {
  if (is_installed(pkg)) {
    return()
  }
  abort(c(
    paste0("The ", pkg, " package must be installed in order to use `", fun, "`"),
    i = paste0("Do you need to run `install.packages('", pkg, "')`?")
  ))
}
# Capitalise the first character of each element.
first_upper <- function(x) {
  substr(x, 1, 1) <- toupper(substr(x, 1, 1))
  x
}
# TRUE when running under `R CMD check`.
in_rcmd_check <- function() {
  nzchar(Sys.getenv("_R_CHECK_PACKAGE_NAME_", ""))
}
|
## The first function, `makeCacheMatrix` creates a special "matrix", which is
## really a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse matrix
## 4. get the value of the inverse matrix

makeCacheMatrix <- function(x = matrix()) {
  # BUG FIX: the cache variable was declared as `m` but read and written as
  # `s`, so `s` leaked to the global environment via `<<-` and getsolve()
  # errored (or returned a stale global value) before setsolve() was called.
  s <- NULL
  set <- function(y) {
    x <<- y
    s <<- NULL          # invalidate the cached inverse when the matrix changes
  }
  get <- function() x
  setsolve <- function(solve) s <<- solve
  getsolve <- function() s
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## `cacheSolve` returns the inverse of the special "matrix" created by
## makeCacheMatrix. On a cache hit it reports "getting cached data" and
## returns the stored inverse; otherwise it computes the inverse with
## solve(), stores it via setsolve(), and returns it.

cacheSolve <- function(x = matrix(), ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | CesarMaalouf/ProgrammingAssignment2 | R | false | false | 1,384 | r | ## The first function, `makeVector` creates a special "matrix", which is
##really a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse matrix
## 4. get the value of the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
s <<- NULL
}
get <- function() x
setsolve <- function(solve) s <<- solve
getsolve <- function() s
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## The following function calculates the inverse of the special "matrix"
## created with the above function. It first checks to see if the
## inverse matrix has already been calculated. If so, it `get`s the inverse
## matrix from the cache and skips the computation. Otherwise, it calculates
## the inverse of the data and sets the value of the inverted matrix in the
## cache via the `setsolve` function.
cacheSolve <- function(x = matrix(), ...) {
  ## Return a matrix that is the inverse of 'x'
  # Cache hit: return the stored inverse without recomputing.
  s <- x$getsolve()
  if(!is.null(s)) {
    message("getting cached data")
    return(s)
  }
  # Cache miss: compute, store, and return the inverse.
  data <- x$get()
  s <- solve(data, ...)
  x$setsolve(s)
  s
}
|
####################################################################################################################
##
## SECOND-LEVEL META-ANALYSIS
##
###################################################################################################################
# This script reproduces the results of the meta-analysis testing the effect of covariates on
# decomposers and decomposition responses to stressors and nutrients
## 1. LOAD Data---------------------------------------------------------------------------
source("0201_LOAD_Data.R")
## 2. MODELS------------------------------------------------------------------------------
## Run second-level meta-analyses of decomposers responses (diversity and abundance)
# with metafor to derive confidence intervals, QM stats and p-value
# and using all the data (no need for data resampling in this univariate approach)
# these models correspond to the submodels for biodiversity and abundance responses in the piecewise SEMs
# Naming: pol_* datasets = stressors, nut_* = nutrients; *_BEF_* = diversity
# responses, *_DEF_* = abundance responses. Effect sizes are Fisher's z
# (zcor.*) with sampling variances (var.zcor.*); random effects nest
# observation ID within case study.
modelrma_polbef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                          mods = ~ ECD_max +
                            study_type + taxonomic.group + B.metric,
                          random = ~ 1 | Case.study / ID,
                          data = pol_BEF_es)
modelrma_poldef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                          mods = ~ ECD_max +
                            study_type + taxonomic.group,
                          random = ~ 1 | Case.study / ID,
                          data = pol_DEF_es)
modelrma_nutbef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                          mods = ~ ECD_max +
                            study_type + taxonomic.group + B.metric,
                          random = ~ 1 | Case.study / ID,
                          data = nut_BEF_es)
modelrma_nutdef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                          mods = ~ ECD_max +
                            study_type + taxonomic.group,
                          random = ~ 1 | Case.study / ID,
                          data = nut_DEF_es)
# as all factors have only two levels, the pvalues in the summary test the significance of the factor overall
# taxonomic.group effect
# `btt` selects the coefficient(s) to test (Wald-type QM test); positions
# follow the model formula above (3 = study_type, 4 = taxonomic.group,
# 5 = B.metric where fitted).
resanova_taxo <- data.frame(rbind(
  pol_bef = as.numeric(c(anova(modelrma_polbef, btt = 4)[1:2])),
  pol_def = as.numeric(c(anova(modelrma_poldef, btt = 4)[1:2])),
  nut_bef = as.numeric(c(anova(modelrma_nutbef, btt = 4)[1:2])),
  nut_def = as.numeric(c(anova(modelrma_nutdef, btt = 4)[1:2]))))
resanova_taxo$Predictor <- rep("Taxonomic group", 4)
resanova_taxo$Response <- rep(c("Diversity", "Abundance"), 2)
resanova_taxo$ECD.type <- c(rep("Stressors", 2), rep("Nutrients", 2))
# study type effect
resanova_study <- data.frame(rbind(
  pol_bef = as.numeric(c(anova(modelrma_polbef, btt = 3)[1:2])),
  pol_def = as.numeric(c(anova(modelrma_poldef, btt = 3)[1:2])),
  nut_bef = as.numeric(c(anova(modelrma_nutbef, btt = 3)[1:2])),
  nut_def = as.numeric(c(anova(modelrma_nutdef, btt = 3)[1:2]))))
resanova_study$Predictor <- rep("Study type", 4)
resanova_study$Response <- rep(c("Diversity", "Abundance"), 2)
resanova_study$ECD.type <- c(rep("Stressors", 2), rep("Nutrients", 2))
# B.metric effect
# B.metric is only fitted in the diversity (BEF) models; abundance rows
# are filled with NA.
resanova_Bmetric <- data.frame(rbind(
  pol_bef = as.numeric(c(anova(modelrma_polbef, btt = 5)[1:2])),
  pol_def = rep(NA, 2),
  nut_bef = as.numeric(c(anova(modelrma_nutbef, btt = 5)[1:2])),
  nut_def = rep(NA, 2)))
resanova_Bmetric$Predictor <- rep("Diversity metric", 4)
resanova_Bmetric$Response <- rep(c("Diversity", "Abundance"), 2)
resanova_Bmetric$ECD.type <- c(rep("Stressors", 2), rep("Nutrients", 2))
# Rename the first two columns (QM statistic, p-value) in all three tables.
names(resanova_taxo)[c(1,2)] <- names(resanova_study)[c(1,2)] <- names(resanova_Bmetric)[c(1,2)] <- c("QM", "P")
print(resanova_taxo)
print(resanova_study)
print(resanova_Bmetric)
# save results
write.csv(resanova_taxo, "tables/ResMATaxo.csv")
write.csv(resanova_study, "tables/ResMAStudy.csv")
write.csv(resanova_Bmetric, "tables/ResMABmetric.csv")
# second-level meta-analyses for decomposition: remove duplicates ES on decompo
# Duplicate decomposition effect sizes (same clusterID_LD) are dropped so
# each decomposition observation enters only once.
modelrmald_polbef <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
                            mods = ~ zcor.ECD.B + ECD_max +
                              study_type,
                            random = ~ 1 | Case.study / ID,
                            data = pol_BEF_es[!duplicated(pol_BEF_es$clusterID_LD),])
modelrmald_poldef <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
                            mods = ~ zcor.ECD.B+ ECD_max +
                              study_type,
                            random = ~ 1 | Case.study / ID,
                            data = pol_DEF_es[!duplicated(pol_DEF_es$clusterID_LD),])
modelrmald_nutbef <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
                            mods = ~ zcor.ECD.B+ ECD_max +
                              study_type,
                            random = ~ 1 | Case.study / ID,
                            data = nut_BEF_es[!duplicated(nut_BEF_es$clusterID_LD),])
modelrmald_nutdef <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
                            mods = ~ zcor.ECD.B+ ECD_max +
                              study_type,
                            random = ~ 1 | Case.study / ID,
                            data = nut_DEF_es[!duplicated(nut_DEF_es$clusterID_LD),])
# as all factors have only two levels, the pvalues in the summary test the significance of the factor
# study type effect (coefficient position 4 = study_type in these models)
resanovaLD_study <- data.frame(rbind(
  pol_bef = as.numeric(c(anova(modelrmald_polbef, btt = 4)[1:2])),
  pol_def = as.numeric(c(anova(modelrmald_poldef, btt = 4)[1:2])),
  nut_bef = as.numeric(c(anova(modelrmald_nutbef, btt = 4)[1:2])),
  nut_def = as.numeric(c(anova(modelrmald_nutdef, btt = 4)[1:2]))))
resanovaLD_study$Predictor <- rep("Study type", 4)
resanovaLD_study$Response <- rep(c("Decomposition (Div)", "Decomposition (Abd)"), 2)
resanovaLD_study$ECD.type <- c(rep("Stressors", 2), rep("Nutrients", 2))
names(resanovaLD_study)[c(1,2)] <- c("QM", "P")
print(resanovaLD_study)
write.csv(resanovaLD_study, "tables/ResMALDstudy.csv")
## 3. FIGURE Stressor and nutrient intensity effects-----------------------------------------
# This code creates Figure 6 of the manuscript
# colo_lea[1] is used for stressor panels, colo_lea[2] for nutrient panels
# (see the figure assembly further down).
colo_lea <- c("#0072B2", "#D55E00")
# set sizes of plotted elements
sizetext <- 12
sizelegend <- 11
sizepoint <- 1
sizeline <- 0.8
sizeannotate <- 3.4
## Function to create panel for each outcome
# Panels LD
# Panel builder for decomposition (LD) responses vs stressor/nutrient
# intensity (ECD_max). Fits a meta-regression with metafor, then draws a
# weighted scatter (weight = inverse sampling variance), the fitted slope
# (dashed when the QM test for ECD_max is non-significant), and a label
# with the QM statistic, p-value, and sample sizes.
# Relies on globals defined above: sizetext, sizeline, sizeannotate.
myfun_LD_ECD <- function(dat, plottitle, mycol){
  dat$weight = 1/dat$var.zcor.ECD.LD
  ## calculate slope B-EF with a meta-regression
  slope <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
                  mods = ~ ECD_max + study_type,
                  random = ~ 1 | Case.study/ID,
                  data = dat)
  # calculate confidence interval around the slope
  # NOTE(review): `predframe` is computed but never used below -- confirm
  # whether it can be removed or was meant to feed a ribbon layer.
  predframe <- with(dat,
                    data.frame(zcor.ECD.LD, ECD_max,
                               preds = predict(slope)$pred,
                               lwr = predict(slope)$ci.lb,
                               upr = predict(slope)$ci.ub))
  # extract statistics
  # NOTE(review): QMpval is rounded to 2 digits BEFORE the p<0.001 check,
  # so e.g. p = 0.004 displays as "p < 0.001" -- confirm intended display.
  QMstat <- round(anova(slope, btt=2)[1]$QM, 1)
  QMpval <- round(anova(slope, btt=2)[2]$QMp, 2)
  nstud <- slope$s.nlevels[1]
  nobs <- slope$k
  # bquote to annotate the plot with stats and sample sizes
  labelstatq <- bquote(QM[df == 1] == .(QMstat))
  labelstatp <- if(QMpval>0.001) {bquote(p ==.(QMpval))}else{ bquote(p < 0.001)}
  labelstatns <- bquote((.(nstud) ~ ";" ~ .(nobs)))
  labelstat <- bquote(list(.(labelstatq), .(labelstatp), .(labelstatns)))
  ## plot
  ggplot(dat, aes(x=ECD_max, y=zcor.ECD.LD,
                  size = weight)) +
    geom_point(col = mycol, pch = 1)+
    # titles and axis labels
    ylab("Effect on decomposition")+
    xlab(if( dat$ECD.type[1]=="Stressors"){"Stressor intensity"} else{"Nutrient intensity"})+
    ggtitle(plottitle) +
    # axis lenght
    xlim(c(-2.2,10.1))+
    # mark the zeros lines
    geom_hline(yintercept = 0, color = "black")+
    geom_vline(xintercept = 0, color = "black")+
    # annotate with stats and sample sizes
    # BUG FIX: removed a stray empty argument (", ,") that passed a missing
    # positional argument to annotate().
    annotate('label', x = 10.1, y = min(dat$zcor.ECD.LD)- 0.7, hjust = 1,
             label=deparse(labelstat), parse = TRUE, size = sizeannotate, fill = "white", label.size = NA)+
    # add slope (dashed when the ECD_max meta-regression is non-significant)
    geom_abline(slope = slope$b[2], intercept = slope$b[1], col = mycol, size = sizeline,
                linetype = 1+ifelse(anova(slope, btt=2)[2]$QMp>0.05, 1, 0))+
    # theme stuff
    theme_bw() +
    theme(
      axis.text.y = element_text(face = "bold", size = sizetext),
      axis.text.x = element_text(face = "bold", size = sizetext),
      axis.title.x = element_text(size = sizetext, face = "bold"),
      axis.title.y = element_text(size = sizetext, face = "bold"),
      #legend
      plot.title = element_text(size = sizetext),
      legend.position = "none")
}
# Panels B
# Panel builder for decomposer (diversity/abundance) responses vs
# stressor/nutrient intensity. The moderator set depends on the panel:
# abundance models omit B.metric, diversity models include it.
# `xaxis` is accepted but unused -- TODO confirm it can be dropped.
# Relies on globals: sizetext, sizeline, sizeannotate.
myfun_B_ECD <- function(dat, plottitle, mycol, xaxis){
  dat$weight = 1/dat$var.zcor.ECD.B
  ## calculate slope B-EF with a meta-regression
  if( plottitle=="Abundance"){
    slope <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                    mods = ~ ECD_max + study_type + taxonomic.group,
                    random = ~ 1 | Case.study/ID,
                    data = dat)}
  else{slope <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ ECD_max + study_type + taxonomic.group + B.metric,
                       random = ~ 1 | Case.study/ID,
                       data = dat)}
  # calculate confidence interval around the slope
  # NOTE(review): `predframe` is computed but never used below.
  predframe <- with(dat,
                    data.frame(zcor.ECD.B, ECD_max,
                               preds = predict(slope)$pred,
                               lwr = predict(slope)$ci.lb,
                               upr = predict(slope)$ci.ub))
  # extract statistics
  QMstat <- round(anova(slope, btt=2)[1]$QM, 1)
  QMpval <- round(anova(slope, btt=2)[2]$QMp, 2)
  nstud <- slope$s.nlevels[1]
  nobs <- slope$k
  # bquote to annotate the plot with stats and sample sizes
  labelstatq <- bquote(QM[df == 1] == .(QMstat))
  labelstatp <- if(QMpval>0.001) {bquote(p ==.(QMpval))}else{ bquote(p < 0.001)}
  labelstatns <- bquote((.(nstud) ~ ";" ~ .(nobs)))
  labelstat <- bquote(list(.(labelstatq), .(labelstatp), .(labelstatns)))
  ## plot
  ggplot(dat, aes(x=ECD_max, y=zcor.ECD.B, size = weight)) +
    geom_point(col = mycol, pch = 1)+
    # titles and axis labels
    ylab("Effect on decomposers")+
    xlab(if( dat$ECD.type[1]=="Stressors"){"Stressor intensity"} else{"Nutrient intensity"})+
    ggtitle(plottitle) +
    # axis lenght
    xlim(c(-2.2,10.1))+
    # mark the zeros lines
    geom_hline(yintercept = 0, color = "black")+
    geom_vline(xintercept = 0, color = "black")+
    # add slopes and conf intervals (dashed when non-significant)
    geom_abline(slope = slope$b[2], intercept = slope$b[1], col = mycol, size = sizeline,
                linetype = 1+ifelse(anova(slope, btt=2)[2]$QMp>0.05, 1, 0))+ # change lty according to p-value of the meta-regression
    # annotate with stats and sample sizes
    annotate('label', x = 10.1, y = (min(dat$zcor.ECD.B)- 0.75), hjust = 1,
             label=deparse(labelstat), parse = TRUE, size = sizeannotate, fill = "white", label.size = NA)+
    # theme stuff
    theme_bw() +
    theme(
      axis.text.y = element_text(face = "bold", size = sizetext),
      axis.text.x = element_text(face = "bold", size = sizetext),
      axis.title.x = element_text(size = sizetext, face = "bold"),
      axis.title.y = element_text(size = sizetext, face = "bold"),
      #legend
      plot.title = element_text(size = sizetext),
      legend.position = "none")
}
# Assemble the 2x3 panel figure: top row stressors (blue), bottom row
# nutrients (orange). Combining ggplot objects with `+` presumably relies
# on patchwork being attached -- TODO confirm against 0201_LOAD_Data.R.
Fig_Sup_ECDmax <-
  # upper panels for stressors
  myfun_B_ECD(pol_BEF_es, "Diversity", colo_lea[1])+
  myfun_B_ECD(pol_DEF_es, "Abundance", colo_lea[1])+
  myfun_LD_ECD(pol_DEF_es[!duplicated(pol_DEF_es$clusterID_LD),], "Decomposition", colo_lea[1]) +
  #lower panels for resources
  myfun_B_ECD(nut_BEF_es, "Diversity", colo_lea[2]) +
  myfun_B_ECD(nut_DEF_es, "Abundance", colo_lea[2]) +
  myfun_LD_ECD(nut_DEF_es[!duplicated(nut_DEF_es$clusterID_LD),], "Decomposition", colo_lea[2])
# Fig_Sup_ECDmax
# save a png with high res
ppi <- 300 #final: 600 # resolution
w <- 21 # width in cm
png("figs/Fig6_ECDintensity.png",
    width=w,
    height=w/1.5,
    units = "cm",
    res=ppi)
Fig_Sup_ECDmax
dev.off()
## 4. FIGURE Categorical moderators----------------------------------------------------------
## This script creates figure 7 - mean effect sizes on decomposer diversity and abundance
# per level of categorical moderators
# For plotting purposes, separate meta-analyses are run to derive the mean effect sizes for the different datasets
# I used metafor package to derive proper confidence intervals for meta-analysis
# The complete dataset is used ( no need for data resampling in this univariate approach)
# a function to run meta-analysis on each categorical predictor and get the mean effect sizes, CI and stats
## Collect mean effect sizes and 95% CI bounds from a fitted metafor model
## into a data frame suitable for a forest plot.
##
## Args:
##   res  fitted metafor model (list-like) with `b` (coefficient matrix),
##        `ci.lb` and `ci.ub` (CI bounds per coefficient)
## Returns a data frame with columns y, ci_l, ci_h, catego_pred.
## The coefficient rownames of `res$b` are kept as data-frame rownames
## (downstream code matches moderator levels against them) and are also
## stored as the factor column `catego_pred`.
myfun_forestplot <- function(res){
  est_mat <- cbind(res$b, res$ci.lb, res$ci.ub)
  out <- data.frame(est_mat)
  colnames(out) <- c("y", "ci_l", "ci_h")
  out$catego_pred <- factor(rownames(res$b))
  return(out)
}
## 4.1. STUDY TYPE
# extract mean effect sizes and CI for levels of study type (observational versus experimental studies)
# `- 1` removes the intercept so the model returns one coefficient per factor
# level, i.e. the per-level mean effect sizes directly.
# countstudies() (project helper) gives no. studies / observations per level;
# it is computed once per dataset and reused for both columns (the original
# called it twice).
# Stressors - Biodiv dataset
study_pol_bef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                        mods = ~ study_type - 1,
                        random = ~ 1 | Case.study / ID,
                        data = pol_BEF_es)
res.study_pol_bef <- myfun_forestplot(study_pol_bef)
counts <- countstudies(pol_BEF_es, study_type)
res.study_pol_bef$no.stu <- as.numeric(counts$no.stu)
res.study_pol_bef$no.obs <- as.numeric(counts$no.obs)
# Stressors - Abdc dataset
study_pol_def <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                        mods = ~ study_type - 1,
                        random = ~ 1 | Case.study / ID,
                        data = pol_DEF_es)
res.study_pol_def <- myfun_forestplot(study_pol_def)
counts <- countstudies(pol_DEF_es, study_type)
res.study_pol_def$no.stu <- as.numeric(counts$no.stu)
res.study_pol_def$no.obs <- as.numeric(counts$no.obs)
# Nutrients - Biodiv dataset
study_nut_bef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                        mods = ~ study_type - 1,
                        random = ~ 1 | Case.study / ID,
                        data = nut_BEF_es)
res.study_nut_bef <- myfun_forestplot(study_nut_bef)
counts <- countstudies(nut_BEF_es, study_type)
res.study_nut_bef$no.stu <- as.numeric(counts$no.stu)
res.study_nut_bef$no.obs <- as.numeric(counts$no.obs)
# Nutrients - Abdc dataset
study_nut_def <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                        mods = ~ study_type - 1,
                        random = ~ 1 | Case.study / ID,
                        data = nut_DEF_es)
res.study_nut_def <- myfun_forestplot(study_nut_def)
counts <- countstudies(nut_DEF_es, study_type)
res.study_nut_def$no.stu <- as.numeric(counts$no.stu)
res.study_nut_def$no.obs <- as.numeric(counts$no.obs)
## 4.2. TAXO GROUP
# extract mean effect sizes and CI for levels of taxonomic group (animal versus microbial decomposers)
# Same intercept-free parameterisation as 4.1; countstudies() is computed once
# per dataset and reused (the original called it twice per dataset).
taxo_pol_bef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ taxonomic.group - 1,
                       random = ~ 1 | Case.study / ID,
                       data = pol_BEF_es)
res.taxo_pol_bef <- myfun_forestplot(taxo_pol_bef)
counts <- countstudies(pol_BEF_es, taxonomic.group)
res.taxo_pol_bef$no.stu <- as.numeric(counts$no.stu)
res.taxo_pol_bef$no.obs <- as.numeric(counts$no.obs)
taxo_pol_def <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ taxonomic.group - 1,
                       random = ~ 1 | Case.study / ID,
                       data = pol_DEF_es)
res.taxo_pol_def <- myfun_forestplot(taxo_pol_def)
counts <- countstudies(pol_DEF_es, taxonomic.group)
res.taxo_pol_def$no.stu <- as.numeric(counts$no.stu)
res.taxo_pol_def$no.obs <- as.numeric(counts$no.obs)
taxo_nut_bef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ taxonomic.group - 1,
                       random = ~ 1 | Case.study / ID,
                       data = nut_BEF_es)
res.taxo_nut_bef <- myfun_forestplot(taxo_nut_bef)
counts <- countstudies(nut_BEF_es, taxonomic.group)
res.taxo_nut_bef$no.stu <- as.numeric(counts$no.stu)
res.taxo_nut_bef$no.obs <- as.numeric(counts$no.obs)
taxo_nut_def <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ taxonomic.group - 1,
                       random = ~ 1 | Case.study / ID,
                       data = nut_DEF_es)
res.taxo_nut_def <- myfun_forestplot(taxo_nut_def)
counts <- countstudies(nut_DEF_es, taxonomic.group)
res.taxo_nut_def$no.stu <- as.numeric(counts$no.stu)
res.taxo_nut_def$no.obs <- as.numeric(counts$no.obs)
## Store the results for each dataset.
## rbind() keeps the metafor coefficient rownames (e.g. "study_typeexperimental",
## "taxonomic.groupAnimals"); label_categomods() maps those rownames to
## human-readable moderator labels for plotting (previously the same nested
## ifelse/grepl chain was repeated four times).
label_categomods <- function(res_df) {
  factor(ifelse(grepl("expe", rownames(res_df)), "Experimental",
                ifelse(grepl("obser", rownames(res_df)), "Observational",
                       ifelse(grepl("groupA", rownames(res_df)), "Animals", "Microbes"))))
}
Res.forest_pol_bef <- rbind(res.study_pol_bef, res.taxo_pol_bef)
Res.forest_pol_bef$categomods <- label_categomods(Res.forest_pol_bef)
Res.forest_pol_def <- rbind(res.study_pol_def, res.taxo_pol_def)
Res.forest_pol_def$categomods <- label_categomods(Res.forest_pol_def)
Res.forest_nut_bef <- rbind(res.study_nut_bef, res.taxo_nut_bef)
Res.forest_nut_bef$categomods <- label_categomods(Res.forest_nut_bef)
Res.forest_nut_def <- rbind(res.study_nut_def, res.taxo_nut_def)
Res.forest_nut_def$categomods <- label_categomods(Res.forest_nut_def)
## FIGURE - make a Forest plots
# pick colors
# colourblind-safe palette: blue = stressors, vermillion = nutrients
colo_lea <- c("#0072B2", "#D55E00")
# set sizes of plotted elements (from Fig1)
sizetext <- 13
sizepoint <- 3
widtherrorbar <- 0.1
sizeerrorbar <- 0.4
## Forest-plot panel: mean effect size and 95% CI per level of the categorical
## moderators (study type, taxonomic group).
##
## Args:
##   df        rbind of myfun_forestplot() outputs: columns y, ci_l, ci_h,
##             no.stu, no.obs and the factor `categomods`
##   plottitle panel title
##   mycol     single colour used for points and error bars
##
## NOTE(review): the axis labels index df$no.stu / df$no.obs by row position,
## assuming rows arrive in the order experimental, observational, animals,
## microbes produced by the upstream rbind() -- confirm if that order changes.
myfun_Forestggplot_B2 <- function(df, plottitle, mycol){
  # reorder factor levels (plotted bottom-to-top after coord_flip)
  df$categomods2 <- factor(df$categomods, c("Observational", "Experimental", "Microbes", "Animals"))
  # make plot
  ggplot(df, aes(x=categomods2, y=y, shape = categomods2))+
    # error bars are conf intervals 95%; reference columns by bare name inside
    # aes() -- df$-indexing inside aes() bypasses ggplot's data masking and is
    # fragile (breaks under facetting/stat transformations)
    geom_errorbar(width=widtherrorbar,
                  size = sizeerrorbar,
                  aes(ymin = ci_l,
                      ymax = ci_h), # confidence intervals from Std err of models
                  col = mycol) +
    # points shape and colors
    geom_point(size= sizepoint, col = mycol, fill = mycol)+
    # change their shape (pch)
    scale_shape_manual(values=c(17,2,19,1))+ # Use a hollow circle and triangle
    # axis
    ylim(-1.2, 1.2)+
    ylab("Effect size")+
    xlab(" ")+
    # flip the coordinates to make forest plot
    coord_flip()+
    # add lines
    geom_hline(yintercept = 0)+
    # dashed separator between study-type and taxonomic-group levels
    geom_vline(xintercept = 2.5, lty= 2)+
    # add no. studies and observation to axis labels
    scale_x_discrete(breaks=c("Observational", "Experimental", "Microbes", "Animals"),
                     labels=c(paste("Obs. (",df$no.stu[2], "; ", df$no.obs[2], ")", sep = ""),
                              paste("Expe. (", df$no.stu[1], "; ", df$no.obs[1],")", sep = ""),
                              paste("Microbes (", df$no.stu[4], "; ", df$no.obs[4],")", sep = ""),
                              paste("Animals (", df$no.stu[3], "; ", df$no.obs[3],")", sep = ""))) +
    # theme and design
    theme_bw() +
    ggtitle(plottitle) +
    theme(axis.text.y=element_text(face = "bold", size = sizetext),
          axis.text.x=element_text(face = "bold", size = sizetext),
          axis.title.x = element_text(size=sizetext, face = "bold"),
          axis.title.y = element_text(size=sizetext, face = "bold"),
          plot.title = element_text(size = sizetext),
          legend.title = element_text(size = sizetext),
          legend.position = "none",
          legend.text = element_text(size = sizetext))
}
# Plots for the 4 datasets (2x2: stressors/nutrients x diversity/abundance),
# combined with `+` into one multi-panel figure
Fig_catmods <-
myfun_Forestggplot_B2(df = Res.forest_pol_bef,
plottitle = "Stressors - Biodiversity",
mycol = colo_lea[1] )+
myfun_Forestggplot_B2(df = Res.forest_pol_def,
plottitle = "Stressors - Abundance",
mycol = colo_lea[1] )+
myfun_Forestggplot_B2(df = Res.forest_nut_bef,
plottitle = "Nutrients - Biodiversity",
mycol = colo_lea[2] )+
myfun_Forestggplot_B2(df = Res.forest_nut_def,
plottitle = "Nutrients - Abundance",
mycol = colo_lea[2] )
# Fig_catmods
# save a png with high res
ppi <- 300 # 600 final resolution
w <- 21 # width in cm
png("figs/Fig7_CategoMods.png",
width=w,
height=w/1.3,
units = "cm",
res=ppi)
Fig_catmods # printing the object renders it into the open png device
dev.off()
| /0301_SecondLevelMA.R | permissive | elifesciences-publications/BEFunderGlobalChange | R | false | false | 22,409 | r | ####################################################################################################################
##
## SECOND-LEVEL META-ANALYSIS
##
###################################################################################################################
# This script reproduces the results of the meta-analysis testing the effect of covariates on
# decomposers and decompositionr esponses to stressors and nutrients
## 1. LOAD Data---------------------------------------------------------------------------
# Expected to provide the effect-size data frames (pol_BEF_es, pol_DEF_es,
# nut_BEF_es, nut_DEF_es) and helpers (e.g. countstudies) used below --
# confirm against 0201_LOAD_Data.R.
source("0201_LOAD_Data.R")
## 2. MODELS------------------------------------------------------------------------------
## Run second-level meta-analyses of decomposers responses (diversity and abundance)
# with metafor to derive confidence intervals, QM stats and p-value
# and using all the data (no need for data resampling in this univariate approach)
# these models correspond to the submodels for biodiversity and abundance responses in the piecewise SEMs
# Multilevel models: effect sizes (ID) nested within case studies.
# Diversity (BEF) models also control for the diversity metric (B.metric);
# abundance (DEF) models do not include it.
# Stressors - diversity
modelrma_polbef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
mods = ~ ECD_max +
study_type + taxonomic.group + B.metric,
random = ~ 1 | Case.study / ID,
data = pol_BEF_es)
# Stressors - abundance
modelrma_poldef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
mods = ~ ECD_max +
study_type + taxonomic.group,
random = ~ 1 | Case.study / ID,
data = pol_DEF_es)
# Nutrients - diversity
modelrma_nutbef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
mods = ~ ECD_max +
study_type + taxonomic.group + B.metric,
random = ~ 1 | Case.study / ID,
data = nut_BEF_es)
# Nutrients - abundance
modelrma_nutdef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
mods = ~ ECD_max +
study_type + taxonomic.group,
random = ~ 1 | Case.study / ID,
data = nut_DEF_es)
# as all factors have only two levels, the pvalues in the summary test the significance of the factor overall
# helper: QM statistic and p-value for the coefficient(s) indexed by `btt`
# (replaces ten copies of `as.numeric(c(anova(model, btt = .)[1:2]))`)
qm_stat <- function(model, btt) as.numeric(c(anova(model, btt = btt)[1:2]))
# taxonomic.group effect (4th coefficient: intercept, ECD_max, study_type, taxonomic.group)
resanova_taxo <- data.frame(rbind(
  pol_bef = qm_stat(modelrma_polbef, 4),
  pol_def = qm_stat(modelrma_poldef, 4),
  nut_bef = qm_stat(modelrma_nutbef, 4),
  nut_def = qm_stat(modelrma_nutdef, 4)))
resanova_taxo$Predictor <- rep("Taxonomic group", 4)
resanova_taxo$Response <- rep(c("Diversity", "Abundance"), 2)
resanova_taxo$ECD.type <- c(rep("Stressors", 2), rep("Nutrients", 2))
# study type effect (3rd coefficient)
resanova_study <- data.frame(rbind(
  pol_bef = qm_stat(modelrma_polbef, 3),
  pol_def = qm_stat(modelrma_poldef, 3),
  nut_bef = qm_stat(modelrma_nutbef, 3),
  nut_def = qm_stat(modelrma_nutdef, 3)))
resanova_study$Predictor <- rep("Study type", 4)
resanova_study$Response <- rep(c("Diversity", "Abundance"), 2)
resanova_study$ECD.type <- c(rep("Stressors", 2), rep("Nutrients", 2))
# B.metric effect (5th coefficient; only present in the diversity models,
# hence the NA rows for the abundance datasets)
resanova_Bmetric <- data.frame(rbind(
  pol_bef = qm_stat(modelrma_polbef, 5),
  pol_def = rep(NA, 2),
  nut_bef = qm_stat(modelrma_nutbef, 5),
  nut_def = rep(NA, 2)))
resanova_Bmetric$Predictor <- rep("Diversity metric", 4)
resanova_Bmetric$Response <- rep(c("Diversity", "Abundance"), 2)
resanova_Bmetric$ECD.type <- c(rep("Stressors", 2), rep("Nutrients", 2))
names(resanova_taxo)[c(1,2)] <- names(resanova_study)[c(1,2)] <- names(resanova_Bmetric)[c(1,2)] <- c("QM", "P")
print(resanova_taxo)
print(resanova_study)
print(resanova_Bmetric)
# save results
write.csv(resanova_taxo, "tables/ResMATaxo.csv")
write.csv(resanova_study, "tables/ResMAStudy.csv")
write.csv(resanova_Bmetric, "tables/ResMABmetric.csv")
# second-level meta-analyses for decomposition: remove duplicates ES on decompo
# Decomposition response modelled on the decomposer effect size (zcor.ECD.B),
# intensity (ECD_max) and study type; duplicated decomposition effect sizes
# are dropped via clusterID_LD before fitting.
modelrmald_polbef <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
mods = ~ zcor.ECD.B + ECD_max +
study_type,
random = ~ 1 | Case.study / ID,
data = pol_BEF_es[!duplicated(pol_BEF_es$clusterID_LD),])
modelrmald_poldef <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
mods = ~ zcor.ECD.B+ ECD_max +
study_type,
random = ~ 1 | Case.study / ID,
data = pol_DEF_es[!duplicated(pol_DEF_es$clusterID_LD),])
modelrmald_nutbef <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
mods = ~ zcor.ECD.B+ ECD_max +
study_type,
random = ~ 1 | Case.study / ID,
data = nut_BEF_es[!duplicated(nut_BEF_es$clusterID_LD),])
modelrmald_nutdef <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
mods = ~ zcor.ECD.B+ ECD_max +
study_type,
random = ~ 1 | Case.study / ID,
data = nut_DEF_es[!duplicated(nut_DEF_es$clusterID_LD),])
# as all factors have only two levels, the pvalues in the summary test the significance of the factor
# study type effect
# btt = 4 targets the study_type coefficient
# (order: intercept, zcor.ECD.B, ECD_max, study_type)
resanovaLD_study <- data.frame(rbind(
pol_bef = as.numeric(c(anova(modelrmald_polbef, btt = 4)[1:2])),
pol_def = as.numeric(c(anova(modelrmald_poldef, btt = 4)[1:2])),
nut_bef = as.numeric(c(anova(modelrmald_nutbef, btt = 4)[1:2])),
nut_def = as.numeric(c(anova(modelrmald_nutdef, btt = 4)[1:2]))))
resanovaLD_study$Predictor <- rep("Study type", 4)
resanovaLD_study$Response <- rep(c("Decomposition (Div)", "Decomposition (Abd)"), 2)
resanovaLD_study$ECD.type <- c(rep("Stressors", 2), rep("Nutrients", 2))
names(resanovaLD_study)[c(1,2)] <- c("QM", "P")
print(resanovaLD_study)
write.csv(resanovaLD_study, "tables/ResMALDstudy.csv")
## 3. FIGURE Stressor and nutrient intensity effects-----------------------------------------
# This code creates Figure 6 of the manuscript
# colourblind-safe palette: blue = stressors, vermillion = nutrients
colo_lea <- c("#0072B2", "#D55E00")
# set sizes of plotted elements
sizetext <- 12
sizelegend <- 11
sizepoint <- 1
sizeline <- 0.8
sizeannotate <- 3.4
## Function to create panel for each outcome
# Panels LD
## Panel: decomposition effect sizes vs stressor/nutrient intensity (ECD_max).
## Fits a meta-regression (metafor::rma.mv) of the decomposition effect size
## on intensity (controlling for study type), then draws a weighted scatter
## with the fitted slope; the line is dashed when the ECD_max coefficient is
## not significant (p > 0.05).
##
## Args:
##   dat       effect-size data frame (needs zcor.ECD.LD, var.zcor.ECD.LD,
##             ECD_max, study_type, ECD.type, Case.study, ID)
##   plottitle panel title
##   mycol     colour for points and slope line
myfun_LD_ECD <- function(dat, plottitle, mycol){
  # meta-analytic weight: inverse sampling variance (drives point size)
  dat$weight = 1/dat$var.zcor.ECD.LD
  ## calculate slope B-EF with a meta-regression
  slope <- rma.mv(zcor.ECD.LD, var.zcor.ECD.LD,
                  mods = ~ ECD_max + study_type,
                  random = ~ 1 | Case.study/ID,
                  data = dat)
  # extract statistics for the ECD_max coefficient (btt=2 -> 1 df test)
  # (an unused `predframe` prediction data frame was removed here)
  QMstat <- round(anova(slope, btt=2)[1]$QM, 1)
  QMpval <- round(anova(slope, btt=2)[2]$QMp, 2)
  nstud <- slope$s.nlevels[1] # no. case studies (outer random level)
  nobs <- slope$k             # no. effect sizes
  # bquote to annotate the plot with stats and sample sizes
  labelstatq <- bquote(QM[df == 1] == .(QMstat))
  labelstatp <- if(QMpval>0.001) {bquote(p ==.(QMpval))}else{ bquote(p < 0.001)}
  labelstatns <- bquote((.(nstud) ~ ";" ~ .(nobs)))
  labelstat <- bquote(list(.(labelstatq), .(labelstatp), .(labelstatns)))
  ## plot
  ggplot(dat, aes(x=ECD_max, y=zcor.ECD.LD,
                  size = weight)) +
    geom_point(col = mycol, pch = 1)+
    # titles and axis labels
    ylab("Effect on decomposition")+
    xlab(if( dat$ECD.type[1]=="Stressors"){"Stressor intensity"} else{"Nutrient intensity"})+
    ggtitle(plottitle) +
    # axis length
    # ylim(c(-3.1,2.6))+
    xlim(c(-2.2,10.1))+
    # mark the zero lines
    geom_hline(yintercept = 0, color = "black")+
    geom_vline(xintercept = 0, color = "black")+
    # annotate with stats and sample sizes
    # (fixed: a stray empty argument after `size = sizeannotate` -- `, ,` --
    # makes annotate() fail at run time)
    annotate('label', x = 10.1, y = min(dat$zcor.ECD.LD)- 0.7, hjust = 1,
             label=deparse(labelstat), parse = TRUE, size = sizeannotate, fill = "white", label.size = NA)+
    # add slope; lty switches to dashed when the meta-regression p > 0.05
    geom_abline(slope = slope$b[2], intercept = slope$b[1], col = mycol, size = sizeline,
                linetype = 1+ifelse(anova(slope, btt=2)[2]$QMp>0.05, 1, 0))+
    # theme stuff
    theme_bw() +
    theme(
      axis.text.y = element_text(face = "bold", size = sizetext),
      axis.text.x = element_text(face = "bold", size = sizetext),
      axis.title.x = element_text(size = sizetext, face = "bold"),
      axis.title.y = element_text(size = sizetext, face = "bold"),
      plot.title = element_text(size = sizetext),
      legend.position = "none")
}
# Panels B
## Panel: decomposer responses (diversity or abundance) vs stressor/nutrient
## intensity (ECD_max), with the meta-regression slope overlaid; the line is
## dashed when the ECD_max coefficient is not significant (p > 0.05).
##
## Args:
##   dat       effect-size data frame (needs zcor.ECD.B, var.zcor.ECD.B,
##             ECD_max, study_type, taxonomic.group, ECD.type, Case.study, ID;
##             B.metric only for the diversity datasets)
##   plottitle "Abundance" fits the model without B.metric (the abundance
##             datasets have no diversity metric); any other title includes it
##   mycol     colour for points and slope line
##   xaxis     unused; retained so the signature stays backward compatible
myfun_B_ECD <- function(dat, plottitle, mycol, xaxis){
  # meta-analytic weight: inverse sampling variance (drives point size)
  dat$weight = 1/dat$var.zcor.ECD.B
  ## calculate slope B-EF with a meta-regression
  if( plottitle=="Abundance"){
    slope <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                    mods = ~ ECD_max + study_type + taxonomic.group,
                    random = ~ 1 | Case.study/ID,
                    data = dat)}
  else{slope <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ ECD_max + study_type + taxonomic.group + B.metric,
                       random = ~ 1 | Case.study/ID,
                       data = dat)}
  # extract statistics for the ECD_max coefficient (btt=2 -> 1 df test)
  # (an unused `predframe` prediction data frame was removed here)
  QMstat <- round(anova(slope, btt=2)[1]$QM, 1)
  QMpval <- round(anova(slope, btt=2)[2]$QMp, 2)
  nstud <- slope$s.nlevels[1] # no. case studies (outer random level)
  nobs <- slope$k             # no. effect sizes
  # bquote to annotate the plot with stats and sample sizes
  labelstatq <- bquote(QM[df == 1] == .(QMstat))
  labelstatp <- if(QMpval>0.001) {bquote(p ==.(QMpval))}else{ bquote(p < 0.001)}
  labelstatns <- bquote((.(nstud) ~ ";" ~ .(nobs)))
  labelstat <- bquote(list(.(labelstatq), .(labelstatp), .(labelstatns)))
  ## plot
  ggplot(dat, aes(x=ECD_max, y=zcor.ECD.B, size = weight)) +
    geom_point(col = mycol, pch = 1)+
    # titles and axis labels
    ylab("Effect on decomposers")+
    xlab(if( dat$ECD.type[1]=="Stressors"){"Stressor intensity"} else{"Nutrient intensity"})+
    ggtitle(plottitle) +
    # axis length
    # ylim(c(-3.1,2.6))+
    xlim(c(-2.2,10.1))+
    # mark the zero lines
    geom_hline(yintercept = 0, color = "black")+
    geom_vline(xintercept = 0, color = "black")+
    # add slope; lty switches to dashed when the meta-regression p > 0.05
    geom_abline(slope = slope$b[2], intercept = slope$b[1], col = mycol, size = sizeline,
                linetype = 1+ifelse(anova(slope, btt=2)[2]$QMp>0.05, 1, 0))+
    # annotate with stats and sample sizes
    annotate('label', x = 10.1, y = (min(dat$zcor.ECD.B)- 0.75), hjust = 1,
             label=deparse(labelstat), parse = TRUE, size = sizeannotate, fill = "white", label.size = NA)+
    # theme stuff
    theme_bw() +
    theme(
      axis.text.y = element_text(face = "bold", size = sizetext),
      axis.text.x = element_text(face = "bold", size = sizetext),
      axis.title.x = element_text(size = sizetext, face = "bold"),
      axis.title.y = element_text(size = sizetext, face = "bold"),
      plot.title = element_text(size = sizetext),
      legend.position = "none")
}
## Assemble the six intensity panels into a single figure; `+` composes the
## ggplots into a grid (patchwork-style composition; assumes the composing
## package is attached elsewhere -- TODO confirm).
Fig_Sup_ECDmax <-
# upper panels for stressors
myfun_B_ECD(pol_BEF_es, "Diversity", colo_lea[1])+
myfun_B_ECD(pol_DEF_es, "Abundance", colo_lea[1])+
# decomposition panel: drop duplicated decomposition effect sizes
# (one row per clusterID_LD)
myfun_LD_ECD(pol_DEF_es[!duplicated(pol_DEF_es$clusterID_LD),], "Decomposition", colo_lea[1]) +
#lower panels for resources
myfun_B_ECD(nut_BEF_es, "Diversity", colo_lea[2]) +
myfun_B_ECD(nut_DEF_es, "Abundance", colo_lea[2]) +
myfun_LD_ECD(nut_DEF_es[!duplicated(nut_DEF_es$clusterID_LD),], "Decomposition", colo_lea[2])
# Fig_Sup_ECDmax
# save a png with high res
ppi <- 300 #final: 600 # resolution
w <- 21 # width in cm
png("figs/Fig6_ECDintensity.png",
width=w,
height=w/1.5,
units = "cm",
res=ppi)
Fig_Sup_ECDmax # printing the object renders it into the open png device
dev.off()
## 4. FIGURE Categorical moderators----------------------------------------------------------
## This script creates figure 7 - mean effect sizes on decomposer diversity and abundance
# per level of categorical moderators
# For plotting purposes, separate meta-analyses are run to derive the mean effect sizes for the different datasets
# I used metafor package to derive proper confidence intervals for meta-analysis
# The complete dataset is used ( no need for data resampling in this univariate approach)
# a function to run meta-analysis on each categorical predictor and get the mean effect sizes, CI and stats
## Collect mean effect sizes and 95% CI bounds from a fitted metafor model
## into a data frame suitable for a forest plot.
##
## Args:
##   res  fitted metafor model (list-like) with `b` (coefficient matrix),
##        `ci.lb` and `ci.ub` (CI bounds per coefficient)
## Returns a data frame with columns y, ci_l, ci_h, catego_pred.
## The coefficient rownames of `res$b` are kept as data-frame rownames
## (downstream code matches moderator levels against them) and are also
## stored as the factor column `catego_pred`.
myfun_forestplot <- function(res){
  est_mat <- cbind(res$b, res$ci.lb, res$ci.ub)
  out <- data.frame(est_mat)
  colnames(out) <- c("y", "ci_l", "ci_h")
  out$catego_pred <- factor(rownames(res$b))
  return(out)
}
## 4.1. STUDY TYPE
# extract mean effect sizes and CI for levels of study type (observational versus experimental studies)
# `- 1` removes the intercept so the model returns one coefficient per factor
# level, i.e. the per-level mean effect sizes directly.
# countstudies() (project helper) gives no. studies / observations per level;
# it is computed once per dataset and reused for both columns (the original
# called it twice).
# Stressors - Biodiv dataset
study_pol_bef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                        mods = ~ study_type - 1,
                        random = ~ 1 | Case.study / ID,
                        data = pol_BEF_es)
res.study_pol_bef <- myfun_forestplot(study_pol_bef)
counts <- countstudies(pol_BEF_es, study_type)
res.study_pol_bef$no.stu <- as.numeric(counts$no.stu)
res.study_pol_bef$no.obs <- as.numeric(counts$no.obs)
# Stressors - Abdc dataset
study_pol_def <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                        mods = ~ study_type - 1,
                        random = ~ 1 | Case.study / ID,
                        data = pol_DEF_es)
res.study_pol_def <- myfun_forestplot(study_pol_def)
counts <- countstudies(pol_DEF_es, study_type)
res.study_pol_def$no.stu <- as.numeric(counts$no.stu)
res.study_pol_def$no.obs <- as.numeric(counts$no.obs)
# Nutrients - Biodiv dataset
study_nut_bef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                        mods = ~ study_type - 1,
                        random = ~ 1 | Case.study / ID,
                        data = nut_BEF_es)
res.study_nut_bef <- myfun_forestplot(study_nut_bef)
counts <- countstudies(nut_BEF_es, study_type)
res.study_nut_bef$no.stu <- as.numeric(counts$no.stu)
res.study_nut_bef$no.obs <- as.numeric(counts$no.obs)
# Nutrients - Abdc dataset
study_nut_def <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                        mods = ~ study_type - 1,
                        random = ~ 1 | Case.study / ID,
                        data = nut_DEF_es)
res.study_nut_def <- myfun_forestplot(study_nut_def)
counts <- countstudies(nut_DEF_es, study_type)
res.study_nut_def$no.stu <- as.numeric(counts$no.stu)
res.study_nut_def$no.obs <- as.numeric(counts$no.obs)
## 4.2. TAXO GROUP
# extract mean effect sizes and CI for levels of taxonomic group (animal versus microbial decomposers)
# Same intercept-free parameterisation as 4.1; countstudies() is computed once
# per dataset and reused (the original called it twice per dataset).
taxo_pol_bef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ taxonomic.group - 1,
                       random = ~ 1 | Case.study / ID,
                       data = pol_BEF_es)
res.taxo_pol_bef <- myfun_forestplot(taxo_pol_bef)
counts <- countstudies(pol_BEF_es, taxonomic.group)
res.taxo_pol_bef$no.stu <- as.numeric(counts$no.stu)
res.taxo_pol_bef$no.obs <- as.numeric(counts$no.obs)
taxo_pol_def <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ taxonomic.group - 1,
                       random = ~ 1 | Case.study / ID,
                       data = pol_DEF_es)
res.taxo_pol_def <- myfun_forestplot(taxo_pol_def)
counts <- countstudies(pol_DEF_es, taxonomic.group)
res.taxo_pol_def$no.stu <- as.numeric(counts$no.stu)
res.taxo_pol_def$no.obs <- as.numeric(counts$no.obs)
taxo_nut_bef <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ taxonomic.group - 1,
                       random = ~ 1 | Case.study / ID,
                       data = nut_BEF_es)
res.taxo_nut_bef <- myfun_forestplot(taxo_nut_bef)
counts <- countstudies(nut_BEF_es, taxonomic.group)
res.taxo_nut_bef$no.stu <- as.numeric(counts$no.stu)
res.taxo_nut_bef$no.obs <- as.numeric(counts$no.obs)
taxo_nut_def <- rma.mv(zcor.ECD.B, var.zcor.ECD.B,
                       mods = ~ taxonomic.group - 1,
                       random = ~ 1 | Case.study / ID,
                       data = nut_DEF_es)
res.taxo_nut_def <- myfun_forestplot(taxo_nut_def)
counts <- countstudies(nut_DEF_es, taxonomic.group)
res.taxo_nut_def$no.stu <- as.numeric(counts$no.stu)
res.taxo_nut_def$no.obs <- as.numeric(counts$no.obs)
## Store the results for each dataset.
## rbind() keeps the metafor coefficient rownames (e.g. "study_typeexperimental",
## "taxonomic.groupAnimals"); label_categomods() maps those rownames to
## human-readable moderator labels for plotting (previously the same nested
## ifelse/grepl chain was repeated four times).
label_categomods <- function(res_df) {
  factor(ifelse(grepl("expe", rownames(res_df)), "Experimental",
                ifelse(grepl("obser", rownames(res_df)), "Observational",
                       ifelse(grepl("groupA", rownames(res_df)), "Animals", "Microbes"))))
}
Res.forest_pol_bef <- rbind(res.study_pol_bef, res.taxo_pol_bef)
Res.forest_pol_bef$categomods <- label_categomods(Res.forest_pol_bef)
Res.forest_pol_def <- rbind(res.study_pol_def, res.taxo_pol_def)
Res.forest_pol_def$categomods <- label_categomods(Res.forest_pol_def)
Res.forest_nut_bef <- rbind(res.study_nut_bef, res.taxo_nut_bef)
Res.forest_nut_bef$categomods <- label_categomods(Res.forest_nut_bef)
Res.forest_nut_def <- rbind(res.study_nut_def, res.taxo_nut_def)
Res.forest_nut_def$categomods <- label_categomods(Res.forest_nut_def)
## FIGURE - make a Forest plots
# pick colors
# colourblind-safe palette: blue = stressors, vermillion = nutrients
colo_lea <- c("#0072B2", "#D55E00")
# set sizes of plotted elements (from Fig1)
sizetext <- 13
sizepoint <- 3
widtherrorbar <- 0.1
sizeerrorbar <- 0.4
## Forest-plot panel: mean effect size and 95% CI per level of the categorical
## moderators (study type, taxonomic group).
##
## Args:
##   df        rbind of myfun_forestplot() outputs: columns y, ci_l, ci_h,
##             no.stu, no.obs and the factor `categomods`
##   plottitle panel title
##   mycol     single colour used for points and error bars
##
## NOTE(review): the axis labels index df$no.stu / df$no.obs by row position,
## assuming rows arrive in the order experimental, observational, animals,
## microbes produced by the upstream rbind() -- confirm if that order changes.
myfun_Forestggplot_B2 <- function(df, plottitle, mycol){
  # reorder factor levels (plotted bottom-to-top after coord_flip)
  df$categomods2 <- factor(df$categomods, c("Observational", "Experimental", "Microbes", "Animals"))
  # make plot
  ggplot(df, aes(x=categomods2, y=y, shape = categomods2))+
    # error bars are conf intervals 95%; reference columns by bare name inside
    # aes() -- df$-indexing inside aes() bypasses ggplot's data masking and is
    # fragile (breaks under facetting/stat transformations)
    geom_errorbar(width=widtherrorbar,
                  size = sizeerrorbar,
                  aes(ymin = ci_l,
                      ymax = ci_h), # confidence intervals from Std err of models
                  col = mycol) +
    # points shape and colors
    geom_point(size= sizepoint, col = mycol, fill = mycol)+
    # change their shape (pch)
    scale_shape_manual(values=c(17,2,19,1))+ # Use a hollow circle and triangle
    # axis
    ylim(-1.2, 1.2)+
    ylab("Effect size")+
    xlab(" ")+
    # flip the coordinates to make forest plot
    coord_flip()+
    # add lines
    geom_hline(yintercept = 0)+
    # dashed separator between study-type and taxonomic-group levels
    geom_vline(xintercept = 2.5, lty= 2)+
    # add no. studies and observation to axis labels
    scale_x_discrete(breaks=c("Observational", "Experimental", "Microbes", "Animals"),
                     labels=c(paste("Obs. (",df$no.stu[2], "; ", df$no.obs[2], ")", sep = ""),
                              paste("Expe. (", df$no.stu[1], "; ", df$no.obs[1],")", sep = ""),
                              paste("Microbes (", df$no.stu[4], "; ", df$no.obs[4],")", sep = ""),
                              paste("Animals (", df$no.stu[3], "; ", df$no.obs[3],")", sep = ""))) +
    # theme and design
    theme_bw() +
    ggtitle(plottitle) +
    theme(axis.text.y=element_text(face = "bold", size = sizetext),
          axis.text.x=element_text(face = "bold", size = sizetext),
          axis.title.x = element_text(size=sizetext, face = "bold"),
          axis.title.y = element_text(size=sizetext, face = "bold"),
          plot.title = element_text(size = sizetext),
          legend.title = element_text(size = sizetext),
          legend.position = "none",
          legend.text = element_text(size = sizetext))
}
# Plots for the 4 datasets (2x2: stressors/nutrients x diversity/abundance),
# combined with `+` into one multi-panel figure
Fig_catmods <-
myfun_Forestggplot_B2(df = Res.forest_pol_bef,
plottitle = "Stressors - Biodiversity",
mycol = colo_lea[1] )+
myfun_Forestggplot_B2(df = Res.forest_pol_def,
plottitle = "Stressors - Abundance",
mycol = colo_lea[1] )+
myfun_Forestggplot_B2(df = Res.forest_nut_bef,
plottitle = "Nutrients - Biodiversity",
mycol = colo_lea[2] )+
myfun_Forestggplot_B2(df = Res.forest_nut_def,
plottitle = "Nutrients - Abundance",
mycol = colo_lea[2] )
# Fig_catmods
# save a png with high res
ppi <- 300 # 600 final resolution
w <- 21 # width in cm
png("figs/Fig7_CategoMods.png",
width=w,
height=w/1.3,
units = "cm",
res=ppi)
Fig_catmods # printing the object renders it into the open png device
dev.off()
|
library(Rsolnp)
library(data.table)
library(ggplot2)
library(optparse)
library(raster)
source("hmm_functions.R") # project HMM estimation helpers used below
# Command-line options: raster path, which window to analyse (row/col/width),
# panel subsampling rate, rare-class frequency cutoff, number of random starts
# for the EM and minimum-distance estimators, and class-combination switches.
opt_list <- list(make_option("--mapbiomas_raster_path", default="./HMM_MapBiomas_v2/mapbiomas.vrt"),
make_option("--row", default=50000, type="integer"),
make_option("--col", default=51000, type="integer"),
make_option("--width_in_pixels", default=1000, type="integer"),
make_option("--subsample", default=0.1, type="double"),
make_option("--class_frequency_cutoff", default=0.005, type="double"),
make_option("--n_random_starts_em", default=2, type="integer"),
make_option("--n_random_starts_md", default=1, type="integer"),
make_option("--grassland_as_forest", default=FALSE, action="store_true"),
make_option("--combine_other_non_forest", default=FALSE, action="store_true"),
make_option("--skip_ml_if_md_is_diag_dominant", default=FALSE, action="store_true"),
make_option("--use_md_as_initial_values_for_em", default=FALSE, action="store_true"))
opt <- parse_args(OptionParser(option_list=opt_list))
# log the effective configuration for reproducibility
message("command line options: ", paste(sprintf("%s=%s", names(opt), opt), collapse=", "))
# Load the multi-band raster; bands correspond to the layers of the VRT
# (presumably one year of MapBiomas land-cover classes each -- TODO confirm).
mapbiomas <- stack(opt$mapbiomas_raster_path)
nlayers(mapbiomas)
# Read a square window of pixel values: one row per pixel, one column per band.
window <- getValuesBlock(mapbiomas,
row=opt$row,
col=opt$col,
nrows=opt$width_in_pixels,
ncols=opt$width_in_pixels)
dim(window)
# NOTE(review): the extent spans rows/cols `row` to `row + width_in_pixels`,
# i.e. width + 1 cells, while getValuesBlock reads exactly width cells --
# confirm whether this off-by-one is intended.
window_extent <- extent(mapbiomas, opt$row, opt$row + opt$width_in_pixels, opt$col, opt$col + opt$width_in_pixels)
window_raster<- raster(window_extent, crs=crs(mapbiomas), nrows=opt$width_in_pixels, ncols=opt$width_in_pixels)
# Write two bands as GeoTIFFs for visual inspection only (see note below).
for(time_index in c(1, 8)) {
values(window_raster) <- window[, time_index]
filename <- sprintf("./atlantic_forest_output/raster_window_%s_%s_width_%s_band_%s.tif", opt$row, opt$col, opt$width_in_pixels, time_index)
## These .tifs aren't used anywhere in the code, but it can be helpful to inspect these rasters in qgis
message("Writing ", filename)
writeRaster(window_raster, filename, overwrite=TRUE)
}
# Class frequencies before any class combining (fraction of all pixel-years).
class_frequencies_before_combining <- round(table(window) / (nrow(window) * ncol(window)), 4)
pr_missing <- mean(is.na(window))
# codes 22 and 33 are later treated as the sand / water-related classes
pr_water_or_sand <- mean(window %in% c(22, 33))
# Skip windows dominated by missing data or water/sand: nothing to estimate.
if(pr_missing > 0.9 || pr_water_or_sand > 0.5) {
message("Window ", opt$row, " ", opt$col, " is missing at rate ",
pr_missing, ", ",
pr_water_or_sand, " water or sand (averaging over all bands), ",
"skipping estimation")
quit()
}
# Also skip if any single year (band) is almost entirely missing.
n_years <- ncol(window)
for(time_index in seq_len(n_years)) {
pr_missing <- mean(is.na(window[, time_index]))
if(pr_missing > 0.9) {
message("Window ", opt$row, " ", opt$col, " is missing at rate ",
pr_missing, " at time index (i.e. band) ", time_index,
", skipping estimation")
quit()
}
}
## Pixels with no observations in any year (e.g. ocean or outside the region
## of interest in the original data).
fraction_missing_in_all_years <- mean(rowMeans(is.na(window)) == 1.0)
count_missing_in_all_years <- sum(rowMeans(is.na(window)) == 1.0)
message("Fraction of pixels missing in 100% of years in the original data: ", fraction_missing_in_all_years)
## When constructing our panel (for estimation), we will only consider pixels that contain at least one non-missing observation
## in the original data. This will remove pixels in the ocean and pixels outside of the Atlantic forest region
valid_pixel_index <- rowMeans(is.na(window)) < 1.0
## Combine classes
## Class 12 (grassland) is optionally combined with class 3 (forest)
if(opt$grassland_as_forest) window[window %in% 12] <- 3
## Classes 4 (savanna formation) and 9 (forest plantation) are combined with class 3 (forest)
window[window %in% c(4, 9)] <- 3
## Class 11 (wetlands), class 22 (sand), and class 29 (rocky outcrop) are combined with class 33 (rivers and lakes)
window[window %in% c(11, 22, 29)] <- 33
## Class 13 (other non-forest) is combined with class 33 (already a combination of wetlands, sand, rivers and lakes)
if(opt$combine_other_non_forest) window[window %in% 13] <- 33
## See https://mapbiomas-br-site.s3.amazonaws.com/downloads/Colecction%206/Cod_Class_legenda_Col6_MapBiomas_BR.pdf
unique_mapbiomas_classes <- sort(unique(c(window, recursive=TRUE)))
## Identify classes below the frequency cutoff in one vectorized pass
## (replaces a grow-by-c() loop whose loop variable also shadowed base::class).
class_frequency_by_class <- vapply(unique_mapbiomas_classes,
                                   function(cl) mean(window == cl, na.rm = TRUE),
                                   numeric(1))
rare_mapbiomas_classes <- unique_mapbiomas_classes[class_frequency_by_class < opt$class_frequency_cutoff]
## We are going to recode rare classes as NA
## This is effectively assuming that all observations of rare classes must be misclassifications
## In most windows we will keep classes 3 and 21 (forest and mosaic of pasture + agriculture)
mapbiomas_classes_to_keep <- unique_mapbiomas_classes[!unique_mapbiomas_classes %in% rare_mapbiomas_classes]
message("Keeping the following classes:")
print(mapbiomas_classes_to_keep)
table(window)
# frequency of each kept/raw class over all pixel-years
class_frequencies <- round(table(window) / (nrow(window) * ncol(window)), 4)
## Careful, there can be missing values (even before we recode rare classes as NA)!
message("Missing value counts in the original data (fraction and sum):")
mean(is.na(c(window, recursive=TRUE)))
sum(is.na(c(window, recursive=TRUE)))
## Note: the code expects observations to be in the set {1, 2, 3, ..., |Y|},
## So we need to recode sets of classes like {3, 21} to {1, 2} for example
window_recoded <- window
# map the i-th kept MapBiomas class code to the integer i
for(i in seq_along(mapbiomas_classes_to_keep)) {
class <- mapbiomas_classes_to_keep[i]
window_recoded[window == class] <- i
}
## Rare classes are recoded as NA
message("Recoding the following rare classes as NA:")
print(rare_mapbiomas_classes)
window_recoded[window %in% rare_mapbiomas_classes] <- NA
table(window)
table(window_recoded)
mean(is.na(window_recoded))
message("Fraction of pixels with at least one missing value in the recoded data:")
mean(rowMeans(is.na(window_recoded)) > 0)
message("Fraction of pixels missing in >50% of years in the recoded data:")
mean(rowMeans(is.na(window_recoded)) > .5)
message("Fraction of pixels missing in 100% of years in the recoded data:")
mean(rowMeans(is.na(window_recoded)) == 1.0)
full_panel <- apply(window_recoded, 1, function(y) list(y=as.vector(y), time=seq_along(y)))
## Using prob=valid_pixel_index excludes pixels that have 100% missing observations in the original data
panel <- sample(full_panel, size=length(full_panel) * opt$subsample, replace=FALSE, prob=valid_pixel_index)
## We no longer need the full window at this point, rm it to save memory
rm(window)
rm(full_panel)
gc()
## These aren't actually used in optimization,
## they're just used to create other parameters of the same shape/dimension/time horizon
n_states <- length(mapbiomas_classes_to_keep)
n_time_periods <- ncol(window_recoded)
dummy_pr_transition <- 0.2 * matrix(1/n_states, nrow=n_states, ncol=n_states) + 0.8 * diag(n_states)
dummy_pr_y <- 0.2 * matrix(1/n_states, n_states, n_states) + 0.8 * diag(n_states)
dummy_params <- list(mu=rep(1/n_states, n_states),
P_list=rep(list(dummy_pr_transition), n_time_periods - 1),
pr_y=dummy_pr_y,
n_components=n_states)
estimates <- get_em_and_min_dist_estimates_random_initialization(params=dummy_params,
panel=panel,
n_random_starts_em=opt$n_random_starts_em,
n_random_starts_md=opt$n_random_starts_md,
diag_min=0.8,
diag_max=0.95,
skip_ml_if_md_is_diag_dominant=opt$skip_ml_if_md_is_diag_dominant,
use_md_as_initial_values_for_em=opt$use_md_as_initial_values_for_em)
estimates$P_hat_frequency <- lapply(estimates$M_Y_joint_hat, get_transition_probs_from_M_S_joint)
estimates$mapbiomas_classes_to_keep <- mapbiomas_classes_to_keep
estimates$rare_mapbiomas_classes <- rare_mapbiomas_classes
estimates$class_frequencies <- class_frequencies
estimates$class_frequencies_before_combining <- class_frequencies_before_combining
estimates$options <- opt
estimates$window_bbox <- as.data.frame(bbox(window_extent))
estimates$fraction_missing_in_all_years <- fraction_missing_in_all_years
estimates$count_missing_in_all_years <- count_missing_in_all_years
filename <- sprintf("./atlantic_forest_output/estimates_window_%s_%s_width_%s_class_frequency_cutoff_%s_subsample_%s_combined_classes%s%s%s%s.rds",
opt$row, opt$col, opt$width_in_pixels, opt$class_frequency_cutoff, opt$subsample,
ifelse(opt$grassland_as_forest, "_grassland_as_forest", ""),
ifelse(opt$combine_other_non_forest, "_combine_other_non_forest", ""),
ifelse(opt$skip_ml_if_md_is_diag_dominant, "_skip_ml_if_md_is_diag_dominant", ""),
ifelse(opt$use_md_as_initial_values_for_em, "_use_md_as_initial_values_for_em", ""))
message("Saving ", filename)
saveRDS(estimates, file=filename)
for(class_index in seq_along(estimates$mapbiomas_classes_to_keep)) {
class <- estimates$mapbiomas_classes_to_keep[class_index]
## Diagonals of the transition matrix (for example, Pr[ forest at t+1 | forest at t ])
P_hat_frequency <- sapply(estimates$P_hat_frequency, function(P) P[class_index, class_index])
P_hat_md <- sapply(estimates$min_dist_params_hat_best_objfn$P_list, function(P) P[class_index, class_index])
if("em_params_hat_best_likelihood" %in% names(estimates)) {
P_hat_ml <- sapply(estimates$em_params_hat_best_likelihood$P_list, function(P) P[class_index, class_index])
} else {
P_hat_ml <- rep(NA, length(P_hat_md))
}
df <- data.table(time_index=seq_along(P_hat_frequency), P_hat_frequency=P_hat_frequency, P_hat_md, P_hat_ml)
df_melted <- melt(df, id.vars="time_index")
title <- sprintf("Probability of Remaining in Mapbiomas Class %s", class) # TODO Window info in title?
p <- (ggplot(df_melted, aes(x=time_index, y=value, group=variable, color=variable)) +
geom_point() +
geom_line() +
ggtitle(title) +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_discrete("algorithm") +
ylab("probability") +
theme_bw())
filename <- sprintf("transition_matrix_diagonals_window_%s_%s_width_%s_class_%s_with_combined_classes_%s.png",
opt$row, opt$col, opt$width_in_pixels, class, ifelse(opt$grassland_as_forest, "grassland_as_forest", ""))
ggsave(p, filename=filename, width=6, height=4, units="in")
}
| /run_estimation_single_mapbiomas_window.R | no_license | atorch/hidden_markov_model | R | false | false | 11,039 | r | library(Rsolnp)
## Estimate HMM parameters for a single MapBiomas raster window.
## This file is a command line script: all inputs come from the options below.
library(data.table)
library(ggplot2)
library(optparse)
library(raster)
source("hmm_functions.R")
## Command line interface. Window location and size are in pixel units of the
## MapBiomas raster; --subsample and --class_frequency_cutoff are fractions.
opt_list <- list(make_option("--mapbiomas_raster_path", default="./HMM_MapBiomas_v2/mapbiomas.vrt"),
                 make_option("--row", default=50000, type="integer"),
                 make_option("--col", default=51000, type="integer"),
                 make_option("--width_in_pixels", default=1000, type="integer"),
                 make_option("--subsample", default=0.1, type="double"),
                 make_option("--class_frequency_cutoff", default=0.005, type="double"),
                 make_option("--n_random_starts_em", default=2, type="integer"),
                 make_option("--n_random_starts_md", default=1, type="integer"),
                 make_option("--grassland_as_forest", default=FALSE, action="store_true"),
                 make_option("--combine_other_non_forest", default=FALSE, action="store_true"),
                 make_option("--skip_ml_if_md_is_diag_dominant", default=FALSE, action="store_true"),
                 make_option("--use_md_as_initial_values_for_em", default=FALSE, action="store_true"))
opt <- parse_args(OptionParser(option_list=opt_list))
## Log the parsed options so the run is reproducible from its log file.
message("command line options: ", paste(sprintf("%s=%s", names(opt), opt), collapse=", "))
## Load the multi-band raster (one band per year).
mapbiomas <- stack(opt$mapbiomas_raster_path)
nlayers(mapbiomas)  # bare expressions like this auto-print under Rscript
## Read the requested pixel window as a matrix: one row per pixel, one
## column per year (band).
window <- getValuesBlock(mapbiomas,
                         row=opt$row,
                         col=opt$col,
                         nrows=opt$width_in_pixels,
                         ncols=opt$width_in_pixels)
dim(window)
## NOTE(review): extent() is asked for rows [row, row + width] which spans
## width_in_pixels + 1 rows/cols, while getValuesBlock reads exactly
## width_in_pixels -- the georeferenced extent may be off by one pixel.
## Confirm whether the upper bound should be row + width_in_pixels - 1.
window_extent <- extent(mapbiomas, opt$row, opt$row + opt$width_in_pixels, opt$col, opt$col + opt$width_in_pixels)
window_raster <- raster(window_extent, crs=crs(mapbiomas), nrows=opt$width_in_pixels, ncols=opt$width_in_pixels)
## Export a couple of bands as GeoTIFFs for visual inspection.
for(time_index in c(1, 8)) {
    values(window_raster) <- window[, time_index]
    filename <- sprintf("./atlantic_forest_output/raster_window_%s_%s_width_%s_band_%s.tif", opt$row, opt$col, opt$width_in_pixels, time_index)
    ## These .tifs aren't used anywhere in the code, but it can be helpful to inspect these rasters in qgis
    message("Writing ", filename)
    writeRaster(window_raster, filename, overwrite=TRUE)
}
## Class frequencies over all pixel-years, before any class combining below.
class_frequencies_before_combining <- round(table(window) / (nrow(window) * ncol(window)), 4)
## Skip windows that are mostly missing or mostly water/sand (classes 22, 33):
## estimation would be uninformative there.
pr_missing <- mean(is.na(window))
pr_water_or_sand <- mean(window %in% c(22, 33))
if(pr_missing > 0.9 || pr_water_or_sand > 0.5) {
    message("Window ", opt$row, " ", opt$col, " is missing at rate ",
            pr_missing, ", ",
            pr_water_or_sand, " water or sand (averaging over all bands), ",
            "skipping estimation")
    quit()
}
## Also skip if any single year (band) is almost entirely missing.
n_years <- ncol(window)
for(time_index in seq_len(n_years)) {
    pr_missing <- mean(is.na(window[, time_index]))
    if(pr_missing > 0.9) {
        message("Window ", opt$row, " ", opt$col, " is missing at rate ",
                pr_missing, " at time index (i.e. band) ", time_index,
                ", skipping estimation")
        quit()
    }
}
## Pixels with no observations in any year (e.g. ocean, outside the biome).
fraction_missing_in_all_years <- mean(rowMeans(is.na(window)) == 1.0)
count_missing_in_all_years <- sum(rowMeans(is.na(window)) == 1.0)
message("Fraction of pixels missing in 100% of years in the original data: ", fraction_missing_in_all_years)
## When constructing our panel (for estimation), we will only consider pixels that contain at least one non-missing observation
## in the original data. This will remove pixels in the ocean and pixels outside of the Atlantic forest region
valid_pixel_index <- rowMeans(is.na(window)) < 1.0
## Combine classes
## Class 12 (grassland) is optionally combined with class 3 (forest)
if(opt$grassland_as_forest) window[window %in% 12] <- 3
## Combine classes
## Classes 4 (savanna formation) and 9 (forest plantation) are combined with class 3 (forest)
window[window %in% c(4, 9)] <- 3
## Combine classes
## Class 11 (wetlands), class 22 (sand), and class 29 (rocky outcrop) are combined with class 33 (rivers and lakes)
window[window %in% c(11, 22, 29)] <- 33
## Combine classes
## Class 13 (other non-forest) is combined with class 33 (already a combination of wetlands, sand, rivers and lakes)
if(opt$combine_other_non_forest) window[window %in% 13] <- 33
## See https://mapbiomas-br-site.s3.amazonaws.com/downloads/Colecction%206/Cod_Class_legenda_Col6_MapBiomas_BR.pdf
## Flag classes whose overall frequency falls below the cutoff as "rare".
unique_mapbiomas_classes <- sort(unique(c(window, recursive=TRUE)))
rare_mapbiomas_classes <- vector("numeric")
## NOTE(review): the loop variable `class` masks base::class() at top level.
for(class in unique_mapbiomas_classes) {
    if(mean(window == class, na.rm=TRUE) < opt$class_frequency_cutoff) {
        rare_mapbiomas_classes <- c(rare_mapbiomas_classes, class)
    }
}
## We are going to recode rare classes as NA
## This is effectively assuming that all observations of rare classes must be misclassifications
## In most windows we will keep classes 3 and 21 (forest and mosaic of pasture + agriculture)
mapbiomas_classes_to_keep <- unique_mapbiomas_classes[!unique_mapbiomas_classes %in% rare_mapbiomas_classes]
message("Keeping the following classes:")
print(mapbiomas_classes_to_keep)
table(window)  # auto-printed under Rscript, for the run's log
## Denominator is all pixel-years (NA cells included).
class_frequencies <- round(table(window) / (nrow(window) * ncol(window)), 4)
## Careful, there can be missing values (even before we recode rare classes as NA)!
message("Missing value counts in the original data (fraction and sum):")
mean(is.na(c(window, recursive=TRUE)))
sum(is.na(c(window, recursive=TRUE)))
## Note: the code expects observations to be in the set {1, 2, 3, ..., |Y|},
## So we need to recode sets of classes like {3, 21} to {1, 2} for example
window_recoded <- window
for(i in seq_along(mapbiomas_classes_to_keep)) {
    class <- mapbiomas_classes_to_keep[i]
    window_recoded[window == class] <- i
}
## Rare classes are recoded as NA
message("Recoding the following rare classes as NA:")
print(rare_mapbiomas_classes)
window_recoded[window %in% rare_mapbiomas_classes] <- NA
## Diagnostics (auto-printed): distributions before/after recoding.
table(window)
table(window_recoded)
mean(is.na(window_recoded))
message("Fraction of pixels with at least one missing value in the recoded data:")
mean(rowMeans(is.na(window_recoded)) > 0)
message("Fraction of pixels missing in >50% of years in the recoded data:")
mean(rowMeans(is.na(window_recoded)) > .5)
message("Fraction of pixels missing in 100% of years in the recoded data:")
mean(rowMeans(is.na(window_recoded)) == 1.0)
## One panel entry per pixel: its time series of recoded classes plus a
## time index 1..n_years.
full_panel <- apply(window_recoded, 1, function(y) list(y=as.vector(y), time=seq_along(y)))
## Using prob=valid_pixel_index excludes pixels that have 100% missing observations in the original data
panel <- sample(full_panel, size=length(full_panel) * opt$subsample, replace=FALSE, prob=valid_pixel_index)
## We no longer need the full window at this point, rm it to save memory
rm(window)
rm(full_panel)
gc()
## These aren't actually used in optimization,
## they're just used to create other parameters of the same shape/dimension/time horizon
n_states <- length(mapbiomas_classes_to_keep)
n_time_periods <- ncol(window_recoded)
## Diagonally dominant placeholder matrices: 0.8 on the diagonal path plus
## uniform mass elsewhere.
dummy_pr_transition <- 0.2 * matrix(1/n_states, nrow=n_states, ncol=n_states) + 0.8 * diag(n_states)
dummy_pr_y <- 0.2 * matrix(1/n_states, n_states, n_states) + 0.8 * diag(n_states)
dummy_params <- list(mu=rep(1/n_states, n_states),
                     P_list=rep(list(dummy_pr_transition), n_time_periods - 1),
                     pr_y=dummy_pr_y,
                     n_components=n_states)
## Main estimation call (defined in hmm_functions.R): EM and minimum-distance
## estimators, each from multiple random initializations.
estimates <- get_em_and_min_dist_estimates_random_initialization(params=dummy_params,
                                                                 panel=panel,
                                                                 n_random_starts_em=opt$n_random_starts_em,
                                                                 n_random_starts_md=opt$n_random_starts_md,
                                                                 diag_min=0.8,
                                                                 diag_max=0.95,
                                                                 skip_ml_if_md_is_diag_dominant=opt$skip_ml_if_md_is_diag_dominant,
                                                                 use_md_as_initial_values_for_em=opt$use_md_as_initial_values_for_em)
## Attach metadata so the saved .rds is self-describing.
estimates$P_hat_frequency <- lapply(estimates$M_Y_joint_hat, get_transition_probs_from_M_S_joint)
estimates$mapbiomas_classes_to_keep <- mapbiomas_classes_to_keep
estimates$rare_mapbiomas_classes <- rare_mapbiomas_classes
estimates$class_frequencies <- class_frequencies
estimates$class_frequencies_before_combining <- class_frequencies_before_combining
estimates$options <- opt
estimates$window_bbox <- as.data.frame(bbox(window_extent))
estimates$fraction_missing_in_all_years <- fraction_missing_in_all_years
estimates$count_missing_in_all_years <- count_missing_in_all_years
## The filename encodes every option that affects the estimates.
filename <- sprintf("./atlantic_forest_output/estimates_window_%s_%s_width_%s_class_frequency_cutoff_%s_subsample_%s_combined_classes%s%s%s%s.rds",
                    opt$row, opt$col, opt$width_in_pixels, opt$class_frequency_cutoff, opt$subsample,
                    ifelse(opt$grassland_as_forest, "_grassland_as_forest", ""),
                    ifelse(opt$combine_other_non_forest, "_combine_other_non_forest", ""),
                    ifelse(opt$skip_ml_if_md_is_diag_dominant, "_skip_ml_if_md_is_diag_dominant", ""),
                    ifelse(opt$use_md_as_initial_values_for_em, "_use_md_as_initial_values_for_em", ""))
message("Saving ", filename)
saveRDS(estimates, file=filename)
## One diagnostic plot per kept class, comparing the three estimators.
for(class_index in seq_along(estimates$mapbiomas_classes_to_keep)) {
    class <- estimates$mapbiomas_classes_to_keep[class_index]
    ## Diagonals of the transition matrix (for example, Pr[ forest at t+1 | forest at t ])
    P_hat_frequency <- sapply(estimates$P_hat_frequency, function(P) P[class_index, class_index])
    P_hat_md <- sapply(estimates$min_dist_params_hat_best_objfn$P_list, function(P) P[class_index, class_index])
    if("em_params_hat_best_likelihood" %in% names(estimates)) {
        P_hat_ml <- sapply(estimates$em_params_hat_best_likelihood$P_list, function(P) P[class_index, class_index])
    } else {
        ## ML may have been skipped; plot NAs so the legend stays consistent.
        P_hat_ml <- rep(NA, length(P_hat_md))
    }
    df <- data.table(time_index=seq_along(P_hat_frequency), P_hat_frequency=P_hat_frequency, P_hat_md, P_hat_ml)
    df_melted <- melt(df, id.vars="time_index")
    title <- sprintf("Probability of Remaining in Mapbiomas Class %s", class) # TODO Window info in title?
    ## NOTE(review): theme_bw() is a complete theme applied after theme(),
    ## so the plot.title centering above it is discarded -- reorder the two
    ## calls if a centered title is intended.
    p <- (ggplot(df_melted, aes(x=time_index, y=value, group=variable, color=variable)) +
          geom_point() +
          geom_line() +
          ggtitle(title) +
          theme(plot.title = element_text(hjust = 0.5)) +
          scale_color_discrete("algorithm") +
          ylab("probability") +
          theme_bw())
    filename <- sprintf("transition_matrix_diagonals_window_%s_%s_width_%s_class_%s_with_combined_classes_%s.png",
                        opt$row, opt$col, opt$width_in_pixels, class, ifelse(opt$grassland_as_forest, "grassland_as_forest", ""))
    ggsave(p, filename=filename, width=6, height=4, units="in")
}
|
library(ggplot2)
library(png)
library(grid)
library(hexSticker)
#' @param x x offset of the hexagon's center
#'
#' @param y y offset of the hexagon's center
#'
#' @param radius the radius (side length) of the hexagon.
#'
#' @param from_radius from where should the segment be drawn? defaults to the center
#'
#' @param to_radius to where should the segment be drawn? defaults to the radius
#'
#' @param from_angle from which angle should we draw?
#'
#' @param to_angle to which angle should we draw?
#'
#' @param fill fill color
#'
#' @param color line color
#'
#' @param size size of the line?
hex_segment2 <- function(x = 1, y = 1, radius = 1, from_radius = 0,
                         to_radius = radius, from_angle = 30, to_angle = 90,
                         fill = NA, color = NA, size = 1.2) {
  ## Convert the bounding angles from degrees to radians.
  from_angle <- from_angle * pi / 180
  to_angle <- to_angle * pi / 180
  ## Corners of the quadrilateral segment: two points on the inner radius
  ## and two on the outer radius, at the two bounding angles.
  coords <- data.frame(x = x + c(from_radius * cos(from_angle),
                                 to_radius * cos(from_angle),
                                 to_radius * cos(to_angle),
                                 from_radius * cos(to_angle)),
                       y = y + c(from_radius * sin(from_angle),
                                 to_radius * sin(from_angle),
                                 to_radius * sin(to_angle),
                                 from_radius * sin(to_angle))
                       )
  ## Map the data columns by name: referencing `coords$x` inside aes() is a
  ## documented ggplot2 anti-pattern (it bypasses the `data` argument).
  ## With data masking, the columns of `coords` take precedence here.
  geom_polygon(aes(x = x, y = y), data = coords,
               fill = fill, color = color, size = size)
}
## Summer Sky
col_text <- "#ffffff"
col_border <- "#e8e8e8" # Mercury
col_bg <- "#1e8bc3" # Summer Sky
## Course logo, embedded full-width at the hexagon's center.
img <- readPNG("./images/CSAMA2023.png")
img <- rasterGrob(img, width = 1, x = 0.5, y = 0.5,
                  interpolate = FALSE)
## The border is drawn as six 60-degree segments (one per hexagon edge),
## all in the same color for this single-color variant.
hex <- ggplot() +
  geom_hexagon(size = 1.2, fill = col_bg, color = NA) + # full
  geom_subview(subview = img, x = 0.98, y = 0.99,
               width = 1.7, height = 1.7) +
  hex_segment2(size = 0, fill = col_border, # right upper
               from_radius = 0.94, to_radius = 1,
               from_angle = 330, to_angle = 30) +
  hex_segment2(size = 0, fill = col_border,
               from_radius = 0.94, to_radius = 1,
               from_angle = 30, to_angle = 90) +
  hex_segment2(size = 0, fill = col_border,
               from_radius = 0.94, to_radius = 1,
               from_angle = 90, to_angle = 150) +
  hex_segment2(size = 0, fill = col_border,
               from_radius = 0.94, to_radius = 1,
               from_angle = 150, to_angle = 210) +
  hex_segment2(size = 0, fill = col_border,
               from_radius = 0.94, to_radius = 1,
               from_angle = 210, to_angle = 270) +
  hex_segment2(size = 0, fill = col_border,
               from_radius = 0.94, to_radius = 1,
               from_angle = 270, to_angle = 330) +
  geom_url(url = "CSAMA2023", x = 0.22, y = 1.24,
           family = "Aller", size = 15.5, color = col_border) +
  ## The URL is drawn twice, black slightly offset behind white --
  ## presumably a drop-shadow effect.
  geom_url(url = "www.bioconductor.org", size = 7.17, color = "#000000",
           x = 0.956, y = 0.104) +
  geom_url(url = "www.bioconductor.org", size = 7, color = "#ffffff",
           x = 0.95, y = 0.11) +
  theme_sticker()
save_sticker("CSAMA2023.png", hex, dpi = 300)
## Rainbow sticker
## Same layout as above, but each of the six border segments gets one
## rainbow color.
red <- "#ff0000"
orange <- "#ffa52c"
yellow <- "#ead018"
green <- "#007e15"
blue <- "#0505f9"
purple <- "#86007d"
hex <- ggplot() +
  geom_hexagon(size = 1.2, fill = col_bg, color = NA) + # full
  geom_subview(subview = img, x = 0.98, y = 0.99,
               width = 1.7, height = 1.7) +
  hex_segment2(size = 0, fill = red, # right upper
               from_radius = 0.94, to_radius = 1,
               from_angle = 330, to_angle = 30) +
  hex_segment2(size = 0, fill = orange,
               from_radius = 0.94, to_radius = 1,
               from_angle = 30, to_angle = 90) +
  hex_segment2(size = 0, fill = yellow,
               from_radius = 0.94, to_radius = 1,
               from_angle = 90, to_angle = 150) +
  hex_segment2(size = 0, fill = green,
               from_radius = 0.94, to_radius = 1,
               from_angle = 150, to_angle = 210) +
  hex_segment2(size = 0, fill = blue,
               from_radius = 0.94, to_radius = 1,
               from_angle = 210, to_angle = 270) +
  hex_segment2(size = 0, fill = purple,
               from_radius = 0.94, to_radius = 1,
               from_angle = 270, to_angle = 330) +
  geom_url(url = "CSAMA2023", x = 0.22, y = 1.24,
           family = "Aller", size = 15.5, color = col_border) +
  geom_url(url = "www.bioconductor.org", size = 7.17, color = "#000000",
           x = 0.956, y = 0.104) +
  geom_url(url = "www.bioconductor.org", size = 7, color = "#ffffff",
           x = 0.95, y = 0.11) +
  theme_sticker()
save_sticker("CSAMA2023-a.png", hex, dpi = 300)
## Third variant: border segments in alternating light blue / pink / white.
lb <- "#5bcefa"
lr <- "#f5a9b8"
lg <- "#d9d9d9"
hex <- ggplot() +
  geom_hexagon(size = 1.2, fill = col_bg, color = NA) + # full
  geom_subview(subview = img, x = 0.98, y = 0.99,
               width = 1.7, height = 1.7) +
  hex_segment2(size = 0, fill = lg, # right upper
               from_radius = 0.94, to_radius = 1,
               from_angle = 330, to_angle = 30) +
  hex_segment2(size = 0, fill = lr,
               from_radius = 0.94, to_radius = 1,
               from_angle = 30, to_angle = 90) +
  hex_segment2(size = 0, fill = lb,
               from_radius = 0.94, to_radius = 1,
               from_angle = 90, to_angle = 150) +
  hex_segment2(size = 0, fill = lg,
               from_radius = 0.94, to_radius = 1,
               from_angle = 150, to_angle = 210) +
  hex_segment2(size = 0, fill = lr,
               from_radius = 0.94, to_radius = 1,
               from_angle = 210, to_angle = 270) +
  hex_segment2(size = 0, fill = lb,
               from_radius = 0.94, to_radius = 1,
               from_angle = 270, to_angle = 330) +
  geom_url(url = "CSAMA2023", x = 0.22, y = 1.24,
           family = "Aller", size = 15.5, color = col_border) +
  geom_url(url = "www.bioconductor.org", size = 7.17, color = "#000000",
           x = 0.956, y = 0.104) +
  geom_url(url = "www.bioconductor.org", size = 7, color = "#ffffff",
           x = 0.95, y = 0.11) +
  theme_sticker()
save_sticker("CSAMA2023-b.png", hex, dpi = 300)
| /events/CSAMA/2023/CSAMA2023.R | permissive | Bioconductor/BiocStickers | R | false | false | 6,397 | r | library(ggplot2)
library(png)
library(grid)
library(hexSticker)
#' @param x x offset of the hexagon's center
#'
#' @param y y offset of the hexagon's center
#'
#' @param radius the radius (side length) of the hexagon.
#'
#' @param from_radius from where should the segment be drawn? defaults to the center
#'
#' @param to_radius to where should the segment be drawn? defaults to the radius
#'
#' @param from_angle from which angle should we draw?
#'
#' @param to_angle to which angle should we draw?
#'
#' @param fill fill color
#'
#' @param color line color
#'
#' @param size size of the line?
hex_segment2 <- function(x = 1, y = 1, radius = 1, from_radius = 0,
                         to_radius = radius, from_angle = 30, to_angle = 90,
                         fill = NA, color = NA, size = 1.2) {
  ## Convert the bounding angles from degrees to radians.
  from_angle <- from_angle * pi / 180
  to_angle <- to_angle * pi / 180
  ## Corners of the quadrilateral segment: two points on the inner radius
  ## and two on the outer radius, at the two bounding angles.
  coords <- data.frame(x = x + c(from_radius * cos(from_angle),
                                 to_radius * cos(from_angle),
                                 to_radius * cos(to_angle),
                                 from_radius * cos(to_angle)),
                       y = y + c(from_radius * sin(from_angle),
                                 to_radius * sin(from_angle),
                                 to_radius * sin(to_angle),
                                 from_radius * sin(to_angle))
                       )
  ## Map the data columns by name: referencing `coords$x` inside aes() is a
  ## documented ggplot2 anti-pattern (it bypasses the `data` argument).
  ## With data masking, the columns of `coords` take precedence here.
  geom_polygon(aes(x = x, y = y), data = coords,
               fill = fill, color = color, size = size)
}
## Summer Sky
col_text <- "#ffffff"
col_border <- "#e8e8e8" # Mercury
col_bg <- "#1e8bc3" # Summer Sky
img <- readPNG("./images/CSAMA2023.png")
img <- rasterGrob(img, width = 1, x = 0.5, y = 0.5,
interpolate = FALSE)
hex <- ggplot() +
geom_hexagon(size = 1.2, fill = col_bg, color = NA) + # full
geom_subview(subview = img, x = 0.98, y = 0.99,
width = 1.7, height = 1.7) +
hex_segment2(size = 0, fill = col_border, # right upper
from_radius = 0.94, to_radius = 1,
from_angle = 330, to_angle = 30) +
hex_segment2(size = 0, fill = col_border,
from_radius = 0.94, to_radius = 1,
from_angle = 30, to_angle = 90) +
hex_segment2(size = 0, fill = col_border,
from_radius = 0.94, to_radius = 1,
from_angle = 90, to_angle = 150) +
hex_segment2(size = 0, fill = col_border,
from_radius = 0.94, to_radius = 1,
from_angle = 150, to_angle = 210) +
hex_segment2(size = 0, fill = col_border,
from_radius = 0.94, to_radius = 1,
from_angle = 210, to_angle = 270) +
hex_segment2(size = 0, fill = col_border,
from_radius = 0.94, to_radius = 1,
from_angle = 270, to_angle = 330) +
geom_url(url = "CSAMA2023", x = 0.22, y = 1.24,
family = "Aller", size = 15.5, color = col_border) +
geom_url(url = "www.bioconductor.org", size = 7.17, color = "#000000",
x = 0.956, y = 0.104) +
geom_url(url = "www.bioconductor.org", size = 7, color = "#ffffff",
x = 0.95, y = 0.11) +
theme_sticker()
save_sticker("CSAMA2023.png", hex, dpi = 300)
## Rainbow sticker
red <- "#ff0000"
orange <- "#ffa52c"
yellow <- "#ead018"
green <- "#007e15"
blue <- "#0505f9"
purple <- "#86007d"
hex <- ggplot() +
geom_hexagon(size = 1.2, fill = col_bg, color = NA) + # full
geom_subview(subview = img, x = 0.98, y = 0.99,
width = 1.7, height = 1.7) +
hex_segment2(size = 0, fill = red, # right upper
from_radius = 0.94, to_radius = 1,
from_angle = 330, to_angle = 30) +
hex_segment2(size = 0, fill = orange,
from_radius = 0.94, to_radius = 1,
from_angle = 30, to_angle = 90) +
hex_segment2(size = 0, fill = yellow,
from_radius = 0.94, to_radius = 1,
from_angle = 90, to_angle = 150) +
hex_segment2(size = 0, fill = green,
from_radius = 0.94, to_radius = 1,
from_angle = 150, to_angle = 210) +
hex_segment2(size = 0, fill = blue,
from_radius = 0.94, to_radius = 1,
from_angle = 210, to_angle = 270) +
hex_segment2(size = 0, fill = purple,
from_radius = 0.94, to_radius = 1,
from_angle = 270, to_angle = 330) +
geom_url(url = "CSAMA2023", x = 0.22, y = 1.24,
family = "Aller", size = 15.5, color = col_border) +
geom_url(url = "www.bioconductor.org", size = 7.17, color = "#000000",
x = 0.956, y = 0.104) +
geom_url(url = "www.bioconductor.org", size = 7, color = "#ffffff",
x = 0.95, y = 0.11) +
theme_sticker()
save_sticker("CSAMA2023-a.png", hex, dpi = 300)
lb <- "#5bcefa"
lr <- "#f5a9b8"
lg <- "#d9d9d9"
hex <- ggplot() +
geom_hexagon(size = 1.2, fill = col_bg, color = NA) + # full
geom_subview(subview = img, x = 0.98, y = 0.99,
width = 1.7, height = 1.7) +
hex_segment2(size = 0, fill = lg, # right upper
from_radius = 0.94, to_radius = 1,
from_angle = 330, to_angle = 30) +
hex_segment2(size = 0, fill = lr,
from_radius = 0.94, to_radius = 1,
from_angle = 30, to_angle = 90) +
hex_segment2(size = 0, fill = lb,
from_radius = 0.94, to_radius = 1,
from_angle = 90, to_angle = 150) +
hex_segment2(size = 0, fill = lg,
from_radius = 0.94, to_radius = 1,
from_angle = 150, to_angle = 210) +
hex_segment2(size = 0, fill = lr,
from_radius = 0.94, to_radius = 1,
from_angle = 210, to_angle = 270) +
hex_segment2(size = 0, fill = lb,
from_radius = 0.94, to_radius = 1,
from_angle = 270, to_angle = 330) +
geom_url(url = "CSAMA2023", x = 0.22, y = 1.24,
family = "Aller", size = 15.5, color = col_border) +
geom_url(url = "www.bioconductor.org", size = 7.17, color = "#000000",
x = 0.956, y = 0.104) +
geom_url(url = "www.bioconductor.org", size = 7, color = "#ffffff",
x = 0.95, y = 0.11) +
theme_sticker()
save_sticker("CSAMA2023-b.png", hex, dpi = 300)
|
#' @export
store_class_repository.gcp <- function(repository, store, format) {
  # Drop any "&"-delimited suffix from the format before building the class.
  base_format <- sub(pattern = "\\&.*$", replacement = "", x = format)
  # Add "tar_external" only if the store does not already carry it.
  external <- if_any(
    "tar_external" %in% class(store),
    character(0),
    "tar_external"
  )
  c(
    sprintf("tar_gcp_%s", base_format),
    "tar_gcp",
    "tar_cloud",
    external,
    class(store)
  )
}
#' @export
store_assert_repository_setting.gcp <- function(repository) {
  # Intentionally empty: "gcp" is an accepted repository setting,
  # so there is nothing to assert.
}
#' @export
store_produce_path.tar_gcp <- function(store, name, object, path_store) {
  # Pure delegation to the gcp path helper below.
  store_produce_gcp_path(
    store = store,
    name = name,
    object = object,
    path_store = path_store
  )
}
# Build the encoded path vector c("bucket=...", "key=...") for a gcp target.
store_produce_gcp_path <- function(store, name, object, path_store) {
  # Bucket: prefer the gcp-specific resource, fall back to the generic one.
  bucket <- store$resources$gcp$bucket %|||% store$resources$bucket
  tar_assert_nonempty(bucket)
  tar_assert_chr(bucket)
  tar_assert_scalar(bucket)
  tar_assert_nzchar(bucket)
  # Key prefix resolves the same way, defaulting to the standard store path.
  root <- store$resources$gcp$prefix %|||%
    store$resources$prefix %|||%
    path_store_default()
  prefix <- path_objects_dir(path_store = root)
  tar_assert_nonempty(prefix)
  tar_assert_chr(prefix)
  tar_assert_scalar(prefix)
  key <- file.path(prefix, name)
  tar_assert_nzchar(key)
  # Encode both fields as "field=value" strings.
  c(paste0("bucket=", bucket), paste0("key=", key))
}
# Extract the "bucket=" field from an encoded gcp path vector.
store_gcp_bucket <- function(path) {
  store_gcp_path_field(path, "^bucket=")
}
# Extract the "key=" field from an encoded gcp path vector.
store_gcp_key <- function(path) {
  store_gcp_path_field(path, "^key=")
}
# Extract the optional "version=" field from an encoded gcp path vector.
# Returns NULL when the field is absent or empty.
store_gcp_version <- function(path) {
  version <- store_gcp_path_field(path = path, pattern = "^version=")
  if_any(length(version) && nzchar(version), version, NULL)
}
# Shared helper: pull a single "field=value" entry out of the path vector.
store_gcp_path_field <- function(path, pattern) {
  keyvalue_field(x = path, pattern = pattern)
}
# Semi-automated tests of GCP GCS integration live in tests/gcp/. # nolint
# These tests should not be fully automated because they
# automatically create buckets and upload data,
# which could put an unexpected and unfair burden on
# external contributors from the open source community.
# nocov start
#' @export
store_read_object.tar_gcp <- function(store) {
  path <- store$file$path
  key <- store_gcp_key(path)
  bucket <- store_gcp_bucket(path)
  # Download to a temporary scratch file, read it, and always clean up.
  scratch <- path_scratch_temp_network(pattern = basename(key))
  on.exit(unlink(scratch))
  dir_create(dirname(scratch))
  gcp_gcs_download(
    key = key,
    bucket = bucket,
    file = scratch,
    version = store_gcp_version(path),
    # Default resources the same way as every other tar_gcp method in this
    # file (the original passed possibly-NULL values straight through).
    verbose = store$resources$gcp$verbose %|||% FALSE,
    max_tries = store$resources$gcp$max_tries %|||% 5L
  )
  # Read the downloaded file and convert it to the store's in-memory format.
  store_convert_object(store, store_read_path(store, scratch))
}
#' @export
store_exist_object.tar_gcp <- function(store, name = NULL) {
  # Check whether the object (at its recorded version, if any) exists in
  # the bucket. `name` matches the generic's signature but is unused here.
  path <- store$file$path
  gcp_gcs_exists(
    key = store_gcp_key(path),
    bucket = store_gcp_bucket(path),
    version = store_gcp_version(path),
    verbose = store$resources$gcp$verbose %|||% FALSE,
    max_tries = store$resources$gcp$max_tries %|||% 5L
  )
}
#' @export
store_delete_object.tar_gcp <- function(store, name = NULL) {
  path <- store$file$path
  key <- store_gcp_key(path)
  bucket <- store_gcp_bucket(path)
  version <- store_gcp_version(path)
  # Build the remediation message up front so the error handler can use it.
  message <- paste(
    "could not delete target %s from gcp bucket %s key %s.",
    "Either delete the object manually in the gcp web console",
    "or call tar_invalidate(%s) to prevent the targets package",
    "from trying to delete it.\nMessage: "
  )
  message <- sprintf(message, name, bucket, key, name)
  tryCatch(
    gcp_gcs_delete(
      key = key,
      bucket = bucket,
      version = version,
      verbose = store$resources$gcp$verbose %|||% FALSE,
      max_tries = store$resources$gcp$max_tries %|||% 5L
    ),
    error = function(condition) {
      # Surface deletion failures with advice plus the underlying message.
      tar_throw_validate(message, conditionMessage(condition))
    }
  )
}
#' @export
store_upload_object.tar_gcp <- function(store) {
  # Always remove the local staging file, even if the upload fails.
  on.exit(unlink(store$file$stage, recursive = TRUE, force = TRUE))
  store_upload_object_gcp(store)
}
# Upload the staged file to GCS and record the resulting object generation
# ("version=") in the store's encoded path.
store_upload_object_gcp <- function(store) {
  key <- store_gcp_key(store$file$path)
  bucket <- store_gcp_bucket(store$file$path)
  # Upload only if the staging file exists; otherwise the target's code
  # most likely failed before producing output, so raise a file error.
  head <- if_any(
    file_exists_stage(store$file),
    gcp_gcs_upload(
      file = store$file$stage,
      key = key,
      bucket = bucket,
      # Record the local hash as object metadata so hash checks can be
      # done remotely later (see store_gcp_hash()).
      metadata = list("targets-hash" = store$file$hash),
      predefined_acl = store$resources$gcp$predefined_acl %|||% "private",
      verbose = store$resources$gcp$verbose %|||% FALSE,
      max_tries = store$resources$gcp$max_tries %|||% 5L
    ),
    tar_throw_file(
      "Cannot upload non-existent gcp staging file ",
      store$file$stage,
      " to key ",
      key,
      ". The target probably encountered an error."
    )
  )
  # Drop any stale "version=" field, then append the generation reported
  # by GCS for the newly uploaded object.
  path <- grep(
    pattern = "^version=",
    x = store$file$path,
    value = TRUE,
    invert = TRUE
  )
  store$file$path <- c(path, paste0("version=", head$generation))
  invisible()
}
#' @export
store_ensure_correct_hash.tar_gcp <- function(store, storage, deployment) {
  # Intentionally empty for gcp stores; hash checking is done separately
  # via store_has_correct_hash() below.
}
#' @export
store_has_correct_hash.tar_gcp <- function(store) {
  # Compare the hash stored in the GCS object's metadata with the hash
  # recorded locally; a missing remote hash counts as a mismatch.
  remote_hash <- store_gcp_hash(store = store)
  !is.null(remote_hash) && identical(remote_hash, store$file$hash)
}
# Fetch the "targets-hash" value from the GCS object's custom metadata
# (written at upload time by store_upload_object_gcp()).
store_gcp_hash <- function(store) {
  path <- store$file$path
  head <- gcp_gcs_head(
    key = store_gcp_key(path),
    bucket = store_gcp_bucket(path),
    version = store_gcp_version(path),
    verbose = store$resources$gcp$verbose %|||% FALSE,
    max_tries = store$resources$gcp$max_tries %|||% 5L
  )
  head$metadata[["targets-hash"]]
}
# nocov end
#' @export
store_get_packages.tar_gcp <- function(store) {
  # gcp stores need googleCloudStorageR on top of the format's packages.
  c("googleCloudStorageR", NextMethod())
}
| /R/class_gcp.R | permissive | ropensci/targets | R | false | false | 5,506 | r | #' @export
store_class_repository.gcp <- function(repository, store, format) {
format <- gsub(pattern = "\\&.*$", replacement = "", x = format)
c(
sprintf("tar_gcp_%s", format),
"tar_gcp",
"tar_cloud",
if_any("tar_external" %in% class(store), character(0), "tar_external"),
class(store)
)
}
#' @export
store_assert_repository_setting.gcp <- function(repository) {
}
#' @export
store_produce_path.tar_gcp <- function(store, name, object, path_store) {
store_produce_gcp_path(
store = store,
name = name,
object = object,
path_store = path_store
)
}
store_produce_gcp_path <- function(store, name, object, path_store) {
bucket <- store$resources$gcp$bucket %|||% store$resources$bucket
tar_assert_nonempty(bucket)
tar_assert_chr(bucket)
tar_assert_scalar(bucket)
tar_assert_nzchar(bucket)
root_prefix <- store$resources$gcp$prefix %|||%
store$resources$prefix %|||%
path_store_default()
prefix <- path_objects_dir(path_store = root_prefix)
tar_assert_nonempty(prefix)
tar_assert_chr(prefix)
tar_assert_scalar(prefix)
key <- file.path(prefix, name)
tar_assert_nzchar(key)
bucket <- paste0("bucket=", bucket)
key <- paste0("key=", key)
c(bucket, key)
}
store_gcp_bucket <- function(path) {
store_gcp_path_field(path = path, pattern = "^bucket=")
}
store_gcp_key <- function(path) {
store_gcp_path_field(path = path, pattern = "^key=")
}
store_gcp_version <- function(path) {
out <- store_gcp_path_field(path = path, pattern = "^version=")
if_any(length(out) && nzchar(out), out, NULL)
}
store_gcp_path_field <- function(path, pattern) {
keyvalue_field(x = path, pattern = pattern)
}
# Semi-automated tests of GCP GCS integration live in tests/gcp/. # nolint
# These tests should not be fully automated because they
# automatically create buckets and upload data,
# which could put an unexpected and unfair burden on
# external contributors from the open source community.
# nocov start
#' @export
store_read_object.tar_gcp <- function(store) {
  path <- store$file$path
  key <- store_gcp_key(path)
  bucket <- store_gcp_bucket(path)
  # Download to a temporary scratch file, read it, and always clean up.
  scratch <- path_scratch_temp_network(pattern = basename(key))
  on.exit(unlink(scratch))
  dir_create(dirname(scratch))
  gcp_gcs_download(
    key = key,
    bucket = bucket,
    file = scratch,
    version = store_gcp_version(path),
    # Default resources the same way as every other tar_gcp method in this
    # file (the original passed possibly-NULL values straight through).
    verbose = store$resources$gcp$verbose %|||% FALSE,
    max_tries = store$resources$gcp$max_tries %|||% 5L
  )
  # Read the downloaded file and convert it to the store's in-memory format.
  store_convert_object(store, store_read_path(store, scratch))
}
#' @export
store_exist_object.tar_gcp <- function(store, name = NULL) {
  # Check whether the object exists in the bucket (at the pinned version,
  # if any). `name` is unused here but part of the generic signature.
  path <- store$file$path
  gcp_gcs_exists(
    key = store_gcp_key(path),
    bucket = store_gcp_bucket(path),
    version = store_gcp_version(path),
    verbose = store$resources$gcp$verbose %|||% FALSE,
    max_tries = store$resources$gcp$max_tries %|||% 5L
  )
}
#' @export
store_delete_object.tar_gcp <- function(store, name = NULL) {
  path <- store$file$path
  key <- store_gcp_key(path)
  bucket <- store_gcp_bucket(path)
  version <- store_gcp_version(path)
  # Build the guidance message up front; the %s slots are filled below
  # and the raw GCS error text is appended after "Message: ".
  message <- paste(
    "could not delete target %s from gcp bucket %s key %s.",
    "Either delete the object manually in the gcp web console",
    "or call tar_invalidate(%s) to prevent the targets package",
    "from trying to delete it.\nMessage: "
  )
  message <- sprintf(message, name, bucket, key, name)
  # Deletion failures become a validation error with actionable guidance
  # instead of surfacing the raw GCS error.
  tryCatch(
    gcp_gcs_delete(
      key = key,
      bucket = bucket,
      version = version,
      verbose = store$resources$gcp$verbose %|||% FALSE,
      max_tries = store$resources$gcp$max_tries %|||% 5L
    ),
    error = function(condition) {
      tar_throw_validate(message, conditionMessage(condition))
    }
  )
}
#' @export
store_upload_object.tar_gcp <- function(store) {
  # Always remove the local staging file afterwards, even if the upload fails.
  on.exit(unlink(store$file$stage, recursive = TRUE, force = TRUE))
  store_upload_object_gcp(store)
}
store_upload_object_gcp <- function(store) {
  key <- store_gcp_key(store$file$path)
  bucket <- store_gcp_bucket(store$file$path)
  # Upload the staged file if it exists; otherwise the target errored
  # before producing output, so raise a file error instead.
  head <- if_any(
    file_exists_stage(store$file),
    gcp_gcs_upload(
      file = store$file$stage,
      key = key,
      bucket = bucket,
      # Record the targets hash in object metadata so
      # store_has_correct_hash() can verify it later without a download.
      metadata = list("targets-hash" = store$file$hash),
      predefined_acl = store$resources$gcp$predefined_acl %|||% "private",
      verbose = store$resources$gcp$verbose %|||% FALSE,
      max_tries = store$resources$gcp$max_tries %|||% 5L
    ),
    tar_throw_file(
      "Cannot upload non-existent gcp staging file ",
      store$file$stage,
      " to key ",
      key,
      ". The target probably encountered an error."
    )
  )
  # Drop any stale "version=" field and pin the generation reported by the
  # upload so subsequent reads fetch this exact object version.
  path <- grep(
    pattern = "^version=",
    x = store$file$path,
    value = TRUE,
    invert = TRUE
  )
  store$file$path <- c(path, paste0("version=", head$generation))
  invisible()
}
#' @export
store_ensure_correct_hash.tar_gcp <- function(store, storage, deployment) {
  # Deliberate no-op for the gcp store; nothing to re-check here.
}
#' @export
store_has_correct_hash.tar_gcp <- function(store) {
  # Compare the hash recorded in GCS object metadata with the local one.
  hash <- store_gcp_hash(store = store)
  !is.null(hash) && identical(hash, store$file$hash)
}
# Fetch the "targets-hash" metadata field from the object's HEAD response.
# Yields NULL when the field is absent from the metadata list.
store_gcp_hash <- function(store) {
  path <- store$file$path
  head <- gcp_gcs_head(
    key = store_gcp_key(path),
    bucket = store_gcp_bucket(path),
    version = store_gcp_version(path),
    verbose = store$resources$gcp$verbose %|||% FALSE,
    max_tries = store$resources$gcp$max_tries %|||% 5L
  )
  head$metadata[["targets-hash"]]
}
# nocov end
#' @export
store_get_packages.tar_gcp <- function(store) {
  # gcp stores additionally require googleCloudStorageR on top of whatever
  # the parent class needs.
  c("googleCloudStorageR", NextMethod())
}
|
library(ucbthesis)
### Name: rnw2pdf
### Title: Render an Rnw file into a PDF
### Aliases: rnw2pdf
### ** Examples
## Not run:
##D setwd("inst/knitr")
##D rnw2pdf()
## End(Not run)
| /data/genthat_extracted_code/ucbthesis/examples/rnw2pdf.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 189 | r | library(ucbthesis)
### Name: rnw2pdf
### Title: Render an Rnw file into a PDF
### Aliases: rnw2pdf
### ** Examples
## Not run:
##D setwd("inst/knitr")
##D rnw2pdf()
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/permutationOperators.R
\name{recombinationPermutationPositionBased}
\alias{recombinationPermutationPositionBased}
\title{Position Based Crossover (POS) for Permutations}
\usage{
recombinationPermutationPositionBased(population, parameters)
}
\arguments{
\item{population}{List of permutations}
\item{parameters}{not used}
}
\value{
population of recombined offspring
}
\description{
Given a population of permutations, this function recombines each
individual with another individual.
Note, that \code{\link{optimEA}} will not pass the whole population
to recombination functions, but only the chosen parents.
}
| /man/recombinationPermutationPositionBased.Rd | no_license | cran/CEGO | R | false | true | 691 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/permutationOperators.R
\name{recombinationPermutationPositionBased}
\alias{recombinationPermutationPositionBased}
\title{Position Based Crossover (POS) for Permutations}
\usage{
recombinationPermutationPositionBased(population, parameters)
}
\arguments{
\item{population}{List of permutations}
\item{parameters}{not used}
}
\value{
population of recombined offspring
}
\description{
Given a population of permutations, this function recombines each
individual with another individual.
Note, that \code{\link{optimEA}} will not pass the whole population
to recombination functions, but only the chosen parents.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PRISMA_flowdiagram.R
\name{PRISMA_save}
\alias{PRISMA_save}
\title{Save PRISMA2020 flow diagram}
\usage{
PRISMA_save(plotobj)
}
\arguments{
\item{plotobj}{A plot produced using PRISMA_flowdiagram().}
}
\value{
A flow diagram plot as an html file, with embedded links and
tooltips if interactive=TRUE in PRISMA_flowdiagram() and if tooltips
are provided in the data upload, respectively.
}
\description{
Save the html output from PRISMA_flowdiagram() to the
working directory.
}
\examples{
\dontrun{
data <- read.csv(file.choose());
data <- read_PRISMAdata(data);
attach(data);
plot <- PRISMA_flowdiagram(data,
fontsize = 12,
interactive = TRUE,
previous = TRUE,
other = TRUE)
PRISMA_save(plot, format = 'pdf')
}
}
| /man/PRISMA_save.Rd | no_license | yasutakakuniyoshi/PRISMA2020 | R | false | true | 857 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PRISMA_flowdiagram.R
\name{PRISMA_save}
\alias{PRISMA_save}
\title{Save PRISMA2020 flow diagram}
\usage{
PRISMA_save(plotobj)
}
\arguments{
\item{plotobj}{A plot produced using PRISMA_flowdiagram().}
}
\value{
A flow diagram plot as an html file, with embedded links and
tooltips if interactive=TRUE in PRISMA_flowdiagram() and if tooltips
are provided in the data upload, respectively.
}
\description{
Save the html output from PRISMA_flowdiagram() to the
working directory.
}
\examples{
\dontrun{
data <- read.csv(file.choose());
data <- read_PRISMAdata(data);
attach(data);
plot <- PRISMA_flowdiagram(data,
fontsize = 12,
interactive = TRUE,
previous = TRUE,
other = TRUE)
PRISMA_save(plot, format = 'pdf')
}
}
|
# Uses "dplyr"
# install.packages("dplyr")
library("dplyr")

# Load SwissData from the data folder and view it to understand its contents.
swiss.data <- read.csv("data/SwissData.csv")
View(swiss.data)

# Add a column (using dplyr) that is the absolute difference between
# Education and Examination, called Educated.Score.
# Fix: the previous code computed the ratio Education/Examination, which
# did not match the stated requirement of an absolute difference.
swiss.data <- mutate(swiss.data, Educated.Score = abs(Education - Examination))

# Which area(s) had the largest difference?
region.large.diff <- filter(swiss.data, Educated.Score == max(Educated.Score))

# Find which region has the highest percent of men in agriculture and return
# only the percent and region name, using pipe operators.
highest.agriculture <- swiss.data %>%
  filter(Agriculture == max(Agriculture)) %>%
  select(Region, Agriculture)

# Compute the mean of all Infant.Mortality rates and create a column
# (Mortality.Difference) with the difference from the mean (note: computed
# as mean minus the region's rate), then arrange in descending order.
swiss.data <- mutate(swiss.data, Mortality.Difference = mean(Infant.Mortality) - Infant.Mortality) %>%
  arrange(-Mortality.Difference)

# New data frame of regions whose infant mortality rate is below the mean,
# keeping only the region name, education, and mortality rate.
mortality.less <- swiss.data %>%
  filter(Infant.Mortality < mean(Infant.Mortality)) %>%
  select(Region, Education, Infant.Mortality)

# Filter one of the columns based on a question of interest (e.g. which
# regions have a higher education rate) and write the result to a CSV file.
# (Left unfinished in the original script.)

# Create a function that takes two region names and compares them on a chosen
# statistic (Education, Examination, etc.), prints which one is greater, and
# returns a data frame of the selected regions and the compared variable;
# optionally also write that to a CSV file.
# (Left unfinished in the original script.)
| /exercise-8/exercise.R | permissive | monmonc/module10-dplyr | R | false | false | 2,154 | r | #Use "dplyr"
#Install.packages("dplyr")
library("dplyr")
#Load in SwissData from data set from data folder and view it to understand what is in it.
swiss.data <- read.csv("data/SwissData.csv")
View(swiss.data)
#Add a column (using dpylr) that is the absolute difference between Education and Examination and call it
# Educated.Score
swiss.data <- mutate(swiss.data, Educated.Score = Education/Examination)
#Which area(s) had the largest difference
region.large.diff <- filter(swiss.data, Educated.Score == max(Educated.Score))
#Find which region has the highest percent of men in agriculture and retunr only the
#percent and region name. Use pipe operators to accomplish this.
highest.agriculture <- swiss.data %>%
filter(Agriculture == max(Agriculture)) %>%
select(Region, Agriculture)
#Find the average of all infant.mortality rates and create a column (Mortality.Difference)
# showing the difference between a regions mortality rate and the mean. Arrange the dataframe in
# Descending order based on this new column. Use pipe operators.
swiss.data <- mutate(swiss.data, Mortality.Difference = mean(Infant.Mortality) - Infant.Mortality) %>%
arrange(-Mortality.Difference)
# Create a new data frame that only is that of regions that have a Infant mortality rate less than the
# mean. Have this data frame only have the regions name, education and mortality rate.
mortality.less <- swiss.data %>%
filter(Infant.Mortality < mean(Infant.Mortality)) %>%
select(Region, Education, Infant.Mortality)
#Filter one of the columns based on a question that you may have (which regions have a higher
#education rate, etc.) and write that to a csv file
#Question: Which
# Create a function that can take in two different region names and compare them based on a statistic
# Of your choice (education, Examination, ect.) print out a statment describing which one is greater
# and return a data frame that holds the selected region and the compared variable. If your feeling adventurous
# also have your function write to a csv file.
|
# Bioconductor installation (legacy installer): biocLite() predates
# Bioconductor 3.8. NOTE(review): current Bioconductor recommends
# BiocManager::install() instead.
source("http://bioconductor.org/biocLite.R")
biocLite()
biocLite("EBImage")
library("EBImage")
# Load three PNG images from the Images folder.
Image <- readImage("Images/imagen1.png")
Image2 <- readImage("Images/imagen2.png")
Image3 <- readImage("Images/imagen3.png")
# Print image summaries (display(Image) would show them interactively).
#display(Image)
print(Image)
print(Image2)
print(Image3) | /DisiMobile.ImagesAnalysis/Script.R | no_license | nelsonvalverdelt/DisiMobile | R | false | false | 276 | r | source("http://bioconductor.org/biocLite.R")
biocLite()
biocLite("EBImage")
library("EBImage")
Image <- readImage("Images/imagen1.png")
Image2 <- readImage("Images/imagen2.png")
Image3 <- readImage("Images/imagen3.png")
#display(Image)
print(Image)
print(Image2)
print(Image3) |
source("calc_residue.R")
get_doy_fr_0 <- function(td, flname) {
  # For each day of year and each requested column of `td`, compute the
  # fraction of non-NA observations that are exactly zero.
  #
  # td:      data frame with `month` and `day` columns plus data columns.
  # flname:  character vector of column names in `td` to summarize.
  # Returns: matrix [day-of-year x column] of zero fractions; NaN (0/0)
  #          when a day has no non-NA observations, as before.
  ndata <- flname
  # assumes get_dayid() maps month/day to a 1-based day of year -- TODO confirm
  doy <- get_dayid(td$month, td$day)
  day365 <- max(365, max(doy))
  fr_0 <- matrix(NA, day365, length(ndata))
  for (fli in seq_along(ndata)) {
    # Hoist the column extraction out of the inner loop (it does not
    # depend on dayi).
    column <- td[, ndata[fli]]
    for (dayi in seq_len(day365)) {
      id <- which(doy == dayi & !is.na(column))
      fr_0[dayi, fli] <- length(which(column[id] == 0)) / length(id)
    }
  }
  return(fr_0)
}
plt_doy_fr_0 <- function(data,flname){
  # Plot the day-of-year fraction-of-zero series for the observed data.
  # data:   list with an `obs` data frame (see get_doy_fr_0).
  # flname: column name(s) forwarded to get_doy_fr_0.
  td = data$obs
  fr_0 = get_doy_fr_0(td,flname)
  # NOTE(review): fr_0 is a matrix; plot(type = "l") only draws the
  # intended line when flname selects a single column -- consider
  # matplot() if multiple columns are passed.
  plot(fr_0, ylim = c(0,1), type = "l", xlab ="", ylab = "", axes = F, col = "grey")
  axis(side = 1)
  #axis(side = 4, las = 1)
  box()
}
| /code_figures/assist_figure_fr0_doy.R | no_license | wangxsiyu/Lu_Drought_Identification | R | false | false | 708 | r | source("calc_residue.R")
get_doy_fr_0 <- function(td,flname){
ndata = flname #setdiff(colnames(td),c("year","month","day"))
doy = get_dayid(td$month,td$day)
day365 = max(365, max(doy))
fr_0 = matrix(NA,day365,length(ndata))
for( fli in 1:length(ndata) ){
for( dayi in 1:day365){
id = which(doy == dayi & !is.na(td[,ndata[fli]]))
fr_0[dayi,fli] = length(which(td[id,ndata[fli]] == 0)) / length(id)
}
}
#colnames(fr_0) = ndata
return(fr_0)
}
plt_doy_fr_0 <- function(data,flname){
td = data$obs
fr_0 = get_doy_fr_0(td,flname)
plot(fr_0, ylim = c(0,1), type = "l", xlab ="", ylab = "", axes = F, col = "grey")
axis(side = 1)
#axis(side = 4, las = 1)
box()
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SpThin.R, R/generics.R
\name{distname.SpThin}
\alias{distname}
\alias{distname.SpThin}
\title{Distance metric used to thin data.}
\usage{
\method{distname}{SpThin}(x)
distname(x, ...)
}
\arguments{
\item{x}{\code{SpThin} object.}
\item{...}{not used.}
}
\value{
\code{character} name of distance metric used to thin records.
}
\description{
This function returns the name of the distance metric used to thin datasets contained in a \code{SpThin} object.
}
\seealso{
\code{\link{SpThin}}.
}
\examples{
\dontrun{
# make thinned dataset using simulated data
result <- spThin(
  runif(100, -5, 5),
  runif(100, -5, 5),
  dist=5,
  method='heuristic',
  1
)
# show distance name of metric
distname(result)
}
}
| /man/distname.Rd | no_license | jeffreyhanson/spThin | R | false | false | 781 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SpThin.R, R/generics.R
\name{distname.SpThin}
\alias{distname}
\alias{distname.SpThin}
\title{Distance metric used to thin data.}
\usage{
\method{distname}{SpThin}(x)
distname(x, ...)
}
\arguments{
\item{x}{\code{SpThin} object.}
\item{...}{not used.}
}
\value{
\code{character} name of distance metric used to thin records.
}
\description{
This function returns the name of the distance metric used to thin datasets contained in a \code{SpThin} object.
}
\seealso{
\code{\link{SpThin}}.
# make thinned dataset using simulated data
result <- spThin(
runif(100, -5, -5),
runif(100, -5, -5),
dist=5,
method='heuristic',
1,
)
# show distance name of metric
distname(result)
export
}
|
#' @method ggrarecurve default
#' @importFrom ggplot2 ggplot geom_ribbon aes_string geom_smooth facet_wrap scale_y_continuous
#' @importFrom dplyr filter
#' @importFrom rlang .data
#' @importFrom scales squish
#' @importFrom Rmisc summarySE
#' @rdname ggrarecurve
#' @export
ggrarecurve.default <- function(obj,
    sampleda, indexNames="Observe", linesize=0.5, facetnrow=1,
    mapping=NULL, chunks=400, factorNames, factorLevels, se=FALSE,
    method="lm", formula=y ~ log(x), ...){
    if (is.null(mapping)){
        # No user-supplied aesthetics: build the rarefaction data and a
        # default mapping of read depth vs index value, colored by sample.
        obj <- stat_rare(data=obj, chunks=chunks, sampleda=sampleda, factorLevels=factorLevels, plotda=TRUE)
        mapping <- aes_string(x="readsNums", y="value", color="sample")
        if (!missing(factorNames)){
            # Summarize replicate curves per factor level; up/down bound a
            # +/- sd ribbon. NOTE(review): `up` is value - sd and `down`
            # is value + sd, so the names are swapped relative to their
            # meaning, but geom_ribbon() draws the same band either way.
            obj <- summarySE(obj, measurevar="value", groupvars=c(factorNames, "readsNums", "Alpha"), na.rm=TRUE)
            obj$up <- obj$value - obj$sd
            obj$down <- obj$value + obj$sd
            mapping <- modifyList(mapping, aes_string(group=factorNames, color=factorNames, fill=factorNames, ymin="up", ymax="down"))
        }
    }
    if (!is.null(indexNames)){
        # Keep only the requested alpha indices (e.g. "Observe").
        obj <- obj %>% filter(.data$Alpha %in% indexNames)
    }
    p <- ggplot(data=obj, mapping=mapping) #+
    if (!missing(factorNames)){
        #p <- p + geom_errorbar(alpha=0.5)
        p <- p + geom_ribbon(alpha=0.3, color=NA, show.legend=FALSE)
    }
    message("The color has been set automatically, you can reset it manually by adding scale_color_manual(values=yourcolors)")
    p <- p + geom_smooth(se=se, method = method, size=linesize,formula = formula,...)+
         scale_y_continuous(limits=c(0,NA), oob=squish) +
         facet_wrap(~ Alpha, scales="free", nrow=facetnrow) +
         ylab("alpha metric")+xlab("number of reads")
    return(p)
}
#' @title mapping data of ggrarecurve
#' @description
#' generating the data of ggrarecurve.
#' @param data data.frame,(nrow sample * ncol taxonomy
#' (feature) or and factor)
#' @param chunks integer, the number of subsample in a sample,
#' default is 400.
#' @param sampleda data.frame, (nrow sample * ncol factor)
#' @param factorLevels list, the levels of the factors, default is NULL,
#' if you want to order the levels of factor, you can set this.
#' @param plotda boolean, default is TRUE, whether build the data.frame for
#' `geom_bar` of `ggplot2`.
#' @return data.frame for ggrarecurve.
#' @author Shuangbin Xu
#' @importFrom dplyr bind_rows
#' @importFrom reshape melt
#' @importFrom magrittr %>%
#' @keywords internal
stat_rare <- function(data,
                      chunks=400,
                      sampleda,
                      factorLevels,
                      plotda=TRUE){
    # Split columns into numeric feature columns and non-numeric
    # (metadata/factor) columns.
    tmpfeature <- colnames(data)[vapply(data,is.numeric,logical(1))]
    tmpfactor <- colnames(data)[!vapply(data,is.numeric,logical(1))]
    dat <- data[ , match(tmpfeature, colnames(data)), drop=FALSE]
    # Rarefy each sample (row) via samplealpha() and stack the per-sample
    # results, keeping the row name in a "sample" column.
    out <- apply(dat, 1, samplealpha, chunks=chunks) %>%
           bind_rows(,.id="sample")
    if (plotda){
        # Long format for plotting: attach sample metadata (either the
        # explicit sampleda or factor columns embedded in `data`), then
        # melt the alpha-index columns into "Alpha"/"value" pairs.
        if (!missing(sampleda)){
            sampleda$sample <- rownames(sampleda)
            out <- merge(out, sampleda)
            out <- melt(out,id.vars=c(colnames(sampleda), "readsNums"),
                        variable_name="Alpha")
        }
        if (missing(sampleda) && length(tmpfactor) > 0){
            tmpsample <- data[, tmpfactor, drop=FALSE]
            tmpsample$sample <- rownames(tmpsample)
            out <- merge(out, tmpsample)
            out <- melt(out, id.vars=c("sample", "readsNums", tmpfactor),
                        variable_name="Alpha")
        }
        if (missing(sampleda)&&length(tmpfactor) == 0){
            out <- melt(out, id.vars=c("sample", "readsNums"),
                        variable_name="Alpha")
        }
    }else{
        # Wide format: just attach metadata without melting.
        if (!missing(sampleda)){
            sampleda$sample <- rownames(sampleda)
            out <- merge(out, sampleda)
        }
        if (missing(sampleda) && length(tmpfactor) >0){
            tmpsample <- data[,tmpfactor,drop=FALSE]
            tmpsample$sample <- rownames(tmpsample)
            out <- merge(out, tmpsample)
        }
    }
    if (!missing(factorLevels)){
        # Optionally impose factor level order for consistent plotting.
        out <- setfactorlevels(out, factorLevels)
    }
    return(out)
}
#' @keywords internal
samplealpha <- function(data, chunks=200){
    # Compute alpha-diversity indices along a rarefaction gradient for one
    # sample: subsampling depths step from ~sdepth/chunks up to full depth.
    sdepth <- sum(data)
    # Guard against step == 0 (total depth smaller than `chunks`), which
    # previously made seq(by = 0) error out.
    step <- max(1L, trunc(sdepth/chunks))
    n <- seq(0, sdepth, by=step)[-1]
    # Append the full depth exactly once (the original appended it
    # unconditionally, duplicating the final point whenever the step
    # divided the depth evenly).
    if (length(n) == 0 || n[length(n)] != sdepth) {
        n <- c(n, sdepth)
    }
    out <- lapply(n, function(x){
               tmp <- get_alphaindex(data, mindepth=x)
               tmp$readsNums <- x
               return(tmp)})
    out <- do.call("rbind", c(out, make.row.names=FALSE))
    # Indices undefined at shallow depths come back NA; treat them as 0.
    out[is.na(out)] <- 0
    return (out)
}
| /R/rareplot.R | no_license | yiluheihei/MicrobiotaProcess | R | false | false | 4,442 | r | #' @method ggrarecurve default
#' @importFrom ggplot2 ggplot geom_ribbon aes_string geom_smooth facet_wrap scale_y_continuous
#' @importFrom dplyr filter
#' @importFrom rlang .data
#' @importFrom scales squish
#' @importFrom Rmisc summarySE
#' @rdname ggrarecurve
#' @export
ggrarecurve.default <- function(obj,
sampleda, indexNames="Observe", linesize=0.5, facetnrow=1,
mapping=NULL, chunks=400, factorNames, factorLevels, se=FALSE,
method="lm", formula=y ~ log(x), ...){
if (is.null(mapping)){
obj <- stat_rare(data=obj, chunks=chunks, sampleda=sampleda, factorLevels=factorLevels, plotda=TRUE)
mapping <- aes_string(x="readsNums", y="value", color="sample")
if (!missing(factorNames)){
obj <- summarySE(obj, measurevar="value", groupvars=c(factorNames, "readsNums", "Alpha"), na.rm=TRUE)
obj$up <- obj$value - obj$sd
obj$down <- obj$value + obj$sd
mapping <- modifyList(mapping, aes_string(group=factorNames, color=factorNames, fill=factorNames, ymin="up", ymax="down"))
}
}
if (!is.null(indexNames)){
obj <- obj %>% filter(.data$Alpha %in% indexNames)
}
p <- ggplot(data=obj, mapping=mapping) #+
if (!missing(factorNames)){
#p <- p + geom_errorbar(alpha=0.5)
p <- p + geom_ribbon(alpha=0.3, color=NA, show.legend=FALSE)
}
message("The color has been set automatically, you can reset it manually by adding scale_color_manual(values=yourcolors)")
p <- p + geom_smooth(se=se, method = method, size=linesize,formula = formula,...)+
scale_y_continuous(limits=c(0,NA), oob=squish) +
facet_wrap(~ Alpha, scales="free", nrow=facetnrow) +
ylab("alpha metric")+xlab("number of reads")
return(p)
}
#' @title mapping data of ggrarecurve
#' @description
#' generating the data of ggrarecurve.
#' @param data data.frame,(nrow sample * ncol taxonomy
#' (feature) or and factor)
#' @param chunks integer, the number of subsample in a sample,
#' default is 400.
#' @param sampleda data.frame, (nrow sample * ncol factor)
#' @param factorLevels list, the levels of the factors, default is NULL,
#' if you want to order the levels of factor, you can set this.
#' @param plotda boolean, default is TRUE, whether build the data.frame for
#' `geom_bar` of `ggplot2`.
#' @return data.frame for ggrarecurve.
#' @author Shuangbin Xu
#' @importFrom dplyr bind_rows
#' @importFrom reshape melt
#' @importFrom magrittr %>%
#' @keywords internal
stat_rare <- function(data,
chunks=400,
sampleda,
factorLevels,
plotda=TRUE){
tmpfeature <- colnames(data)[vapply(data,is.numeric,logical(1))]
tmpfactor <- colnames(data)[!vapply(data,is.numeric,logical(1))]
dat <- data[ , match(tmpfeature, colnames(data)), drop=FALSE]
out <- apply(dat, 1, samplealpha, chunks=chunks) %>%
bind_rows(,.id="sample")
if (plotda){
if (!missing(sampleda)){
sampleda$sample <- rownames(sampleda)
out <- merge(out, sampleda)
out <- melt(out,id.vars=c(colnames(sampleda), "readsNums"),
variable_name="Alpha")
}
if (missing(sampleda) && length(tmpfactor) > 0){
tmpsample <- data[, tmpfactor, drop=FALSE]
tmpsample$sample <- rownames(tmpsample)
out <- merge(out, tmpsample)
out <- melt(out, id.vars=c("sample", "readsNums", tmpfactor),
variable_name="Alpha")
}
if (missing(sampleda)&&length(tmpfactor) == 0){
out <- melt(out, id.vars=c("sample", "readsNums"),
variable_name="Alpha")
}
}else{
if (!missing(sampleda)){
sampleda$sample <- rownames(sampleda)
out <- merge(out, sampleda)
}
if (missing(sampleda) && length(tmpfactor) >0){
tmpsample <- data[,tmpfactor,drop=FALSE]
tmpsample$sample <- rownames(tmpsample)
out <- merge(out, tmpsample)
}
}
if (!missing(factorLevels)){
out <- setfactorlevels(out, factorLevels)
}
return(out)
}
#' @keywords internal
samplealpha <- function(data, chunks=200){
sdepth <- sum(data)
step <- trunc(sdepth/chunks)
n <- seq(0, sdepth, by=step)[-1]
n <- c(n, sdepth)
out <- lapply(n, function(x){
tmp <- get_alphaindex(data, mindepth=x)
#tmp <- tmp$indexs
tmp$readsNums <- x
return(tmp)})
out <- do.call("rbind", c(out, make.row.names=FALSE))
out[is.na(out)] <- 0
return (out)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.