content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Fitted probabilities from \code{brm} fits
#'
#' @description Calculate fitted probabilities from a fitted binary regression model object.
#'
#' @param object A fitted object from function \code{brm}.
#'
#' @param va.new An optional covariate matrix to make predictions with. If omitted, the original matrix va is used.
#'
#' @param vb.new An optional covariate matrix to make predictions with. If vb.new is omitted but va.new is not, then vb.new is set to be equal to va.new. If both vb.new and va.new are omitted, then the original matrix vb is used.
#'
#' @param x.new An optional vector of x.
#'
#' @param ... Further arguments affecting the predictions produced.
#'
#' @return If x.new is omitted, a matrix consisting of fitted probabilities for p0 = P(y=1|x=0,va,vb) and p1 = P(y=1|x=1,va,vb).
#'
#' If x.new is supplied, a vector consisting of fitted probabilities px = P(y=1|x=x.new,va,vb).
#'
#' @export
predict.brm = function(object, x.new = NULL, va.new = NULL, vb.new = NULL, ...) {
  ## Resolve the covariate matrices: fall back to the matrices stored in the
  ## fitted object, letting vb.new default to va.new when only va.new is given.
  if (is.null(vb.new)) {
    vb.new <- if (is.null(va.new)) object$vb else va.new
  }
  if (is.null(va.new)) {
    va.new <- object$va
  }
  n.obs   <- nrow(va.new)
  n.alpha <- ncol(va.new)
  n.beta  <- ncol(vb.new)
  ## Unpack the stacked coefficient vector into its alpha and beta parts.
  alpha.hat <- object$point.est[seq_len(n.alpha)]
  beta.hat  <- object$point.est[n.alpha + seq_len(n.beta)]
  lp <- cbind(va.new %*% alpha.hat, vb.new %*% beta.hat)
  ## Map the two linear predictors to (p0, p1) under the model's parameterisation.
  if (object$param == "RR") {
    p0p1 <- getProbRR(lp)
  } else if (object$param == "RD") {
    p0p1 <- getProbRD(lp)
  } else if (object$param == "OR") {
    ## Second column models the baseline (x = 0) probability on the logit
    ## scale; first column is the log odds ratio.
    baseline <- expit(lp[, 2])
    exposed.odds <- exp(lp[, 1]) * baseline / (1 - baseline)
    p0p1 <- cbind(baseline, exposed.odds / (1 + exposed.odds))
  }
  colnames(p0p1) <- c("p0", "p1")
  if (is.null(x.new)) {
    return(p0p1)
  }
  ## With x.new supplied, pick the fitted probability matching each
  ## subject's exposure level.
  px <- rep(NA, n.obs)
  px[x.new == 0] <- p0p1[x.new == 0, 1]
  px[x.new == 1] <- p0p1[x.new == 1, 2]
  px
}
| /R/predict.brm.R | no_license | mclements/brm | R | false | false | 2,147 | r | #' Fitted probabilities from \code{brm} fits
#'
#' @description Calculate fitted probabilities from a fitted binary regression model object.
#'
#' @param object A fitted object from function \code{brm}.
#'
#' @param va.new An optional covariate matrix to make predictions with. If omitted, the original matrix va is used.
#'
#' @param vb.new An optional covariate matrix to make predictions with. If vb.new is omitted but va.new is not, then vb.new is set to be equal to va.new. If both vb.new and va.new are omitted, then the original matrix vb is used.
#'
#' @param x.new An optional vector of x.
#'
#' @param ... Further arguments affecting the predictions produced.
#'
#' @return If x.new is omitted, a matrix consisting of fitted probabilities for p0 = P(y=1|x=0,va,vb) and p1 = P(y=1|x=1,va,vb).
#'
#' If x.new is supplied, a vector consisting of fitted probabilities px = P(y=1|x=x.new,va,vb).
#'
#' @export
predict.brm = function(object, x.new = NULL, va.new = NULL, vb.new = NULL, ...) {
  ## Resolve the covariate matrices: fall back to the matrices stored in the
  ## fitted object, letting vb.new default to va.new when only va.new is given.
  if (is.null(vb.new)) {
    vb.new <- if (is.null(va.new)) object$vb else va.new
  }
  if (is.null(va.new)) {
    va.new <- object$va
  }
  n.obs   <- nrow(va.new)
  n.alpha <- ncol(va.new)
  n.beta  <- ncol(vb.new)
  ## Unpack the stacked coefficient vector into its alpha and beta parts.
  alpha.hat <- object$point.est[seq_len(n.alpha)]
  beta.hat  <- object$point.est[n.alpha + seq_len(n.beta)]
  lp <- cbind(va.new %*% alpha.hat, vb.new %*% beta.hat)
  ## Map the two linear predictors to (p0, p1) under the model's parameterisation.
  if (object$param == "RR") {
    p0p1 <- getProbRR(lp)
  } else if (object$param == "RD") {
    p0p1 <- getProbRD(lp)
  } else if (object$param == "OR") {
    ## Second column models the baseline (x = 0) probability on the logit
    ## scale; first column is the log odds ratio.
    baseline <- expit(lp[, 2])
    exposed.odds <- exp(lp[, 1]) * baseline / (1 - baseline)
    p0p1 <- cbind(baseline, exposed.odds / (1 + exposed.odds))
  }
  colnames(p0p1) <- c("p0", "p1")
  if (is.null(x.new)) {
    return(p0p1)
  }
  ## With x.new supplied, pick the fitted probability matching each
  ## subject's exposure level.
  px <- rep(NA, n.obs)
  px[x.new == 0] <- p0p1[x.new == 0, 1]
  px[x.new == 1] <- p0p1[x.new == 1, 2]
  px
}
|
# Unit tests of ccmean() input handling in ccostr: well-formed data (with and
# without cost history) should yield a result, absent cost history should
# trigger an informational message, and malformed data should raise errors.
context("Wrong input test of messages and warnings")
library(ccostr)
# Data WITHOUT cost history: one row per subject
# (id, total cost, censoring indicator `delta`, survival time `surv`).
df_1 <- data.frame(id = c("A", "B" ,"C"),
cost = c(2544,4245,590),
delta = c(0,0,1),
surv = c(343,903,445))
test_that("Base est works", {
expect_true(is.character(ccmean(df_1)$Text))
})
# Without cost-history columns ccmean() is expected to emit a message.
test_that("Base + history message works", {
expect_message(ccmean(df_1))
})
# Data WITH cost history: several (start, stop, cost) intervals per subject.
df_2 <- data.frame(id = c("A", "A", "A", "B" ,"C", "C", "D"),
start = c(1, 30, 88, 18, 1, 67, 43),
stop = c(1, 82, 88, 198, 5, 88, 44),
cost = c(550, 1949, 45, 4245, 23, 567, 300),
delta = c(0, 0, 0, 0, 1, 1, 1),
surv = c(343, 343, 343, 903, 445, 445, 652),
test = c(0, 0, 0, 0, 1, 1, 1))
test_that("Base + history est works", {
expect_true(is.character(ccmean(df_2)$Text))
})
# Misspelled required columns ("dfi" instead of "id", "dta" instead of "delta").
df_3 <- data.frame(dfi = c("A", "A", "A", "B" ,"C", "C", "D"),
start = c(1, 30, 88, 18, 1, 67, 43),
stop = c(1, 82, 88, 198, 5, 88, 44),
cost = c(550, 1949, 45, 4245, 23, 567, 300),
dta = c(0, 0, 0, 0, 1, 1, 1),
surv = c(343, 343, 343, 903, 445, 445, 652),
test = c(0, 0, 0, 0, 1, 1, 1))
test_that("Bad names should produce error", {
expect_error(ccmean(df_3))
})
# No cost history but duplicate ids: ambiguous without start/stop columns.
df_4 <- data.frame(id = c("A", "B" ,"C", "A"),
cost = c(2544,4245,590, 23),
delta = c(0,0,1,0),
surv = c(343,903,445,343))
test_that("No history and dublicate id's should produce error", {
expect_error(ccmean(df_4))
})
| /tests/testthat/test-differing_input_data.R | permissive | LarsHernandez/ccostr | R | false | false | 1,773 | r |
# Unit tests of ccmean() input handling in ccostr: well-formed data (with and
# without cost history) should yield a result, absent cost history should
# trigger an informational message, and malformed data should raise errors.
context("Wrong input test of messages and warnings")
library(ccostr)
# Data WITHOUT cost history: one row per subject
# (id, total cost, censoring indicator `delta`, survival time `surv`).
df_1 <- data.frame(id = c("A", "B" ,"C"),
cost = c(2544,4245,590),
delta = c(0,0,1),
surv = c(343,903,445))
test_that("Base est works", {
expect_true(is.character(ccmean(df_1)$Text))
})
# Without cost-history columns ccmean() is expected to emit a message.
test_that("Base + history message works", {
expect_message(ccmean(df_1))
})
# Data WITH cost history: several (start, stop, cost) intervals per subject.
df_2 <- data.frame(id = c("A", "A", "A", "B" ,"C", "C", "D"),
start = c(1, 30, 88, 18, 1, 67, 43),
stop = c(1, 82, 88, 198, 5, 88, 44),
cost = c(550, 1949, 45, 4245, 23, 567, 300),
delta = c(0, 0, 0, 0, 1, 1, 1),
surv = c(343, 343, 343, 903, 445, 445, 652),
test = c(0, 0, 0, 0, 1, 1, 1))
test_that("Base + history est works", {
expect_true(is.character(ccmean(df_2)$Text))
})
# Misspelled required columns ("dfi" instead of "id", "dta" instead of "delta").
df_3 <- data.frame(dfi = c("A", "A", "A", "B" ,"C", "C", "D"),
start = c(1, 30, 88, 18, 1, 67, 43),
stop = c(1, 82, 88, 198, 5, 88, 44),
cost = c(550, 1949, 45, 4245, 23, 567, 300),
dta = c(0, 0, 0, 0, 1, 1, 1),
surv = c(343, 343, 343, 903, 445, 445, 652),
test = c(0, 0, 0, 0, 1, 1, 1))
test_that("Bad names should produce error", {
expect_error(ccmean(df_3))
})
# No cost history but duplicate ids: ambiguous without start/stop columns.
df_4 <- data.frame(id = c("A", "B" ,"C", "A"),
cost = c(2544,4245,590, 23),
delta = c(0,0,1,0),
surv = c(343,903,445,343))
test_that("No history and dublicate id's should produce error", {
expect_error(ccmean(df_4))
})
|
##########################################################################
# Copyright 2008-2012 Janssen Research & Development, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
###########################################################################
#ForestPlotLoader
#This will load our input files into variables so we can run the box plot with ANOVA.
###########################################################################
ForestPlot.loader <- function(
  input.filename,
  statistic = "OR",
  concept.dependent,
  concept.independent,
  concept.reference,
  output.file ="ForestPlot"
)
{
  # Packages used by this script (str_extract below; plotting and
  # meta-analysis helpers are used by the downstream functions).
  library(stringr)
  library(plyr)
  library(ggplot2)
  library(Cairo)
  library("rmeta")
  # Read the tab-delimited graph data exported by the application.
  line.data<-read.delim(input.filename,header=T)
  # Both variables need at least two observed groups for the plot to make sense.
  if(length(unique(line.data$X)) < 2) stop("||FRIENDLY||The Forest Plot test requires at least two groups for each variable. The intersection of the groups you selected for the independent variable with the data available in the dependent variable yielded only one group with a non-zero number of subjects in the independent variable. Please verify your input and try again.")
  if(length(unique(line.data$Y)) < 2) stop("||FRIENDLY||The Forest Plot requires at least two groups for each variable. The intersection of the groups you selected for the dependent variable with the data available in the independent variable yielded only one group with a non-zero number of subjects in the dependent variable. Please verify your input and try again.")
  # Make the concept paths pretty by keeping only the last two path levels.
  # Entries without backslashes (e.g. "bin1234" or "Other") are left alone.
  line.data$Z <- as.character(line.data$Z)
  line.data$Z[grep("\\\\", line.data$Z)] <- str_extract(line.data$Z[grep("\\\\", line.data$Z)],"(\\\\.+\\\\.+\\\\\\s*)+?$")
  line.data$Y <- as.character(line.data$Y)
  line.data$Y[grep("\\\\", line.data$Y)] <- str_extract(line.data$Y[grep("\\\\", line.data$Y)],"(\\\\.+\\\\.+\\\\\\s*)+?$")
  line.data$X <- as.character(line.data$X)
  line.data$X[grep("\\\\", line.data$X)] <- str_extract(line.data$X[grep("\\\\", line.data$X)],"(\\\\.+\\\\.+\\\\\\s*)+?$")
  # Dispatch on the grouping columns present.
  # BUG FIX: the grouped branches previously called ForestPlot.loader.single
  # without the output.file/statistic/concept.* arguments (whose declared
  # defaults such as `statistic=statistic` are self-referential and cannot
  # be evaluated), so any grouped run failed. The required arguments are now
  # forwarded explicitly. The stray, undefined `groupedData` argument of the
  # original lapply() call has also been removed.
  if(("GROUP" %in% colnames(line.data)) && ("GROUP.1" %in% colnames(line.data)))
  {
    # This is a list of the distinct outer groups.
    groupList <- matrix(unique(line.data$GROUP))
    # For each "GROUP" run the single-plot routine on each "GROUP.1" slice.
    subFunctionForForestPlot <- function(currentGroup)
    {
      # Pull the records belonging to the current outer group.
      currentGroupingData <- line.data[which(line.data$GROUP==currentGroup),]
      trimmedGroupName <- gsub("^\\s+|\\s+$", "",currentGroup)
      lapply(split(currentGroupingData,currentGroupingData$GROUP.1),
             ForestPlot.loader.single, "GROUP.1", trimmedGroupName,
             output.file = output.file,
             statistic = statistic,
             concept.dependent = concept.dependent,
             concept.independent = concept.independent,
             concept.reference = concept.reference)
    }
    lapply(groupList, subFunctionForForestPlot)
  }
  else if("GROUP.1" %in% colnames(line.data))
  {
    lapply(split(line.data, line.data$GROUP.1),
           ForestPlot.loader.single, "GROUP.1", "",
           output.file = output.file,
           statistic = statistic,
           concept.dependent = concept.dependent,
           concept.independent = concept.independent,
           concept.reference = concept.reference)
  }
  else if("GROUP" %in% colnames(line.data))
  {
    lapply(split(line.data, line.data$GROUP),
           ForestPlot.loader.single, "GROUP", "",
           output.file = output.file,
           statistic = statistic,
           concept.dependent = concept.dependent,
           concept.independent = concept.independent,
           concept.reference = concept.reference)
  }
  else
  {
    ForestPlot.loader.single(
      line.data,
      '',
      '',
      output.file = output.file,
      statistic = statistic,
      concept.dependent = concept.dependent,
      concept.independent = concept.independent,
      concept.reference = concept.reference)
  }
}
# Build the per-stratum count tables, Fisher / chi-square statistics and the
# forest plot for a single data chunk (one GROUP/GROUP.1 slice, or the whole
# data set). Results are appended to "statisticalTests<qualifier>.txt" and
# "Count<qualifier>.txt"; plotting is delegated to makeForestPlotForRCTs.
# NOTE(review): the defaults `statistic=statistic` etc. in the signature are
# self-referential promises and cannot actually be evaluated -- every caller
# must pass these arguments explicitly. Confirm before relying on defaults.
ForestPlot.loader.single <-
function(
dataChunk,
splitColumn,
fileNameQualifier,
output.file,
statistic=statistic,
concept.dependent=concept.dependent,
concept.independent=concept.independent,
concept.reference
)
{
#This is the name of the output file for the statistical tests.
statisticalTestsResultsFile <- paste("statisticalTests",fileNameQualifier,".txt",sep="")
#This is the name of the output file for the count table.
countsFile <- paste("Count",fileNameQualifier,".txt",sep="")
#This is the current group we are generating the statistics for.
currentGroup <- unique(dataChunk[[splitColumn]])
#If we have a group column we should write the group name to the file.
if(splitColumn %in% colnames(dataChunk)) write(paste("name=",currentGroup,sep=""), file=statisticalTestsResultsFile,append=T)
if(splitColumn %in% colnames(dataChunk)) write(paste("name=",currentGroup,sep=""), file=countsFile,append=T)
#Remove any unwanted columns.
# X is the dependent concept column
# Y is the independent concept column
# Z is the stratification concept column
dataChunk <- dataChunk[c('X','REFERENCE','Y','OUTCOME','Z')]
#Extract the list of all distinct stratification entries.
strata <- unique(dataChunk[,c('Z')])
# With no stratification variable at all, treat everything as one stratum.
if(all(is.na(strata)))
{
dataChunk$Z <- 'ALL'
strata <- c('ALL')
}
#number of stratification concepts
numstrata <- length(strata)
#create a sub matrix of dependent & independent variables
#by separating the dataChunk by each stratification concept
# NOTE(review): split() alone already returns the per-stratum list used below;
# the assign() additionally writes each stratum into the global environment
# (objects named "Z<stratum>") as a side effect -- confirm nothing depends on
# those globals before simplifying.
strata.df <- invisible(lapply(split(dataChunk, dataChunk$Z), function(x) { assign(paste0("Z", x$Z[1]), x, pos=.GlobalEnv) }))
#Variable to hold the list of all the countTables,a countTable is generated for each stratification
countTable.list = list()
#Variables to hold the 2x2 cell counts per stratum.
TreatmentEvents = list()
ControlEvents = list()
TreatmentNoEvents = list()
ControlNoEvents = list()
#row & col names
# NOTE(review): studylabels/eventlabels are computed here but never used in
# this function -- possibly leftovers from an earlier labelling scheme.
studylabels<-getStudyLabels(paste(concept.independent,concept.reference,sep="|"))
eventlabels<-getEventLabels(concept.dependent)
#For each stratification entry, create a count table.
for (i in strata)
{
#At the same time that we count for the Fisher test we will count for the Forest Plot. These counts are done separately so we can keep track of the Control and "Top Event".
TreatmentEvents[[i]] <- nrow(subset(strata.df[[i]], (REFERENCE == 0) & (OUTCOME == 1)))
ControlEvents[[i]] <- nrow(subset(strata.df[[i]], (REFERENCE == 1) & (OUTCOME == 1)))
TreatmentNoEvents[[i]] <- nrow(subset(strata.df[[i]], (REFERENCE == 0) & (OUTCOME == 0)))
ControlNoEvents[[i]] <- nrow(subset(strata.df[[i]], (REFERENCE == 1) & (OUTCOME == 0)))
# NOTE(review): any() on a single positive count is TRUE, so this applies the
# 0.5 continuity correction whenever ANY cell is non-zero (i.e. almost
# always). The conventional correction is applied when a cell IS zero
# (e.g. any(... == 0)) -- confirm the intended condition.
if(any(TreatmentEvents[[i]]) | any(ControlEvents[[i]]) | any(TreatmentNoEvents[[i]]) | any(ControlNoEvents[[i]]))
{
TreatmentEvents[[i]] <- TreatmentEvents[[i]] + .5
ControlEvents[[i]] <- ControlEvents[[i]] + .5
TreatmentNoEvents[[i]] <- TreatmentNoEvents[[i]] + .5
ControlNoEvents[[i]] <- ControlNoEvents[[i]] + .5
}
#Remove unwanted column.
data <- strata.df[[i]][c('X','Y')]
#Recreate the factors to take out the levels we don't use in this group.
data$X <- factor(data$X)
data$Y <- factor(data$Y)
#Generate count table.
countTable.list[[i]] <- table(data)
#We can't run the Fisher test if there aren't two groups in either the x or y columns.
#Generate a data frame to check the groupings. Syntax is easier if it's in a data.frame first.
tempFrame <- data.frame(countTable.list[[i]])
#Check the number of unique X and Y values.
if(length(unique(tempFrame$X)) < 2 || length(unique(tempFrame$Y)) < 2)
{
#Since we don't have enough groups we can't run the Fisher test.
# NOTE(review): assigning $p.value below coerces these NA scalars into
# lists; this works but emits a coercion warning on current R versions.
fisherResults <- NA
chiResults <- NA
fisherResults$p.value <- NA
chiResults$p.value <- NA
chiResults$statistic <- NA
}
else
{
#Get fisher test statistics.
fisherResults <- fisher.test(countTable.list[[i]],simulate.p.value=FALSE)
#Get chi^2 test statistics.
chiResults <- chisq.test(countTable.list[[i]])
}
#The name of the header reflects the name of the function, we'll call the header Total so it appears "pretty" in the table.
Total <- sum
#Add marginal frequencies for X and Y
countTable.list[[i]] <- addmargins(countTable.list[[i]], FUN = Total)
#Start writing the data to the file. Start with the stratification name.
write(paste("stratificationName=",i,sep=""), file=countsFile,append=T)
#Next is the table of counts.
write.table(countTable.list[[i]], countsFile,quote= F, sep= "\t",row.names=T,col.names=T, append=T)
#Write the stats to the file.
write(paste("fisherResults.p.Value", format(fisherResults$p.value,digits=3),NA,NA,sep="\t"), countsFile, append=T)
write(paste("chiResults.p.value", format(chiResults$p.value,digits=3),NA,NA,sep="\t"), countsFile, append=T)
write(paste("chiResults.statistic", format(chiResults$statistic,digits=3),NA,NA,sep="\t"), countsFile, append=T)
}
#Plot lists of counts.
# NOTE(review): the positional argument 2 binds to fileNameQualifier in
# makeForestPlotForRCTs, so output files get a "_2" suffix -- confirm intended.
makeForestPlotForRCTs(
TreatmentEvents,
ControlEvents,
TreatmentNoEvents,
ControlNoEvents,
strata,
statistic=statistic,
2,
output.file=output.file,
concept.dependent=concept.dependent,
concept.independent=concept.independent,
concept.reference = concept.reference)
}
###############################################################################
# The below method was adopted and modified from the
# http://a-little-book-of-r-for-biomedical-statistics.readthedocs.org/en/latest/_sources/src/biomedicalstats.txt
# and the rmeta pkg documentation
##############################################################################
# Run a Mantel-Haenszel meta-analysis (rmeta::meta.MH) over the per-stratum
# 2x2 counts, write the per-stratum estimate table to
# "statisticByStratificationTable.txt", and render the forest plot to
# "<output.file>.png" via Cairo.
# NOTE(review): str_extract below needs stringr, which is loaded by the
# calling script rather than here -- confirm this function is never called
# standalone.
makeForestPlotForRCTs <- function(
TreatmentEvents,
ControlEvents,
TreatmentNoEvents,
ControlNoEvents,
strataList,
statistic="OR",
fileNameQualifier="",
output.file,
concept.dependent,
concept.independent,
concept.reference)
{
require("rmeta")
library(Cairo)
#If we have a qualifier we need to throw a "_" after the name of the file.
if(fileNameQualifier != '') fileNameQualifier <- paste('_',fileNameQualifier,sep="");
#This is the name of the output file for the statistical tests.
# NOTE(review): forestPlotResultsFile is built but never written to in this
# function -- apparently dead code.
forestPlotResultsFile <- paste("forestPlotTextTable",fileNameQualifier,".txt",sep="");
numstrata <- length(strataList)
ntrt.vec <- vector() #trt_total.vec
nctrl.vec <- vector() #ctrl_total.vec
ptrt.vec <- vector() #ptrt.vec
pctrl.vec <- vector() #pctrl.vec
#Loop through each of the strata to get the counts for the Exposed and unexposed groups.
for (i in strataList)
{
#Make an array "nctrl.vec" of the Number of subjects in control group, in each stratum
nctrl.vec[i] <- ControlEvents[[i]] + ControlNoEvents[[i]]
#Make an array "ntrt.vec" of the Number of subjects in treated/exposed group, in each stratum
ntrt.vec[i] <- TreatmentEvents[[i]] + TreatmentNoEvents[[i]]
#Make an array "pctrl.vec" of the Number of events in control group, in each stratum
pctrl.vec[i] <- ControlEvents[[i]]
#Make an array "ptrt.vec" of the Number of events in treated/exposed group, in each stratum
ptrt.vec[i] <- TreatmentEvents[[i]]
}
#Do a text replace on the strata name if applicable.
if(length(strataList) ==1 && is.na(match("NA", strataList))) strataList=c("All")
#Build a (Mantel-Haenszel) meta-analysis.
myMH <- meta.MH(ntrt.vec, nctrl.vec, ptrt.vec, pctrl.vec, conf.level=0.95, names=strataList,statistic=statistic)
#Build vectors to hold statistics.
# NOTE(review): `mean` here shadows base::mean within this function body.
mean <-vector()
lower<-vector()
upper<-vector()
#Point estimates from studies
logRatio <- NA
#Standard errors of Point estimates
selogRatio <- NA
#Precision: box area is proportional to this. 1/se^2 is the default
#Run the selected statistical test.
if(statistic =="OR")
{
#This is the label that tells us what type of test was run.
stat_label <- "Odds Ratio"
mean<- c(myMH$logOR)
mean[is.infinite(mean)] <- NA
# +/- 2*se is used as an approximate 95% CI (rather than 1.96*se).
lower<- mean-c(myMH$selogOR)*2
upper<- mean+c(myMH$selogOR)*2
logRatio <- myMH$logOR
selogRatio <- myMH$selogOR
}
else
{
stat_label<-"Relative Risk"
mean<- c(myMH$logRR)
mean[is.infinite(mean)] <- NA
lower<- mean-c(myMH$selogRR)*2
upper<- mean+c(myMH$selogRR)*2
logRatio <- myMH$logRR
selogRatio <- myMH$selogRR
}
#If the odds ratio/relative risk cannot be calculated, replace the Inf with NA.
upper[is.infinite(upper)] <- NA
lower[is.infinite(lower)] <- NA
logRatio[is.infinite(logRatio)] <- NA
selogRatio[is.infinite(selogRatio)] <- NA
stratNames <- as.character(strataList)
stratLabel <- "Stratification"
# Three-column text table: stratum name, back-transformed estimate, 95% CI.
tabletext<-cbind(c(stratLabel,stratNames,"Summary"),
c(stat_label,format(exp(mean),digits=3),format(exp(myMH$logMH),digits=3)),
c("Est. ( 95% CI )",paste("(",format(exp(lower),digits=3),"-",format(exp(upper),digits=3),")",sep=" "),""))
write.table(tabletext, "statisticByStratificationTable.txt", row.names=F,col.names=F, sep="\t", quote= F)
#This is the name of the output image file.
imageFileName <- paste(output.file,".png",sep="")
#This initializes our image capture object.
# NOTE(review): the device is opened before the all-NA check below, so the
# stop() path leaves an open (empty) PNG device behind -- confirm acceptable.
CairoPNG(file=imageFileName,width=800,height=600)
#Increase the font size of the plot a small amount.
par(cex=1.3)
if(all(is.na(logRatio)) | all(is.na(selogRatio)))
{
stop("||FRIENDLY||The Forest Plot failed to calculate ratios for all of your stratifications, The plot cannot be created.")
}
# Shorten backslash-delimited stratum names to their last path level.
myMH$names[grep("\\\\", myMH$names)] <- str_extract(myMH$names[grep("\\\\", myMH$names)],"(\\\\.+\\\\\\s*)+?$")
#Plot the forest plot.
metaplot( mn = logRatio,
se = selogRatio,
labels = myMH$names,
summn = myMH$logMH,
sumse = myMH$selogMH,
sumnn = myMH$selogMH^-2,
ylab = "Stratification Variable",
xlab = stat_label,
logeffect = TRUE)
dev.off()
}
getStudyLabels <- function(concept.independent) {
  # Split the pipe-delimited concept string into individual concept paths.
  labels <- unlist(strsplit(concept.independent, "\\|"))
  # Drop the first three backslash-delimited levels of each path.
  labels <- sub("^\\\\(.*?\\\\){3}", "", labels, perl = TRUE)
  # Trim surrounding whitespace and any leading/trailing backslash,
  # then render the remaining separators as dashes.
  labels <- gsub("^\\s+|\\s+$", "", labels)
  labels <- gsub("^\\\\|\\\\$", "", labels)
  gsub("\\\\", "-", labels)
}
getEventLabels <- function(concept.dependent) {
  # Split the pipe-delimited dependent-concept string into individual paths.
  labels <- unlist(strsplit(concept.dependent, "\\|"))
  # Strip the first three backslash-delimited levels from each path.
  labels <- sub("^\\\\(.*?\\\\){3}", "", labels, perl = TRUE)
  # Clean up whitespace and boundary backslashes; use dashes as separators.
  labels <- gsub("^\\s+|\\s+$", "", labels)
  labels <- gsub("^\\\\|\\\\$", "", labels)
  gsub("\\\\", "-", labels)
}
getFixedLengthLabel <- function(label, length, brackets = F) {
  # Keep only the text after the last "-" (the leaf of an "a-b-c" style label).
  last.dash <- sapply(gregexpr("\\-", label), tail, 1)
  if (last.dash != -1) {
    label <- str_sub(label, last.dash + 1)
  }
  # Truncate to the requested width, appending an ellipsis when text was cut.
  clipped <- str_sub(label, 1, length)
  label <- if (str_length(label) > length) str_c(clipped, "...") else clipped
  # Optionally wrap in parentheses before centering in a fixed-width field.
  if (brackets) {
    label <- str_c("(", label, ")")
  }
  str_pad(label, length + 4, "both")
}
| /Rmodules/web-app/Rscripts/ForestPlot/ForestPlotLoader.R | permissive | tranSMART-Foundation/transmart | R | false | false | 15,906 | r | ##########################################################################
# Copyright 2008-2012 Janssen Research & Development, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###########################################################################
###########################################################################
#ForestPlotLoader
#This will load our input files into variables so we can run the box plot with ANOVA.
###########################################################################
ForestPlot.loader <- function(
  input.filename,
  statistic = "OR",
  concept.dependent,
  concept.independent,
  concept.reference,
  output.file ="ForestPlot"
)
{
  # Packages used by this script (str_extract below; plotting and
  # meta-analysis helpers are used by the downstream functions).
  library(stringr)
  library(plyr)
  library(ggplot2)
  library(Cairo)
  library("rmeta")
  # Read the tab-delimited graph data exported by the application.
  line.data<-read.delim(input.filename,header=T)
  # Both variables need at least two observed groups for the plot to make sense.
  if(length(unique(line.data$X)) < 2) stop("||FRIENDLY||The Forest Plot test requires at least two groups for each variable. The intersection of the groups you selected for the independent variable with the data available in the dependent variable yielded only one group with a non-zero number of subjects in the independent variable. Please verify your input and try again.")
  if(length(unique(line.data$Y)) < 2) stop("||FRIENDLY||The Forest Plot requires at least two groups for each variable. The intersection of the groups you selected for the dependent variable with the data available in the independent variable yielded only one group with a non-zero number of subjects in the dependent variable. Please verify your input and try again.")
  # Make the concept paths pretty by keeping only the last two path levels.
  # Entries without backslashes (e.g. "bin1234" or "Other") are left alone.
  line.data$Z <- as.character(line.data$Z)
  line.data$Z[grep("\\\\", line.data$Z)] <- str_extract(line.data$Z[grep("\\\\", line.data$Z)],"(\\\\.+\\\\.+\\\\\\s*)+?$")
  line.data$Y <- as.character(line.data$Y)
  line.data$Y[grep("\\\\", line.data$Y)] <- str_extract(line.data$Y[grep("\\\\", line.data$Y)],"(\\\\.+\\\\.+\\\\\\s*)+?$")
  line.data$X <- as.character(line.data$X)
  line.data$X[grep("\\\\", line.data$X)] <- str_extract(line.data$X[grep("\\\\", line.data$X)],"(\\\\.+\\\\.+\\\\\\s*)+?$")
  # Dispatch on the grouping columns present.
  # BUG FIX: the grouped branches previously called ForestPlot.loader.single
  # without the output.file/statistic/concept.* arguments (whose declared
  # defaults such as `statistic=statistic` are self-referential and cannot
  # be evaluated), so any grouped run failed. The required arguments are now
  # forwarded explicitly. The stray, undefined `groupedData` argument of the
  # original lapply() call has also been removed.
  if(("GROUP" %in% colnames(line.data)) && ("GROUP.1" %in% colnames(line.data)))
  {
    # This is a list of the distinct outer groups.
    groupList <- matrix(unique(line.data$GROUP))
    # For each "GROUP" run the single-plot routine on each "GROUP.1" slice.
    subFunctionForForestPlot <- function(currentGroup)
    {
      # Pull the records belonging to the current outer group.
      currentGroupingData <- line.data[which(line.data$GROUP==currentGroup),]
      trimmedGroupName <- gsub("^\\s+|\\s+$", "",currentGroup)
      lapply(split(currentGroupingData,currentGroupingData$GROUP.1),
             ForestPlot.loader.single, "GROUP.1", trimmedGroupName,
             output.file = output.file,
             statistic = statistic,
             concept.dependent = concept.dependent,
             concept.independent = concept.independent,
             concept.reference = concept.reference)
    }
    lapply(groupList, subFunctionForForestPlot)
  }
  else if("GROUP.1" %in% colnames(line.data))
  {
    lapply(split(line.data, line.data$GROUP.1),
           ForestPlot.loader.single, "GROUP.1", "",
           output.file = output.file,
           statistic = statistic,
           concept.dependent = concept.dependent,
           concept.independent = concept.independent,
           concept.reference = concept.reference)
  }
  else if("GROUP" %in% colnames(line.data))
  {
    lapply(split(line.data, line.data$GROUP),
           ForestPlot.loader.single, "GROUP", "",
           output.file = output.file,
           statistic = statistic,
           concept.dependent = concept.dependent,
           concept.independent = concept.independent,
           concept.reference = concept.reference)
  }
  else
  {
    ForestPlot.loader.single(
      line.data,
      '',
      '',
      output.file = output.file,
      statistic = statistic,
      concept.dependent = concept.dependent,
      concept.independent = concept.independent,
      concept.reference = concept.reference)
  }
}
# Build the per-stratum count tables, Fisher / chi-square statistics and the
# forest plot for a single data chunk (one GROUP/GROUP.1 slice, or the whole
# data set). Results are appended to "statisticalTests<qualifier>.txt" and
# "Count<qualifier>.txt"; plotting is delegated to makeForestPlotForRCTs.
# NOTE(review): the defaults `statistic=statistic` etc. in the signature are
# self-referential promises and cannot actually be evaluated -- every caller
# must pass these arguments explicitly. Confirm before relying on defaults.
ForestPlot.loader.single <-
function(
dataChunk,
splitColumn,
fileNameQualifier,
output.file,
statistic=statistic,
concept.dependent=concept.dependent,
concept.independent=concept.independent,
concept.reference
)
{
#This is the name of the output file for the statistical tests.
statisticalTestsResultsFile <- paste("statisticalTests",fileNameQualifier,".txt",sep="")
#This is the name of the output file for the count table.
countsFile <- paste("Count",fileNameQualifier,".txt",sep="")
#This is the current group we are generating the statistics for.
currentGroup <- unique(dataChunk[[splitColumn]])
#If we have a group column we should write the group name to the file.
if(splitColumn %in% colnames(dataChunk)) write(paste("name=",currentGroup,sep=""), file=statisticalTestsResultsFile,append=T)
if(splitColumn %in% colnames(dataChunk)) write(paste("name=",currentGroup,sep=""), file=countsFile,append=T)
#Remove any unwanted columns.
# X is the dependent concept column
# Y is the independent concept column
# Z is the stratification concept column
dataChunk <- dataChunk[c('X','REFERENCE','Y','OUTCOME','Z')]
#Extract the list of all distinct stratification entries.
strata <- unique(dataChunk[,c('Z')])
# With no stratification variable at all, treat everything as one stratum.
if(all(is.na(strata)))
{
dataChunk$Z <- 'ALL'
strata <- c('ALL')
}
#number of stratification concepts
numstrata <- length(strata)
#create a sub matrix of dependent & independent variables
#by separating the dataChunk by each stratification concept
# NOTE(review): split() alone already returns the per-stratum list used below;
# the assign() additionally writes each stratum into the global environment
# (objects named "Z<stratum>") as a side effect -- confirm nothing depends on
# those globals before simplifying.
strata.df <- invisible(lapply(split(dataChunk, dataChunk$Z), function(x) { assign(paste0("Z", x$Z[1]), x, pos=.GlobalEnv) }))
#Variable to hold the list of all the countTables,a countTable is generated for each stratification
countTable.list = list()
#Variables to hold the 2x2 cell counts per stratum.
TreatmentEvents = list()
ControlEvents = list()
TreatmentNoEvents = list()
ControlNoEvents = list()
#row & col names
# NOTE(review): studylabels/eventlabels are computed here but never used in
# this function -- possibly leftovers from an earlier labelling scheme.
studylabels<-getStudyLabels(paste(concept.independent,concept.reference,sep="|"))
eventlabels<-getEventLabels(concept.dependent)
#For each stratification entry, create a count table.
for (i in strata)
{
#At the same time that we count for the Fisher test we will count for the Forest Plot. These counts are done separately so we can keep track of the Control and "Top Event".
TreatmentEvents[[i]] <- nrow(subset(strata.df[[i]], (REFERENCE == 0) & (OUTCOME == 1)))
ControlEvents[[i]] <- nrow(subset(strata.df[[i]], (REFERENCE == 1) & (OUTCOME == 1)))
TreatmentNoEvents[[i]] <- nrow(subset(strata.df[[i]], (REFERENCE == 0) & (OUTCOME == 0)))
ControlNoEvents[[i]] <- nrow(subset(strata.df[[i]], (REFERENCE == 1) & (OUTCOME == 0)))
# NOTE(review): any() on a single positive count is TRUE, so this applies the
# 0.5 continuity correction whenever ANY cell is non-zero (i.e. almost
# always). The conventional correction is applied when a cell IS zero
# (e.g. any(... == 0)) -- confirm the intended condition.
if(any(TreatmentEvents[[i]]) | any(ControlEvents[[i]]) | any(TreatmentNoEvents[[i]]) | any(ControlNoEvents[[i]]))
{
TreatmentEvents[[i]] <- TreatmentEvents[[i]] + .5
ControlEvents[[i]] <- ControlEvents[[i]] + .5
TreatmentNoEvents[[i]] <- TreatmentNoEvents[[i]] + .5
ControlNoEvents[[i]] <- ControlNoEvents[[i]] + .5
}
#Remove unwanted column.
data <- strata.df[[i]][c('X','Y')]
#Recreate the factors to take out the levels we don't use in this group.
data$X <- factor(data$X)
data$Y <- factor(data$Y)
#Generate count table.
countTable.list[[i]] <- table(data)
#We can't run the Fisher test if there aren't two groups in either the x or y columns.
#Generate a data frame to check the groupings. Syntax is easier if it's in a data.frame first.
tempFrame <- data.frame(countTable.list[[i]])
#Check the number of unique X and Y values.
if(length(unique(tempFrame$X)) < 2 || length(unique(tempFrame$Y)) < 2)
{
#Since we don't have enough groups we can't run the Fisher test.
# NOTE(review): assigning $p.value below coerces these NA scalars into
# lists; this works but emits a coercion warning on current R versions.
fisherResults <- NA
chiResults <- NA
fisherResults$p.value <- NA
chiResults$p.value <- NA
chiResults$statistic <- NA
}
else
{
#Get fisher test statistics.
fisherResults <- fisher.test(countTable.list[[i]],simulate.p.value=FALSE)
#Get chi^2 test statistics.
chiResults <- chisq.test(countTable.list[[i]])
}
#The name of the header reflects the name of the function, we'll call the header Total so it appears "pretty" in the table.
Total <- sum
#Add marginal frequencies for X and Y
countTable.list[[i]] <- addmargins(countTable.list[[i]], FUN = Total)
#Start writing the data to the file. Start with the stratification name.
write(paste("stratificationName=",i,sep=""), file=countsFile,append=T)
#Next is the table of counts.
write.table(countTable.list[[i]], countsFile,quote= F, sep= "\t",row.names=T,col.names=T, append=T)
#Write the stats to the file.
write(paste("fisherResults.p.Value", format(fisherResults$p.value,digits=3),NA,NA,sep="\t"), countsFile, append=T)
write(paste("chiResults.p.value", format(chiResults$p.value,digits=3),NA,NA,sep="\t"), countsFile, append=T)
write(paste("chiResults.statistic", format(chiResults$statistic,digits=3),NA,NA,sep="\t"), countsFile, append=T)
}
#Plot lists of counts.
# NOTE(review): the positional argument 2 binds to fileNameQualifier in
# makeForestPlotForRCTs, so output files get a "_2" suffix -- confirm intended.
makeForestPlotForRCTs(
TreatmentEvents,
ControlEvents,
TreatmentNoEvents,
ControlNoEvents,
strata,
statistic=statistic,
2,
output.file=output.file,
concept.dependent=concept.dependent,
concept.independent=concept.independent,
concept.reference = concept.reference)
}
###############################################################################
# The below method was adopted and modified from the
# http://a-little-book-of-r-for-biomedical-statistics.readthedocs.org/en/latest/_sources/src/biomedicalstats.txt
# and the rmeta pkg documentation
##############################################################################
# Build a Mantel-Haenszel meta-analysis across strata for a 2x2
# (treatment/control x event/no-event) design and render a forest plot
# to a PNG file.
#
# Arguments:
#   TreatmentEvents, ControlEvents, TreatmentNoEvents, ControlNoEvents -
#     per-stratum counts, indexable with `[[` by the names in strataList.
#   strataList        - names of the strata to include.
#   statistic         - "OR" for odds ratio; any other value gives relative risk.
#   fileNameQualifier - optional suffix inserted into text-output file names.
#   output.file       - base name (without extension) for the PNG image.
#   concept.dependent / concept.independent / concept.reference -
#     concept identifiers; not referenced in this body -- presumably kept
#     for interface compatibility with callers, TODO confirm.
#
# Side effects: writes "statisticByStratificationTable.txt" and
# "<output.file>.png" to the working directory; stops with a
# "||FRIENDLY||" error when no ratio can be computed for any stratum.
makeForestPlotForRCTs <- function(
TreatmentEvents,
ControlEvents,
TreatmentNoEvents,
ControlNoEvents,
strataList,
statistic="OR",
fileNameQualifier="",
output.file,
concept.dependent,
concept.independent,
concept.reference)
{
require("rmeta")
library(Cairo)
#If we have a qualifier we need to throw a "_" after the name of the file.
if(fileNameQualifier != '') fileNameQualifier <- paste('_',fileNameQualifier,sep="");
#This is the name of the output file for the statistical tests.
# NOTE(review): forestPlotResultsFile is assigned but never used below --
# possibly dead code or intended for a write that was removed.
forestPlotResultsFile <- paste("forestPlotTextTable",fileNameQualifier,".txt",sep="");
numstrata <- length(strataList)
ntrt.vec <- vector() #trt_total.vec
nctrl.vec <- vector() #ctrl_total.vec
ptrt.vec <- vector() #ptrt.vec
pctrl.vec <- vector() #pctrl.vec
#Loop through each of the strata to get the counts for the Exposed and unexposed groups.
for (i in strataList)
{
#Make an array "nctrl.vec" of the Number of subjects in control group, in each stratum
nctrl.vec[i] <- ControlEvents[[i]] + ControlNoEvents[[i]]
#Make an array "ntrt.vec" of the Number of subjects in treated/exposed group, in each stratum
ntrt.vec[i] <- TreatmentEvents[[i]] + TreatmentNoEvents[[i]]
#Make an array "pctrl.vec" of the Number of events in control group, in each stratum
pctrl.vec[i] <- ControlEvents[[i]]
#Make an array "ptrt.vec" of the Number of events in treated/exposed group, in each stratum
ptrt.vec[i] <- TreatmentEvents[[i]]
}
#Do a text replace on the strata name if applicable.
if(length(strataList) ==1 && is.na(match("NA", strataList))) strataList=c("All")
#Build a (Mantel-Haenszel) meta-analysis.
myMH <- meta.MH(ntrt.vec, nctrl.vec, ptrt.vec, pctrl.vec, conf.level=0.95, names=strataList,statistic=statistic)
#Build vectors to hold statistics.
mean <-vector()
lower<-vector()
upper<-vector()
#Point estimates from studies
logRatio <- NA
#Standard errors of Point estimates
selogRatio <- NA
#Precision: box area is proportional to this. 1/se^2 is the default
#Run the selected statistical test.
if(statistic =="OR")
{
#This is the label that tells us what type of test was run.
stat_label <- "Odds Ratio"
mean<- c(myMH$logOR)
mean[is.infinite(mean)] <- NA
# Approximate 95% CI: point estimate +/- 2 standard errors (on log scale).
lower<- mean-c(myMH$selogOR)*2
upper<- mean+c(myMH$selogOR)*2
logRatio <- myMH$logOR
selogRatio <- myMH$selogOR
}
else
{
stat_label<-"Relative Risk"
mean<- c(myMH$logRR)
mean[is.infinite(mean)] <- NA
# Approximate 95% CI: point estimate +/- 2 standard errors (on log scale).
lower<- mean-c(myMH$selogRR)*2
upper<- mean+c(myMH$selogRR)*2
logRatio <- myMH$logRR
selogRatio <- myMH$selogRR
}
#If the odds ratio/relative risk cannot be calculated, replace the Inf with NA.
upper[is.infinite(upper)] <- NA
lower[is.infinite(lower)] <- NA
logRatio[is.infinite(logRatio)] <- NA
selogRatio[is.infinite(selogRatio)] <- NA
stratNames <- as.character(strataList)
stratLabel <- "Stratification"
# Three-column text table: stratum name, exponentiated estimate (back on the
# OR/RR scale), and the formatted 95% CI; last row holds the MH summary.
tabletext<-cbind(c(stratLabel,stratNames,"Summary"),
c(stat_label,format(exp(mean),digits=3),format(exp(myMH$logMH),digits=3)),
c("Est. ( 95% CI )",paste("(",format(exp(lower),digits=3),"-",format(exp(upper),digits=3),")",sep=" "),""))
write.table(tabletext, "statisticByStratificationTable.txt", row.names=F,col.names=F, sep="\t", quote= F)
#This is the name of the output image file.
imageFileName <- paste(output.file,".png",sep="")
#This initializes our image capture object.
CairoPNG(file=imageFileName,width=800,height=600)
#Increase the font size of the plot a small amount.
par(cex=1.3)
if(all(is.na(logRatio)) | all(is.na(selogRatio)))
{
stop("||FRIENDLY||The Forest Plot failed to calculate ratios for all of your stratifications, The plot cannot be created.")
}
# For names containing backslashes, keep only the trailing backslash-delimited
# segment(s) -- presumably shortening concept paths for display; TODO confirm
# the str_extract pattern against real stratum names.
myMH$names[grep("\\\\", myMH$names)] <- str_extract(myMH$names[grep("\\\\", myMH$names)],"(\\\\.+\\\\\\s*)+?$")
#Plot the forest plot.
metaplot( mn = logRatio,
se = selogRatio,
labels = myMH$names,
summn = myMH$logMH,
sumse = myMH$selogMH,
sumnn = myMH$selogMH^-2,
ylab = "Stratification Variable",
xlab = stat_label,
logeffect = TRUE)
dev.off()
}
# Turn a pipe-delimited independent-concept string into display labels:
# drop the first three backslash-delimited path components, trim whitespace
# and surrounding backslashes, then render remaining separators as dashes.
getStudyLabels <- function(concept.independent) {
  # One entry per pipe-separated concept path.
  entries <- unlist(strsplit(concept.independent, "\\|"))
  # Strip the leading backslash plus the first three path components.
  entries <- sub("^\\\\(.*?\\\\){3}", "", entries, perl = TRUE)
  # Remove surrounding whitespace, then any leading/trailing backslash.
  entries <- gsub("^\\s+|\\s+$", "", entries)
  entries <- gsub("^\\\\|\\\\$", "", entries)
  # Remaining internal backslashes become dashes for readability.
  gsub("\\\\", "-", entries)
}
# Turn a pipe-delimited dependent-concept string into display labels,
# mirroring getStudyLabels: drop the first three backslash-delimited
# components, tidy the edges, and dash-join what remains.
getEventLabels <- function(concept.dependent) {
  # One entry per pipe-separated concept path.
  parts <- unlist(strsplit(concept.dependent, "\\|"))
  # Strip the leading backslash plus the first three path components.
  parts <- sub("^\\\\(.*?\\\\){3}", "", parts, perl = TRUE)
  # Remove surrounding whitespace, then any leading/trailing backslash.
  parts <- gsub("^\\s+|\\s+$", "", parts)
  parts <- gsub("^\\\\|\\\\$", "", parts)
  # Remaining internal backslashes become dashes for readability.
  gsub("\\\\", "-", parts)
}
# Produce a fixed-width display label: keep only the text after the last
# dash, truncate to `length` characters (appending "..." when truncated),
# optionally wrap in parentheses, and centre-pad to length + 4.
# NOTE: relies on stringr (str_sub/str_length/str_c/str_pad).
getFixedLengthLabel <- function(label, length, brackets = F) {
  # Position of the final "-" in the label; -1 when there is none.
  last_dash <- sapply(gregexpr("\\-", label), tail, 1)
  if (last_dash != -1) {
    # Keep everything after the last dash.
    label <- str_sub(label, last_dash + 1)
  }
  if (str_length(label) > length) {
    # Too long: truncate and flag the truncation with an ellipsis.
    label <- str_c(str_sub(label, 1, length), "...")
  } else {
    label <- str_sub(label, 1, length)
  }
  if (brackets) {
    label <- str_c("(", label, ")")
  }
  # Centre the label within a field of length + 4 spaces.
  return(str_pad(label, length + 4, "both"))
}
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix takes a matrix and creates a list that contains accessor methods for that matrix and it's inverse.
## makeCacheMatrix wraps a matrix in a list of accessor closures so that
## its inverse can be cached alongside it. Replacing the matrix via
## set_matrix invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not yet computed".
  cached_inv <- NULL
  set_matrix <- function(y) {
    x <<- y
    # New matrix, so any stored inverse is stale.
    cached_inv <<- NULL
  }
  get_matrix <- function() {
    x
  }
  set_inverse <- function(inv) {
    cached_inv <<- inv
  }
  get_inverse <- function() {
    cached_inv
  }
  list(
    set_matrix = set_matrix,
    get_matrix = get_matrix,
    set_inverse = set_inverse,
    get_inverse = get_inverse
  )
}
## cacheSolve takes a cacheMatrix from the method above, and checks for an inverse of
## the matrix stored in the cache and returns it. If no inverse was cached, it calculates,
## caches and returns the inverse.
## cacheSolve returns the inverse of the matrix held in a cacheMatrix
## object `x` (as built by makeCacheMatrix). A previously cached inverse
## is returned directly; otherwise the inverse is computed with solve(),
## stored in the cache, and returned.
##
## Fixes: `...` was accepted but silently dropped -- it is now forwarded
## to solve(); the local variable no longer shadows base::matrix.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$get_inverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Cache miss: compute the inverse, forwarding any extra solve() args
  # (e.g. tol), then store it for subsequent calls.
  mat <- x$get_matrix()
  inv <- solve(mat, ...)
  x$set_inverse(inv)
  inv
}
| /cachematrix.R | no_license | AppTrain/ProgrammingAssignment2 | R | false | false | 1,099 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix takes a matrix and creates a list that contains accessor methods for that matrix and it's inverse.
## makeCacheMatrix builds a "cache matrix": a list of closures sharing one
## environment that holds the matrix and (lazily) its inverse. Setting a
## new matrix clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Inverse cache; NULL until set_inverse() stores a value.
  inv_cache <- NULL
  list(
    set_matrix = function(y) {
      x <<- y
      # Replacing the matrix invalidates the cached inverse.
      inv_cache <<- NULL
    },
    get_matrix = function() x,
    set_inverse = function(value) inv_cache <<- value,
    get_inverse = function() inv_cache
  )
}
## cacheSolve takes a cacheMatrix from the method above, and checks for an inverse of
## the matrix stored in the cache and returns it. If no inverse was cached, it calculates,
## caches and returns the inverse.
## cacheSolve returns the inverse of the matrix wrapped by a cacheMatrix
## object `x`. If an inverse is already cached it is returned immediately;
## otherwise it is computed, cached via set_inverse(), and returned.
##
## Fixes: `...` was accepted but never passed on -- it is now forwarded
## to solve(); the local variable no longer shadows base::matrix.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$get_inverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Cache miss: compute, store, and return the inverse (extra args such
  # as tol are forwarded to solve()).
  mat <- x$get_matrix()
  inv <- solve(mat, ...)
  x$set_inverse(inv)
  inv
}
|
# Granger-causality screen between columns 22-25 and columns 1-21 of `a`.
# Assumes `a` is a numeric multivariate time-series object and that the
# `vars` package (VAR, causality) is loaded -- TODO confirm; neither is
# defined in this chunk.
# Result: 4x21 matrix of p-values, row = (j - 21), column = i.
cs1.5=matrix(data=NA,nrow=4,ncol=21)
for (i in 1:21)
{ for (j in 22:25)
{
# Fit a VAR(5) on the twice-differenced pair and record the Granger-test
# p-value (presumably testing the first series causing the second --
# confirm against vars::causality's default `cause` argument).
v=VAR(cbind(diff(diff(a[,j])),diff(diff(a[,i]))),p=5)
cs1.5[j-21,i]=causality(v)$Granger$p.value
}}
# Print the p-value matrix.
cs1.5
for (i in 1:21)
{ for (j in 22:25)
{
v=VAR(cbind(diff(diff(a[,j])),diff(diff(a[,i]))),p=5)
cs1.5[j-21,i]=causality(v)$Granger$p.value
}}
cs1.5 |
#' Occupancy detection Function
#'
#' Run occupancy detection models using the output from \code{formatOccData}
#'
#' This function requires both the R package R2jags and the program JAGS.
#' These are not installed by default when sparta is loaded and so should be
#' installed by the user. More details can be found in the vignette.
#'
#' @param taxa_name A character giving the name of the species to be modelled.
#' @param occDetdata The 2nd element of the object returned by formatOccData.
#' @param spp_vis The 1st element of the object returned by formatOccData.
#' @param n_iterations numeric, An MCMC parameter - The number of interations
#' @param nyr numeric, the minimum number of periods on which a site must have records for it
#' to be included in the models. Defaults to 2
#' @param burnin numeric, An MCMC parameter - The length of the burn in
#' @param thinning numeric, An MCMC parameter - The thinning factor
#' @param n_chains numeric, an MCMC parameter - The number of chains to be run
#' @param write_results logical, should results be saved to \code{output_dir}. This is
#' recommended since these models can take a long time to run. If \code{TRUE} (default)
#' the results from each species will be saved as an .rdata file once the species
#' has run. This prevents loss of data should anything go wrong.
#' @param output_dir character, the output directory were the output for each taxa will be saved
#' as .rdata files. This will default to the working directory
#' @param modeltype A character string or vector of strings that specifies the model to use. See details. If
#' used then model.function is ignored.
#' @param regional_codes A data.frame object detailing which site is associated with which region.
#' each row designates a site and each column represents a region. The first column represents the
#' site name (as in \code{site}). Subsequent columns are named for each regions with 1 representing
#' the site is in that region and 0 that it is not. NOTE a site should only be in one region
#' @param region_aggs A named list giving aggregations of regions that you want trend
#' estimates for. For example \code{region_aggs = list(GB = c('england', 'scotland', 'wales'))}
#' will produced a trend for GB (Great Britain) as well as its constituent nations. Note that
#' 'england', scotland' and 'wales' must appear as names of columns in \code{regional_codes}.
#' More than one aggregate can be given, eg \code{region_aggs = list(GB = c('england', 'scotland',
#' 'wales'), UK = c('england', 'scotland', 'wales', 'northern_ireland'))}.
#' @param max_year numeric, final year to which analysis will be run, this can be set if it is beyond
#' the limit of the dataset. Defaults to final year of the dataset.
#' @param seed numeric, uses \code{set.seed} to set the random number seed. Setting
#' this number ensures repeatable analyses
#' @param model.function optionally a user defined BUGS model coded as a function (see \code{?jags},
#' including the example there, for how this is done)
#' @param additional.parameters A character vector of additional parameters to monitor
#' @param additional.BUGS.elements A named list giving additional BUGS elements passed
#' to \code{R2jags::jags} 'data' argument
#' @param additional.init.values A named list giving user specified initial values to
#' be added to the defaults.
#' @param return_data Logical, if \code{TRUE} (default) the BUGS data object is returned with the data
#' @param saveMatrix Logical, if \code{FALSE} (default) the sims.matrix element of the jags object is omitted, in order to reduce the filesize.
#' @param criterion Determines whether the model should be run. If an integer then this defines the threshold number of records (50 in Outhwaite et al 2019).
#' Other options are `EqualWt` or `HighSpec`, which define the application of "rules of thumb" defined in Pocock et al 2019.
#' Defaults to 1, in which case the model is applied for so long there is a single record of the focal species.
#' @param provenance An optional text string allowing the user to identify the dataset.
#' @param rem_aggs_with_missing_regions An option which if TRUE will remove all aggregates which contain at least one region with no data.
#' If FALSE, only aggregates where ALL regions in that aggregate contain no data, are dropped. Defaults to TRUE
#'
#' @details \code{modeltype} is used to choose the model as well as the associated initial values,
#' and parameters to monitor. Elements to choose from can be separated into the following components:
#'
#' A. Prior type: this has 3 options, each of which was tested in Outhwaite et al (in review):
#' 1. sparta - This uses the same model as in Isaac et al (2014).
#' 2. indran - This is the adaptive stationary model.
#' 3. ranwalk - This is the random walk model.
#'
#' B. Hyperprior type: This has 3 options, each of these are discussed in Outhwaite et al (in review):
#' 1. halfuniform - the original formulation in Isaac et al (2014).
#' 2. halfcauchy - preferred form, tested in Outhwaite et al (2018).
#' 3. inversegamma - alternative form presented in the literature.
#'
#' C. List length specification: This has 3 options:
#' 1. catlistlength - list length as a categorical variable.
#' 2. contlistlength - list length as a continuous variable.
#' 3. nolistlength - no list length variable.
#'
#' D. Julian date: this is an additional option for including Julian date within the detection model:
#' 1. jul_date.
#'
#' Not all combinations are available in sparta. You will get an error if you try and use
#' a combination that is not supported. There is usually a good reason why that
#' combination is not a good idea. Here are the model elements available:
#'
#' \itemize{
#' \item{\code{"sparta"}}{ - This uses the same model as in Isaac et al (2014)}
#' \item{\code{"indran"}}{ - Here the prior for the year effect of the state model is modelled as a random effect. This allows the model to adapt to interannual variability.}
#' \item{\code{"ranwalk"}}{ - Here the prior for the year effect of the state model is modelled as a random walk. Each estimate for the year effect is dependent on that of the previous year.}
#' \item{\code{"halfcauchy"}}{ - Includes half-Cauchy hyperpriors for all random effects within the model. The half-Cauchy is a special case of the Student’s t distribution with 1 degree of freedom. }
#' \item{\code{"inversegamma"}}{ - Includes inverse-gamma hyperpriors for random effects within the model}
#' \item{\code{"catlistlength"}}{ - This specifies that list length should be considered as a categorical variable. There are 3 classes, lists of length 1, 2-3, and 4 and over. If none of the list length options are specified 'contlistlength' is used}
#' \item{\code{"contlistlength"}}{ - This specifies that list length should be considered as a continuous variable. If none of the list length options are specified 'contlistlength' is used}
#' \item{\code{"nolistlength"}}{ - This specifies that no list length should be used. If none of the list length options are specified 'contlistlength' is used}
#' \item{\code{"jul_date"}}{ - This adds Julian date to the model as a normal distribution with its mean and standard deviation as monitered parameters.}
#' \item{\code{"intercept"}}{ - No longer available. Includes an intercept term in the state and observation model. By including intercept terms, the occupancy and detection probabilities in each year are centred on an overall mean level.}
#' \item{\code{"centering"}}{ - No longer available. Includes hierarchical centering of the model parameters. Centring does not change the model explicitly but writes it in a way that allows parameter estimates to be updated simultaneously.}
#' }
#' These options are provided as a vector of characters, e.g. \code{modeltype = c('indran', 'halfcauchy', 'catlistlength')}
#'
#' @return A list including the model, JAGS model output, the path of the model file used and information on the number of iterations, first year, last year, etc.
#' Key aspects of the model output include:
#' \itemize{
#' \item{\code{"out$model"}}{ - The model used as provided to JAGS. Also contained is a list of fully observed variables. These are those listed in the BUGS data.}
#' \item{\code{"out$BUGSoutput$n.chains"}}{ - The number of Markov chains ran in the MCMC simulations.}
#' \item{\code{"out$BUGSoutput$n.iter"}}{ - The total number of iterations per chain.}
#' \item{\code{"out$BUGSoutput$n.burnin"}}{ - The number of interations discarded from the start as a burn-in period.}
#' \item{\code{"out$BUGSoutput$n.thin"}}{ - The thinning rate used. For example a thinning rate of 3 retains only every third iteration. This is used to reduce autocorrelation.}
#' \item{\code{"out$BUGSoutput$n.keep"}}{ - The number of iterations kept per chain. This is the total number of iterations minus the burn-in then divided by the thinning rate.}
#' \item{\code{"out$BUGSoutput$n.sims"}}{ - The total number of iterations kept.}
#' \item{\code{"out$BUGSoutput$summary"}}{ - A summary table of the monitored parameter. The posterior distribution for each parameter is summaried with the mean, standard deviation, various credible intervals, a formal convergence metric (Rhat), and a measure of effective sample size (n.eff).}
#' \item{\code{"out$BUGSoutput$mean"}}{ - the mean values for all monitored parameters}
#' \item{\code{"out$BUGSoutput$sd"}}{ - the standard deviation values for all monitored parameters}
#' \item{\code{"out$BUGSoutput$median"}}{ - the median values for all monitored parameters}
#' \item{\code{"out$parameters.to.save"}}{ - The names of all monitored parameters.}
#' \item{\code{"out$BUGSoutput$model.file"}}{ - The user provided or temporary generated model file detailing the occupancy model.}
#' \item{\code{"out$n.iter"}}{ - The total number of interations per chain.}
#' \item{\code{"out$DIC"}}{ - Whether the Deviance Information Criterion (DIC) is calculated.}
#' \item{\code{"out$BUGSoutput$sims.list"}}{ - A list of the posterior distribution for each monitored parameter. Use sims.array and sims.matrix if a different format of the posteriors is desired.}
#' \item{\code{"out$SPP_NAME"}}{ - The name of the study species.}
#' \item{\code{"out$min_year"}}{ - First year of data included in the occupancy model run.}
#' \item{\code{"out$max_year"}}{ - Final year of data included in the occupancy model run or final year specified by the user.}
#' \item{\code{"out$nsite"}}{ - The number of unique sites included in the occupancy model run.}
#' \item{\code{"out$nvisits"}}{ - The number of unique visits included in the occupancy model run.}
#' \item{\code{"out$species_sites"}}{ - The number of unique sites the species of interest was recorded in.}
#' \item{\code{"out$species_observations"}}{ - The number of unique records for the species of interest.}
#' \item{\code{"out$regions"}}{ - The names of the regions included in the model run.}
#' \item{\code{"out$region_aggs"}}{ - The names of the region aggregates included in the model run.}
#' \item{\code{"out$nsites_region"}}{ - Named vector containing the number of sites in each region included in the occupancy model run.}
#' }
#'
#' @keywords trends, species, distribution, occupancy, bayesian, modeling
#' @references Isaac, N.J.B., van Strien, A.J., August, T.A., de Zeeuw, M.P. and Roy, D.B. (2014).
#' Statistics for citizen science: extracting signals of change from noisy ecological data.
#' \emph{Methods in Ecology and Evolution}, 5: 1052-1060.
#' @references Outhwaite, C.L., Chandler, R.E., Powney, G.D., Collen, B., Gregory, R.D. & Isaac, N.J.B. (2018).
#' Prior specification in Bayesian occupancy modelling improves analysis of species occurrence data.
#' \emph{Ecological Indicators}, 93: 333-343.
#' @references Pocock, Logie, Isaac, Outhwaite & August. Rapid assessment of the suitability of multi-species citizen science datasets for occupancy trend analysis. \emph{bioRxiv} 813626 (2019) doi:10.1101/813626.
#'
#' @examples
#' \dontrun{
#'
#' set.seed(123)
#'
#' # Create data
#' n <- 15000 #size of dataset
#' nyear <- 20 # number of years in data
#' nSamples <- 100 # set number of dates
#' nSites <- 50 # set number of sites
#'
#' # Create somes dates
#' first <- as.Date(strptime("2010/01/01", format="%Y/%m/%d"))
#' last <- as.Date(strptime(paste(2010+(nyear-1),"/12/31", sep=''), format="%Y/%m/%d"))
#' dt <- last-first
#' rDates <- first + (runif(nSamples)*dt)
#'
#' # taxa are set as random letters
#' taxa <- sample(letters, size = n, TRUE)
#'
#' # sites are visited randomly
#' site <- sample(paste('A', 1:nSites, sep=''), size = n, TRUE)
#'
#' # the date of visit is selected at random from those created earlier
#' survey <- sample(rDates, size = n, TRUE)
#'
#' # Format the data
#' visitData <- formatOccData(taxa = taxa, site = site, survey = survey)
#'
#' # run the model with these data for one species (very small number of iterations)
#' results <- occDetFunc(taxa_name = taxa[1],
#' n_iterations = 50,
#' burnin = 15,
#' occDetdata = visitData$occDetdata,
#' spp_vis = visitData$spp_vis,
#' write_results = FALSE,
#' provenance = "sparta test dataset")
#' }
#' @export
#' @importFrom reshape2 acast
#' @import LearnBayes
occDetFunc <- function (taxa_name, occDetdata, spp_vis, n_iterations = 5000, nyr = 2,
burnin = 1500, thinning = 3, n_chains = 3, write_results = TRUE,
output_dir = getwd(), modeltype = 'sparta', max_year = NULL,
seed = NULL, model.function = NULL, regional_codes = NULL,
region_aggs = NULL, additional.parameters = NULL,
additional.BUGS.elements = NULL, additional.init.values = NULL,
return_data = FALSE, criterion = 1, provenance = NULL, saveMatrix = FALSE,
rem_aggs_with_missing_regions = TRUE){
################## BASIC CHECKS
# first run the error checks
errorChecks(n_iterations = n_iterations, burnin = burnin,
thinning = thinning, n_chains = n_chains, seed = seed)
# Set seed for repeatability
if(!is.null(seed)) set.seed(seed)
# Check the taxa_name is one of my species
if(!taxa_name %in% colnames(spp_vis)) stop('taxa_name is not the name of a taxa in spp_vis')
##################
min_year_original <- min_year <- min(occDetdata$TP)
# only include sites which have more than nyr of records
yps <- rowSums(acast(occDetdata, site ~ TP, length, value.var = 'L') > 0)
sites_to_include <- names(yps[yps >= nyr])
# strip out the visits to sites that were visited in just one year
i <- occDetdata$site %in% sites_to_include
if(sum(i) > 0){
occDetdata <- occDetdata[i,]
spp_vis <- spp_vis[i,]
} else stop(paste0("There are no sites visited in at least ", nyr, " years."))
# calcluate a set of data metrics for this species
data_Metrics <- dataMetrics(sp = taxa_name,
formattedData = list(occDetdata=occDetdata, spp_vis=spp_vis))
is.wholenumber <-
function(x, tol = .Machine$double.eps^0.5) {
if(is.numeric(x)) abs(x - round(x)) < tol
else FALSE
}
# check there is enough data to run a model. If so, proceed with the main event
if(is.wholenumber(criterion)) {
# the criterion is a whole number. this defines the number of records
# check whether the number of records meets this value
proceed <- sum(spp_vis[,taxa_name]) >= criterion
} else if(criterion == "EqualWt") {
proceed <- applyRuleOfThumb(data_Metrics, "EqualWt")
} else if(criterion == "HighSpec") {
proceed <- applyRuleOfThumb(data_Metrics, "HighSpec")
} else
stop("Criterion must be either an integer, `EqualWt` or `HighSpec`")
if(!proceed){
# there is not enough data: set the outputs accordingly
bugs_data <- list(y=0,nsite=0,nvisit=0)
BD_MD <- error_status <- site_match <- modelcode <- NA
warning(paste(taxa_name,
"has insufficient data after site filtering. Either decrease nyr or change the criterion"))
out <- list(message = "No model run: insufficient data")
} else {
# There is enough data: we can proceed with the main event
# Check if R2jags is installed
if (!requireNamespace("R2jags", quietly = TRUE)) {
stop("Package 'R2jags' is needed for the 'occDetModel' function to work. Please insatll this from CRAN. You will also be required to install JAGS, which you can download from https://sourceforge.net/projects/mcmc-jags/files/JAGS/",
call. = FALSE)
}
# If doing regional we take control of model specification
if(!is.null(regional_codes)){
oldmodeltype <- modeltype
modeltype <-c('ranwalk', 'halfcauchy',
c('jul_date', 'catlistlength')[c('jul_date', 'catlistlength') %in% modeltype])
if(!all(oldmodeltype %in% modeltype))
message('When using regional data the model specification will be set to ranwalk, halfcauchy. jul_date and catlistlength can still be specified by the user')
}
if(!'catlistlength' %in% modeltype & !'nolistlength' %in% modeltype){
modeltype <- c(modeltype, 'contlistlength')
}
if(!any(c('catlistlength', 'nolistlength', 'contlistlength') %in% modeltype)){
stop('modeltype should contain one of "catlistlength", "nolistlength", "contlistlength",
which specify the list-length effect to be included')
}
# Do we have JAGS installed - this works only on windows
if(.Platform$OS.type == "windows"){
JAGS_test <- Sys.which(names = 'jags-terminal.exe')
if(JAGS_test[[1]] == '') stop('R cannot find jags-terminal.exe, check that you have installed JAGS')
}
# Add the focal column (was the species recorded on the visit?). Use the spp_vis dataframe to extract this info
nrow1 <- nrow(occDetdata)
occDetdata <- merge(occDetdata, spp_vis[,c("visit", taxa_name)])
if(nrow1 != nrow(occDetdata)) stop('some visits have been lost')
names(occDetdata)[names(occDetdata) == taxa_name] <- "focal"
# If we are using regional codes do some checks
if(!is.null(regional_codes)){
if(!inherits(regional_codes, 'data.frame')) stop("regional_codes should be a data.frame")
# check whether there is a column called "site".
#If not, let's assume that the site column is the first in the dataframe
#NB previous behaviour was to assume *both* that it was column 1 and named 'site'
if(!"site" %in% names(regional_codes)) {
warning(paste0("renaming ", names(regional_codes)[1], " as 'site'"))
names(regional_codes)[1] <- "site"
}
# remove locations that are not in the data
abs_sites <- as.character(regional_codes$site)[!as.character(regional_codes$site) %in% as.character(occDetdata$site)]
if(length(abs_sites) > 0){
warning(paste(length(abs_sites), 'sites are in regional_codes but not in occurrence data'))
}
if(any(is.na(regional_codes))){
warning(paste(sum(is.na(regional_codes)),
"NAs are present in regional_codes, these will be replaced with 0s"))
regional_codes[is.na(regional_codes)] <- 0
}
sites_no_region <- as.character(regional_codes$site[rowSums(regional_codes[,2:ncol(regional_codes)]) == 0])
sites_multi_region <- as.character(regional_codes$site[rowSums(regional_codes[,2:ncol(regional_codes)]) > 1])
site_counts <- table(regional_codes[,1])
sites_multi_row <- names(site_counts[site_counts > 1])
if(length(sites_no_region) > 0)
warning(paste(length(sites_no_region), 'sites are not assigned to a region in regional_codes and will be removed'))
if(length(sites_multi_region) > 0)
warning(paste(length(sites_multi_region), 'sites are assigned to more than one region in regional_codes and will be removed'))
# NOTE: the original referenced 'bad_sites' here, which is not defined until
# later (L1293), so triggering this warning raised an object-not-found error.
if(length(sites_multi_row) > 0)
warning(paste(length(sites_multi_row), 'site(s) are present in more than one region and will be removed'))
# finally check that every site with species data also has a region
sites_no_region2 <- setdiff(sites_to_include, as.character(regional_codes$site))
if(length(sites_no_region2) >= 1)
warning(paste(length(sites_no_region2), 'sites are in occurrence data but not in regional data and will be removed'))
# strip these same sites out of the occDetdata & the regional codes
bad_sites <- unique(c(abs_sites, sites_multi_row, sites_multi_region, sites_no_region, sites_no_region2))
regional_codes <- subset(regional_codes, !site %in% bad_sites)
occDetdata <- subset(occDetdata, !site %in% bad_sites)
}
# If we are using regional aggregates do some checks
if(!is.null(region_aggs)){
if(is.null(regional_codes)) stop('Cannot use regional aggregates if regional_codes is not supplied')
stopifnot(inherits(region_aggs, 'list'))
if(!all(unique(unlist(region_aggs)) %in% tail(colnames(regional_codes), -1))){
stop(paste0('Aggregate members [',
paste(unique(unlist(region_aggs))[!unique(unlist(region_aggs)) %in% tail(colnames(regional_codes), -1)],
collapse = ', '),
'] not in regional_codes column names [',
paste(tail(colnames(regional_codes), -1),
collapse = ', '),
']'))
}
}
# look for missing years before time frame can be extended using max_year parameter
years <- (max(occDetdata$TP) - min(occDetdata$TP))+1
if(length(unique(occDetdata$TP)) != years) {
# find out which years have no data
missing_yrs <- with(occDetdata, setdiff(min(TP):max(TP), unique(TP)))
if(length(missing_yrs) ==1)
error_msg <- paste0('There are no visits in year ', missing_yrs,'. This means there is no data, for any species in this year. BUGS cannot run if there is a year with no data. Quitting...')
else
error_msg <- paste0('There are ', length(missing_yrs),' years with no visits, including ', missing_yrs[1],'. This means there is no data, for any species in these years. BUGS cannot run if any year has no data. Quitting...')
stop(error_msg)
}
# year and site need to be numeric starting from 1 to length of them. This is due to the way the bugs code is written
nsite <- length(unique(occDetdata$site))
site_match <- data.frame(name = unique(occDetdata$site), id = 1:nsite)
occDetdata <- merge(occDetdata, site_match, by.x='site', by.y="name")
# need to get a measure of whether the species was on that site in that year, unequivocally, in zst
zst <- acast(occDetdata, id ~ factor(TP), value.var = 'focal', max, fill = 0) # initial values for the latent state = observed state
# Calculate min year. We're doing it now as it's fine if we've dropped the first year(s) of data and nothing in the middle
min_year <- min(occDetdata$TP)
if(min_year != min_year_original){
warning(paste0('\nThe first year of the data has changed, as the first years of data were dropped.\n',
'The original first year was ',min_year_original,'. It is now ',min_year,'\n'))
}
# if the max_year is not null, edit the zst table to add the additional years required
if(!is.null(max_year)){
# check that max_year is a numeric value
if(!is.numeric(max_year)) stop('max_year should be a numeric value')
# check that max_year is greater than the final year of the dataset
if(max_year <= max(occDetdata$TP)) stop('max_year should be greater than the final year of available data')
nTP <- max_year - min_year + 1
# if nyear is greater than the number of years due to different specification of max_year,
# add on additional columns to zst so that inital values can be create for these years.
if(nTP > ncol(zst)){
# work out how many columns need to be added
to_add <- nTP - ncol(zst)
zst <- cbind(zst, matrix(0, ncol = to_add, nrow = nrow(zst)))
# add column names
colnames(zst) <- 1:nTP
}
# if a value has not been selected for max_year then continue as before
}else{
# record the max year
max_year <- max(occDetdata$TP)
nTP <- max_year - min_year + 1
}
# look for time periods with no data
if(length(unique(occDetdata$TP)) != nTP) stop('It looks like you have time periods with no data. This will crash BUGS')
# TP and site need to be numeric starting from 1 to length of them. This is due to the way the bugs code is written
occDetdata$TP <- occDetdata$TP - min(occDetdata$TP) + 1
# Parameter you wish to monitor, shown in the output
parameters <- c("psi.fs", "tau2", "tau.lp", "alpha.p", "a")
# If ranwalk + halfcauchy monitor mu.lp
if(all(c('ranwalk', 'halfcauchy') %in% modeltype)){
if(!'centering' %in% tolower(modeltype) & !'intercept' %in% tolower(modeltype)){
parameters <- c(parameters, "mu.lp")
}
}
# If sparta monitor mu.lp
if('sparta' %in% tolower(modeltype)) {
parameters <- c(parameters, "mu.lp")
}
# Add user specified paramters if given
if(!is.null(additional.parameters)) parameters <- c(parameters, additional.parameters)
# Add parameters for each of the model types
for(ptype in modeltype){
parameters <- getParameters(parameters, modeltype = ptype)
}
# add parameters for regions
if(!is.null(regional_codes)){
# remove spaces from region names, then extract them
colnames(regional_codes)[-1] <- gsub(' ', '_', colnames(regional_codes)[-1])
region_names <- colnames(regional_codes)[-1]
parameters <- c(parameters,
paste0("psi.fs.r_", region_names),
paste0("a_", region_names))
# ignore some parameters not used in regions model
parameters <- parameters[!parameters %in% c('a')]
}
# add parameters for regional aggregates
if(!is.null(region_aggs)){
parameters <- c(parameters, paste0('psi.fs.r_', names(region_aggs)))
}
# now assemble the bugs_data and related objects
# HERE IS THE BUGS DATA
bugs_data <- with(occDetdata,
list(y = as.numeric(focal), Year = TP, Site = id,
nyear = nTP, nsite = nsite, nvisit = nrow(occDetdata)))
# temporary test: site identifiers must not exceed the declared number of sites
# (fixed typo 'idenitifier' and added a separator between the two pasted numbers)
if(max(occDetdata$id) != bugs_data$nsite) stop(paste0("Site identifier exceeds nsite (",
max(occDetdata$id), " vs ", nsite, ")"))
for(btype in modeltype){ # one call per element of modeltype: each adds a section
bugs_data <- getBugsData(bugs_data, modeltype = btype,
occDetData = occDetdata)
}
# Add additional elements if specified
if(!is.null(additional.BUGS.elements)){
if(!is.list(additional.BUGS.elements)){
stop("additional.BUGS.elements must be a list")
} else {
bugs_data <- c(bugs_data, additional.BUGS.elements)
}
}
# make a copy of the bugs_data to calculate metadata from
bugs_data_copy <- with(occDetdata, data.frame(y = as.numeric(focal), year = TP, site = site))
BD_MD <- list()
# Add regional elements to bugs data
if(!is.null(regional_codes)){
# removed unwanted bugs elements
bugs_data <- bugs_data[!names(bugs_data) %in% c('psi0.a', 'psi0.b')]
# expand the lookup table to include regions
regional_lookup <- merge(regional_codes, site_match, by.y="name", by.x="site")
zero_sites <- NULL
for(region in region_names){
if(sum(regional_codes[ , region]) != 0){
bugs_data[paste0('r_', region)] <- list(regional_lookup[order(regional_lookup$id),region])
bugs_data[paste0('nsite_r_', region)] <- sum(regional_codes[, region])
} else {
zero_sites <- c(zero_sites, region)
}
}
if(!is.null(zero_sites)){
warning(paste('The following regions have no data and should not be modelled:',
paste(zero_sites, collapse = ', '),
'- These regions will not be included in the model'))
# remove parameters
parameters <- parameters[!parameters %in% c(paste0("psi.fs.r_", zero_sites),
paste0("a_", zero_sites))]
# remove regions for regions_codes
regional_codes <- regional_codes[ ,!colnames(regional_codes) %in% zero_sites]
region_names <- setdiff(region_names, zero_sites)
# remove region aggregates
if(rem_aggs_with_missing_regions){
rem_aggs <- unlist(lapply(region_aggs, FUN = function(x) any(zero_sites %in% x)))
rem_aggs_names <- names(region_aggs)[rem_aggs]
# remove aggs if you need to
if(length(rem_aggs_names) > 0){
warning(paste('The following region aggregates have to be removed as they contain a region with no data:',
paste(rem_aggs_names, collapse = ', '),
'- These region aggregates will not be included in the model\n',
'If you want to keep aggregates with one or more missing regions,',
'set rem_aggs_with_missing_regions=FALSE'))
region_aggs <- region_aggs[!names(region_aggs) %in% rem_aggs_names]
parameters <- parameters[!parameters %in% paste0('psi.fs.r_', rem_aggs_names)]
}
} else {
rem_aggs <- unlist(lapply(region_aggs, FUN = function(x) all(x %in% zero_sites)))
rem_aggs_names <- names(region_aggs)[rem_aggs]
edit_aggs <- unlist(lapply(region_aggs, FUN = function(x) any(zero_sites %in% x)))
edit_aggs_names <- names(region_aggs)[edit_aggs & !(rem_aggs)]
# edit aggs if you need to
if(length(edit_aggs_names) > 0){
warning(paste('The following region aggregates have to be edited as they contain regions with no data:',
paste(edit_aggs_names, collapse = ', '),
'\n- These region aggregates will still be included in the model\n'))
# Recreate aggs, removing regions without data
region_aggs_new <- lapply(region_aggs, FUN = function(agg){
agg[!(agg %in% zero_sites)]
})
names(region_aggs_new) <- names(region_aggs)
region_aggs <- region_aggs_new
}
# remove aggs completely if you need to
if(length(rem_aggs_names) > 0){
warning(paste('The following region aggregates have to be removed as all regions within them have no data:',
paste(rem_aggs_names, collapse = ', '),
'- These region aggregates will not be included in the model'))
region_aggs <- region_aggs[!names(region_aggs) %in% rem_aggs_names]
parameters <- parameters[!parameters %in% paste0('psi.fs.r_', rem_aggs_names)]
}
}
}
regions_years <- list()
regions_nobs <- list()
regions_sites <-list()
bugs_data_copy <- merge(bugs_data_copy, regional_codes, all.x = TRUE)
# add regional codes to this copy and get n_obs, max and min years and year gaps for each region
for(region_name in region_names){
regions_nobs[paste0('n_obs_','r_', region_name)] <- sum(bugs_data_copy$y * bugs_data_copy[,region_name])
regions_sites[paste0('n_sites_','r_', region_name)] <- sum(bugs_data_copy[,region_name])
current_r <- bugs_data_copy$y * bugs_data_copy[,region_name] * bugs_data_copy$year
current_r <- subset(current_r,current_r !=0)
if(length(current_r) > 2){
current_rmin <- (min_year-1) + min(current_r)
current_rmax <- (min_year-1) + max(current_r)
regions_years[paste0('min_year_data_','r_', region_name)] <- current_rmin
regions_years[paste0('max_year_data_','r_', region_name)] <- current_rmax
current_datagaps <- dataGaps(current_r, min_year, max_year, current_rmin, current_rmax)
regions_years[paste0('gap_start_','r_', region_name)] <- current_datagaps$gap_start
regions_years[paste0('gap_end_','r_', region_name)] <- current_datagaps$gap_end
regions_years[paste0('gap_middle_','r_', region_name)] <- current_datagaps$gap_middle
} else if(length(current_r) == 1) {
current_rmin <- (min_year-1) + min(current_r)
current_rmax <- (min_year-1) + max(current_r)
regions_years[paste0('min_year_data_','r_', region_name)] <- current_rmin
regions_years[paste0('max_year_data_','r_', region_name)] <- current_rmax
current_datagaps <- dataGaps(current_r, min_year, max_year, current_rmin, current_rmax)
regions_years[paste0('gap_start_','r_', region_name)] <- current_datagaps$gap_start
regions_years[paste0('gap_end_','r_', region_name)] <- current_datagaps$gap_end
regions_years[paste0('gap_middle_','r_', region_name)] <- NA
} else if(length(current_r) < 1){
regions_years[paste0('min_year_data_','r_', region_name)] <- NA
regions_years[paste0('max_year_data_','r_', region_name)] <- NA
regions_years[paste0('gap_start_','r_', region_name)] <- NA
regions_years[paste0('gap_end_','r_', region_name)] <- NA
regions_years[paste0('gap_middle_','r_', region_name)] <- NA
}
}
}
# add max and min data years for the whole dataset
all_years_data <- bugs_data_copy$y * bugs_data_copy$year
all_years_data <- subset(all_years_data, all_years_data !=0)
BD_MD$min_year_data <- (min_year-1) + min(all_years_data)
BD_MD$max_year_data <- (min_year-1) + max(all_years_data)
# use these to find year gap data
BD_MD$yeargaps<-dataGaps(all_years_data, min_year, max_year, BD_MD$min_year_data, BD_MD$max_year_data)
# Generate initial values for a single MCMC chain.
#
# z: matrix of initial values for the latent occupancy state (site x time period)
# nTP: number of time periods in the model
# bugs_data: the BUGS data list (used here for the number of sites)
#
# NOTE(review): 'modeltype', 'getInitValues' and 'additional.init.values' are
# taken from the enclosing environment of occDetFunc.
initiate <- function(z, nTP, bugs_data) {
init <- list (z = z,
alpha.p = rep(runif(1, -2, 2),
nTP),
a = rep(runif(1, -2, 2), nTP),
eta = rep(runif(1, -2, 2), bugs_data$nsite))
# add extra init values if needed
for(itype in modeltype){
init <- getInitValues(init, modeltype = itype)
}
# if ranwalk + centreing a -> aa
if(all(c('ranwalk', 'centering') %in% modeltype)){
names(init)[names(init) == 'a'] <- 'aa'
}
# add user specified values
if(!is.null(additional.init.values)){
if(!is.list(additional.init.values)){
# fixed copy-paste bug: this message previously named additional.BUGS.elements
stop("additional.init.values must be a list")
} else {
init <- c(init, additional.init.values)
}
}
return(init)
}
# set the initial values...
init.vals <- replicate(n_chains, initiate(z = zst,
nTP = nTP,
bugs_data = bugs_data),
simplify = F)
# modify initial values for regional model
if(!is.null(regional_codes)){
# remove initial values for a and psi
init.vals <- lapply(init.vals, FUN = function(x){
x[!names(x) %in% c('psi0', 'a')]
})
}
# Select the correct model file
if(is.null(model.function)){
model.file <- getModelFile(modeltype,
regional_codes = regional_codes,
region_aggs = region_aggs)
} else {
cat('Using user model.function')
model.file <- model.function
}
modelcode <- paste(readLines(model.file), collapse = '\n')
### REVIEW CODE
cat('#### PLEASE REVIEW THE BELOW ####\n\n')
cat('Your model settings:', paste(modeltype, collapse = ', '))
cat('\n\nModel File:\n\n')
cat(modelcode)
cat('\n\nbugs_data:\n\n')
cat(str(bugs_data))
cat('\n\ninit.vals:\n\n')
cat(str(init.vals))
cat('\n\nparameters:\n\n')
cat(parameters)
###
if(identical(model.file, occDetBUGScode)){
warning("You have selected a formulation with potentially informative priors that are subject to boundary effects (See Outhwaite et al 2018 for details).
This option is retained within sparta for backwards compatibility only: we strongly recommend that you do
not use this option for inferring changes in species' distributions")
}
error_status <- try(
out <- R2jags::jags(bugs_data, init.vals, parameters, model.file = model.file,
n.chains = n_chains, n.iter = n_iterations, n.thin = thinning,
n.burnin = burnin, DIC = TRUE)
)
dir.create(path = output_dir, showWarnings = FALSE) # create the top results folder
# If JAGS failed, warn (with the captured error) and return NULL rather than crash.
# inherits() is the robust test for condition classes (class() can be length > 1);
# paste() adds the separating spaces that warning()'s bare concatenation omitted.
if (inherits(error_status, "try-error")){
warning(paste('JAGS returned an error when modelling', taxa_name, 'error:', error_status[1]))
return(NULL)
}
} # end of "if(proceed)"
########################################## Add metadata
# calculate the proportion of site:year combinations with repeat visits
# (across the whole dataset): table() counts visits per site x TP cell;
# among cells with at least one visit, take the fraction with more than one
temp <- as.data.frame(with(occDetdata, table(site, TP)))$Freq
prop_visits_repeated <- mean(temp[temp>0] > 1)
if(is.null(attributes(occDetdata))){
metadata <- list()
} else if('metadata' %in% names(attributes(occDetdata))){
metadata <- attr(occDetdata, 'metadata')
} else {
metadata <- list()
}
# get the sessionInfo and coerce into a useable format
session.info <- sessionInfo()
packages <- c(sapply(session.info[7][[1]], function(x) x$Version),
sapply(session.info[8][[1]], function(x) x$Version))
# Assemble the metadata record describing this model run
MD <- list(method = 'sparta::occDetFunc',
call = match.call(),
date = Sys.Date(),
user = Sys.info()['user'],
summary = list(species = taxa_name,
n_sites = length(unique(occDetdata$site)),
n_years = length(unique(occDetdata$TP)),
n_visits = nrow(occDetdata),
n_obs = sum(occDetdata$focal),
# '=' (not '<-') so the element is named, and a logical
# comparison: 'focal=TRUE' was silently ignored by subset()
# so every site was counted, not just those with the species
n_species_sites = length(unique(subset(occDetdata, focal == TRUE)$site)),
min_year_model = min_year,
max_year_model = max_year),
# scalar if/else (not ifelse on a list, which truncated the
# result and dropped its names) so the full gaps list is kept
gaps = if (length(BD_MD) == 0) NA else list(
min_year_data = BD_MD$min_year_data,
max_year_data = BD_MD$max_year_data,
gap_start = BD_MD$yeargaps$gap_start,
gap_end = BD_MD$yeargaps$gap_end,
gap_middle = BD_MD$yeargaps$gap_middle),
spp_Metrics = as.list(data_Metrics),
dataset_Metrics = list(# dataset properties
totalObservations = sum(occDetdata$L),
propRepeats = prop_visits_repeated),
provenance = provenance,
output_path = ifelse(test = write_results,
file.path(getwd(), output_dir, paste(taxa_name, ".rdata", sep = "")),
NA),
session_info = list(session.info[-c(7:8)],
packages)
)
# add regional elements if applicable
if(!is.null(regional_codes) & proceed){
MD$summary$region_nobs <- regions_nobs
MD$summary$region_years <- regions_years
MD$summary$region_nsite <- regions_sites
}else{
MD$summary$region_nobs <- NA
MD$summary$region_years <- NA
MD$summary$region_nsite <- NA
}
# If the data coming in is the result of analysis we want to
# append this data
name <- 'analysis'
i = 1
while(name %in% names(metadata)){
name <- paste0(name, i)
i = i + 1
}
metadata[name] <- list(MD)
attr(out, 'metadata') <- metadata
if(!saveMatrix) out$BUGSoutput$sims.matrix <- NULL
out$SPP_NAME <- taxa_name
out$min_year <- min_year
out$max_year <- max_year
out$sites_included <- ifelse(test = proceed, yes = site_match, no = NA)
out$nsites <- bugs_data$nsite
out$nvisits <- bugs_data$nvisit
out$species_observations <- sum(bugs_data$y)
out$sparta_version <- packages["sparta"]
if(!is.null(regional_codes)) out$regions <- region_names
if(!is.null(region_aggs)) out$region_aggs <- region_aggs
if(return_data) out$bugs_data <- bugs_data
attr(out, 'modeltype') <- modeltype
attr(out, 'modelcode') <- modelcode
class(out) <- 'occDet'
if(write_results) save(out, file = file.path(output_dir, paste(taxa_name, ".rdata", sep = "")))
return(out)
}
| /R/occDetFunc.r | permissive | yangxhcaf/sparta | R | false | false | 41,747 | r | #' Occupancy detection Function
#'
#' Run occupancy detection models using the output from \code{formatOccData}
#'
#' This function requires both the R package R2jags and the program JAGS.
#' These are not installed by default when sparta is loaded and so should be
#' installed by the user. More details can be found in the vignette.
#'
#' @param taxa_name A character giving the name of the species to be modelled.
#' @param occDetdata The 2nd element of the object returned by formatOccData.
#' @param spp_vis The 1st element of the object returned by formatOccData.
#' @param n_iterations numeric, An MCMC parameter - The number of interations
#' @param nyr numeric, the minimum number of periods on which a site must have records for it
#' to be included in the models. Defaults to 2
#' @param burnin numeric, An MCMC parameter - The length of the burn in
#' @param thinning numeric, An MCMC parameter - The thinning factor
#' @param n_chains numeric, an MCMC parameter - The number of chains to be run
#' @param write_results logical, should results be saved to \code{output_dir}. This is
#' recommended since these models can take a long time to run. If \code{TRUE} (default)
#' the results from each species will be saved as an .rdata file once the species
#' has run. This prevents loss of data should anything go wrong.
#' @param output_dir character, the output directory where the output for each taxa will be saved
#' as .rdata files. This will default to the working directory
#' @param modeltype A character string or vector of strings that specifies the model to use. See details. If
#' used then model.function is ignored.
#' @param regional_codes A data.frame object detailing which site is associated with which region.
#' each row desginates a site and each column represents a region. The first column represents the
#' site name (as in \code{site}). Subsequent columns are named for each regions with 1 representing
#' the site is in that region and 0 that it is not. NOTE a site should only be in one region
#' @param region_aggs A named list giving aggregations of regions that you want trend
#' estimates for. For example \code{region_aggs = list(GB = c('england', 'scotland', 'wales'))}
#' will produced a trend for GB (Great Britain) as well as its constituent nations. Note that
#' 'england', scotland' and 'wales' must appear as names of columns in \code{regional_codes}.
#' More than one aggregate can be given, eg \code{region_aggs = list(GB = c('england', 'scotland',
#' 'wales'), UK = c('england', 'scotland', 'wales', 'northern_ireland'))}.
#' @param max_year numeric, final year to which analysis will be run, this can be set if it is beyond
#' the limit of the dataset. Defaults to final year of the dataset.
#' @param seed numeric, uses \code{set.seed} to set the random number seed. Setting
#' this number ensures repeatable analyses
#' @param model.function optionally a user defined BUGS model coded as a function (see \code{?jags},
#' including the example there, for how this is done)
#' @param additional.parameters A character vector of additional parameters to monitor
#' @param additional.BUGS.elements A named list giving additional BUGS elements passed
#' to \code{R2jags::jags} 'data' argument
#' @param additional.init.values A named list giving user specified initial values to
#' be added to the defaults.
#' @param return_data Logical, if \code{TRUE} (default) the BUGS data object is returned with the data
#' @param saveMatrix Logical, if \code{FALSE} (default) the sims.matrix element of the jags object is omitted, in order to reduce the filesize.
#' @param criterion Determines whether the model should be run. If an integer then this defines the threshold number of records (50 in Outhwaite et al 2019).
#' Other options are `EqualWt` or `HighSpec`, which define the application of "rules of thumb" defined in Pocock et al 2019.
#' Defaults to 1, in which case the model is applied as long as there is at least a single record of the focal species.
#' @param provenance An optional text string allowing the user to identify the dataset.
#' @param rem_aggs_with_missing_regions An option which if TRUE will remove all aggregates which contain at least one region with no data.
#' If FALSE, only aggregates where ALL regions in that aggregate contain no data, are dropped. Defaults to TRUE
#'
#' @details \code{modeltype} is used to choose the model as well as the associated initial values,
#' and parameters to monitor. Elements to choose from can be separated into the following components:
#'
#' A. Prior type: this has 3 options, each of which was tested in Outhwaite et al (in review):
#' 1. sparta - This uses the same model as in Isaac et al (2014).
#' 2. indran - This is the adaptive stationary model.
#' 3. ranwalk - This is the random walk model.
#'
#' B. Hyperprior type: This has 3 options, each of these are discussed in Outhwaite et al (in review):
#' 1. halfuniform - the original formulation in Isaac et al (2014).
#' 2. halfcauchy - preferred form, tested in Outhwaite et al (2018).
#' 3. inversegamma - alternative form presented in the literature.
#'
#' C. List length specification: This has 3 options:
#' 1. catlistlength - list length as a categorical variable.
#' 2. contlistlength - list length as a continuous variable.
#' 3. nolistlength - no list length variable.
#'
#' D. Julian date: this is an additional option for including Julian date within the detection model:
#' 1. jul_date.
#'
#' Not all combinations are available in sparta. You will get an error if you try and use
#' a combination that is not supported. There is usually a good reason why that
#' combination is not a good idea. Here are the model elements available:
#'
#' \itemize{
#' \item{\code{"sparta"}}{ - This uses the same model as in Isaac et al (2014)}
#' \item{\code{"indran"}}{ - Here the prior for the year effect of the state model is modelled as a random effect. This allows the model to adapt to interannual variability.}
#' \item{\code{"ranwalk"}}{ - Here the prior for the year effect of the state model is modelled as a random walk. Each estimate for the year effect is dependent on that of the previous year.}
#' \item{\code{"halfcauchy"}}{ - Includes half-Cauchy hyperpriors for all random effects within the model. The half-Cauchy is a special case of the Student’s t distribution with 1 degree of freedom. }
#' \item{\code{"inversegamma"}}{ - Includes inverse-gamma hyperpriors for random effects within the model}
#' \item{\code{"catlistlength"}}{ - This specifies that list length should be considered as a catagorical variable. There are 3 classes, lists of length 1, 2-3, and 4 and over. If none of the list length options are specifed 'contlistlength' is used}
#' \item{\code{"contlistlength"}}{ - This specifies that list length should be considered as a continious variable. If none of the list length options are specifed 'contlistlength' is used}
#' \item{\code{"nolistlength"}}{ - This specifies that no list length should be used. If none of the list length options are specifed 'contlistlength' is used}
#' \item{\code{"jul_date"}}{ - This adds Julian date to the model as a normal distribution with its mean and standard deviation as monitered parameters.}
#' \item{\code{"intercept"}}{ - No longer available. Includes an intercept term in the state and observation model. By including intercept terms, the occupancy and detection probabilities in each year are centred on an overall mean level.}
#' \item{\code{"centering"}}{ - No longer available. Includes hierarchical centering of the model parameters. Centring does not change the model explicitly but writes it in a way that allows parameter estimates to be updated simultaneously.}
#' }
#' These options are provided as a vector of characters, e.g. \code{modeltype = c('indran', 'halfcauchy', 'catlistlength')}
#'
#' @return A list including the model, JAGS model output, the path of the model file used and information on the number of iterations, first year, last year, etc.
#' Key aspects of the model output include:
#' \itemize{
#' \item{\code{"out$model"}}{ - The model used as provided to JAGS. Also contained is a list of fully observed variables. These are those listed in the BUGS data.}
#' \item{\code{"out$BUGSoutput$n.chains"}}{ - The number of Markov chains ran in the MCMC simulations.}
#' \item{\code{"out$BUGSoutput$n.iter"}}{ - The total number of iterations per chain.}
#' \item{\code{"out$BUGSoutput$n.burnin"}}{ - The number of iterations discarded from the start as a burn-in period.}
#' \item{\code{"out$BUGSoutput$n.thin"}}{ - The thinning rate used. For example a thinning rate of 3 retains only every third iteration. This is used to reduce autocorrelation.}
#' \item{\code{"out$BUGSoutput$n.keep"}}{ - The number of iterations kept per chain. This is the total number of iterations minus the burn-in then divided by the thinning rate.}
#' \item{\code{"out$BUGSoutput$n.sims"}}{ - The total number of iterations kept.}
#' \item{\code{"out$BUGSoutput$summary"}}{ - A summary table of the monitored parameter. The posterior distribution for each parameter is summarised with the mean, standard deviation, various credible intervals, a formal convergence metric (Rhat), and a measure of effective sample size (n.eff).}
#' \item{\code{"out$BUGSoutput$mean"}}{ - the mean values for all monitored parameters}
#' \item{\code{"out$BUGSoutput$sd"}}{ - the standard deviation values for all monitored parameters}
#' \item{\code{"out$BUGSoutput$median"}}{ - the median values for all monitored parameters}
#' \item{\code{"out$parameters.to.save"}}{ - The names of all monitored parameters.}
#' \item{\code{"out$BUGSoutput$model.file"}}{ - The user provided or temporary generated model file detailing the occupancy model.}
#' \item{\code{"out$n.iter"}}{ - The total number of iterations per chain.}
#' \item{\code{"out$DIC"}}{ - Whether the Deviance Information Criterion (DIC) is calculated.}
#' \item{\code{"out$BUGSoutput$sims.list"}}{ - A list of the posterior distribution for each monitored parameter. Use sims.array and sims.matrix if a different format of the posteriors is desired.}
#' \item{\code{"out$SPP_NAME"}}{ - The name of the study species.}
#' \item{\code{"out$min_year"}}{ - First year of data included in the occupancy model run.}
#' \item{\code{"out$max_year"}}{ - Final year of data included in the occupancy model run or final year specified by the user.}
#' \item{\code{"out$nsite"}}{ - The number of unique sites included in the occupancy model run.}
#' \item{\code{"out$nvisits"}}{ - The number of unique visits included in the occupancy model run.}
#' \item{\code{"out$species_sites"}}{ - The number of unique sites the species of interest was recorded in.}
#' \item{\code{"out$species_observations"}}{ - The number of unique records for the species of interest.}
#' \item{\code{"out$regions"}}{ - The names of the regions included in the model run.}
#' \item{\code{"out$region_aggs"}}{ - The names of the region aggregates included in the model run.}
#' \item{\code{"out$nsites_region"}}{ - Named vector containing the number of sites in each region included in the occupancy model run.}
#' }
#'
#' @keywords trends, species, distribution, occupancy, bayesian, modeling
#' @references Isaac, N.J.B., van Strien, A.J., August, T.A., de Zeeuw, M.P. and Roy, D.B. (2014).
#' Statistics for citizen science: extracting signals of change from noisy ecological data.
#' \emph{Methods in Ecology and Evolution}, 5: 1052-1060.
#' @references Outhwaite, C.L., Chandler, R.E., Powney, G.D., Collen, B., Gregory, R.D. & Isaac, N.J.B. (2018).
#' Prior specification in Bayesian occupancy modelling improves analysis of species occurrence data.
#' \emph{Ecological Indicators}, 93: 333-343.
#' @references Pocock, Logie, Isaac, Outhwaite & August. Rapid assessment of the suitability of multi-species citizen science datasets for occupancy trend analysis. \emph{bioRxiv} 813626 (2019) doi:10.1101/813626.
#'
#' @examples
#' \dontrun{
#'
#' set.seed(123)
#'
#' # Create data
#' n <- 15000 #size of dataset
#' nyear <- 20 # number of years in data
#' nSamples <- 100 # set number of dates
#' nSites <- 50 # set number of sites
#'
#' # Create somes dates
#' first <- as.Date(strptime("2010/01/01", format="%Y/%m/%d"))
#' last <- as.Date(strptime(paste(2010+(nyear-1),"/12/31", sep=''), format="%Y/%m/%d"))
#' dt <- last-first
#' rDates <- first + (runif(nSamples)*dt)
#'
#' # taxa are set as random letters
#' taxa <- sample(letters, size = n, TRUE)
#'
#' # sites are visited randomly
#' site <- sample(paste('A', 1:nSites, sep=''), size = n, TRUE)
#'
#' # the date of visit is selected at random from those created earlier
#' survey <- sample(rDates, size = n, TRUE)
#'
#' # Format the data
#' visitData <- formatOccData(taxa = taxa, site = site, survey = survey)
#'
#' # run the model with these data for one species (very small number of iterations)
#' results <- occDetFunc(taxa_name = taxa[1],
#' n_iterations = 50,
#' burnin = 15,
#' occDetdata = visitData$occDetdata,
#' spp_vis = visitData$spp_vis,
#' write_results = FALSE,
#' provenance = "sparta test dataset")
#' }
#' @export
#' @importFrom reshape2 acast
#' @import LearnBayes
occDetFunc <- function (taxa_name, occDetdata, spp_vis, n_iterations = 5000, nyr = 2,
                        burnin = 1500, thinning = 3, n_chains = 3, write_results = TRUE,
                        output_dir = getwd(), modeltype = 'sparta', max_year = NULL,
                        seed = NULL, model.function = NULL, regional_codes = NULL,
                        region_aggs = NULL, additional.parameters = NULL,
                        additional.BUGS.elements = NULL, additional.init.values = NULL,
                        return_data = FALSE, criterion = 1, provenance = NULL, saveMatrix = FALSE,
                        rem_aggs_with_missing_regions = TRUE){

  ################## BASIC CHECKS

  # first run the error checks
  errorChecks(n_iterations = n_iterations, burnin = burnin,
              thinning = thinning, n_chains = n_chains, seed = seed)

  # Set seed for repeatability
  if(!is.null(seed)) set.seed(seed)

  # Check the taxa_name is one of my species
  if(!taxa_name %in% colnames(spp_vis)) stop('taxa_name is not the name of a taxa in spp_vis')

  ##################

  min_year_original <- min_year <- min(occDetdata$TP)

  # only include sites which have more than nyr of records
  yps <- rowSums(acast(occDetdata, site ~ TP, length, value.var = 'L') > 0)
  sites_to_include <- names(yps[yps >= nyr])

  # strip out the visits to sites that were visited in just one year
  i <- occDetdata$site %in% sites_to_include
  if(sum(i) > 0){
    occDetdata <- occDetdata[i,]
    spp_vis <- spp_vis[i,]
  } else stop(paste0("There are no sites visited in at least ", nyr, " years."))

  # calculate a set of data metrics for this species
  data_Metrics <- dataMetrics(sp = taxa_name,
                              formattedData = list(occDetdata = occDetdata, spp_vis = spp_vis))

  # TRUE when x is numeric and within floating-point tolerance of an integer
  is.wholenumber <-
    function(x, tol = .Machine$double.eps^0.5) {
      if(is.numeric(x)) abs(x - round(x)) < tol
      else FALSE
    }

  # check there is enough data to run a model. If so, proceed with the main event
  if(is.wholenumber(criterion)) {
    # the criterion is a whole number. this defines the number of records
    # check whether the number of records meets this value
    proceed <- sum(spp_vis[, taxa_name]) >= criterion
  } else if(criterion == "EqualWt") {
    proceed <- applyRuleOfThumb(data_Metrics, "EqualWt")
  } else if(criterion == "HighSpec") {
    proceed <- applyRuleOfThumb(data_Metrics, "HighSpec")
  } else
    stop("Criterion must be either an integer, `EqualWt` or `HighSpec`")

  if(!proceed){
    # there is not enough data: set the outputs accordingly
    bugs_data <- list(y = 0, nsite = 0, nvisit = 0)
    BD_MD <- error_status <- site_match <- modelcode <- NA
    warning(paste(taxa_name,
                  "has insufficient data after site filtering. Either decrease nyr or change the criterion"))
    out <- list(message = "No model run: insufficient data")
  } else {
    # There is enough data: we can proceed with the main event
    # Check if R2jags is installed
    if (!requireNamespace("R2jags", quietly = TRUE)) {
      # BUGFIX: corrected the 'insatll' typo in this user-facing message
      stop("Package 'R2jags' is needed for the 'occDetModel' function to work. Please install this from CRAN. You will also be required to install JAGS, which you can download from https://sourceforge.net/projects/mcmc-jags/files/JAGS/",
           call. = FALSE)
    }

    # If doing regional we take control of model specification
    if(!is.null(regional_codes)){
      oldmodeltype <- modeltype
      modeltype <- c('ranwalk', 'halfcauchy',
                     c('jul_date', 'catlistlength')[c('jul_date', 'catlistlength') %in% modeltype])
      if(!all(oldmodeltype %in% modeltype))
        message('When using regional data the model specification will be set to ranwalk, halfcauchy. jul_date and catlistlength can still be specified by the user')
    }

    # default to a continuous list-length effect when none is specified
    if(!'catlistlength' %in% modeltype & !'nolistlength' %in% modeltype){
      modeltype <- c(modeltype, 'contlistlength')
    }
    if(!any(c('catlistlength', 'nolistlength', 'contlistlength') %in% modeltype)){
      stop('modeltype should contain one of "catlistlength", "nolistlength", "contlistlength",
         which specify the list-length effect to be included')
    }

    # Do we have JAGS installed - this works only on windows
    if(.Platform$OS.type == "windows"){
      JAGS_test <- Sys.which(names = 'jags-terminal.exe')
      if(JAGS_test[[1]] == '') stop('R cannot find jags-terminal.exe, check that you have installed JAGS')
    }

    # Add the focal column (was the species recorded on the visit?).
    # Use the spp_vis dataframe to extract this info
    nrow1 <- nrow(occDetdata)
    occDetdata <- merge(occDetdata, spp_vis[, c("visit", taxa_name)])
    if(nrow1 != nrow(occDetdata)) stop('some visits have been lost')
    names(occDetdata)[names(occDetdata) == taxa_name] <- "focal"

    # If we are using regional codes do some checks
    if(!is.null(regional_codes)){
      if(!inherits(regional_codes, 'data.frame')) stop("regional_codes should be a data.frame")

      # check whether there is a column called "site".
      # If not, let's assume that the site column is the first in the dataframe
      # NB previous behaviour was to assume *both* that it was column 1 and named 'site'
      if(!"site" %in% names(regional_codes)) {
        warning(paste0("renaming ", names(regional_codes)[1], " as 'site'"))
        names(regional_codes)[1] <- "site"
      }

      # remove locations that are not in the data
      abs_sites <- as.character(regional_codes$site)[!as.character(regional_codes$site) %in% as.character(occDetdata$site)]
      if(length(abs_sites) > 0){
        warning(paste(length(abs_sites), 'sites are in regional_codes but not in occurrence data'))
      }
      if(any(is.na(regional_codes))){
        warning(paste(sum(is.na(regional_codes)),
                      "NAs are present in regional_codes, these will be replaced with 0s"))
        regional_codes[is.na(regional_codes)] <- 0
      }

      sites_no_region <- as.character(regional_codes$site[rowSums(regional_codes[, 2:ncol(regional_codes)]) == 0])
      sites_multi_region <- as.character(regional_codes$site[rowSums(regional_codes[, 2:ncol(regional_codes)]) > 1])
      site_counts <- table(regional_codes[, 1])
      sites_multi_row <- names(site_counts[site_counts > 1])
      if(length(sites_no_region) > 0)
        warning(paste(length(sites_no_region), 'sites are not assigned to a region in regional_codes and will be removed'))
      if(length(sites_multi_region) > 0)
        warning(paste(length(sites_multi_region), 'sites are assigned to more than one region in regional_codes and will be removed'))
      # BUGFIX: this warning previously referenced 'bad_sites', which is not
      # defined until further down, so triggering it raised an error
      if(length(sites_multi_row) > 0)
        warning(paste(length(sites_multi_row), 'site(s) are present in more than one region and will be removed'))

      # finally check that every site with species data also has a region
      sites_no_region2 <- setdiff(sites_to_include, as.character(regional_codes$site))
      if(length(sites_no_region2) >= 1)
        warning(paste(length(sites_no_region2), 'sites are in occurrence data but not in regional data and will be removed'))

      # strip these same sites out of the occDetdata & the regional codes
      bad_sites <- unique(c(abs_sites, sites_multi_row, sites_multi_region, sites_no_region, sites_no_region2))
      regional_codes <- subset(regional_codes, !site %in% bad_sites)
      occDetdata <- subset(occDetdata, !site %in% bad_sites)
    }

    # If we are using regional aggregates do some checks
    if(!is.null(region_aggs)){
      if(is.null(regional_codes)) stop('Cannot use regional aggregates if regional_codes is not supplied')
      stopifnot(inherits(region_aggs, 'list'))
      if(!all(unique(unlist(region_aggs)) %in% tail(colnames(regional_codes), -1))){
        stop(paste0('Aggregate members [',
                    paste(unique(unlist(region_aggs))[!unique(unlist(region_aggs)) %in% tail(colnames(regional_codes), -1)],
                          collapse = ', '),
                    '] not in regional_codes column names [',
                    paste(tail(colnames(regional_codes), -1),
                          collapse = ', '),
                    ']'))
      }
    }

    # look for missing years before time frame can be extended using max_year parameter
    years <- (max(occDetdata$TP) - min(occDetdata$TP)) + 1
    if(length(unique(occDetdata$TP)) != years) {
      # find out which years have no data
      missing_yrs <- with(occDetdata, setdiff(min(TP):max(TP), unique(TP)))
      if(length(missing_yrs) == 1)
        error_msg <- paste0('There are no visits in year ', missing_yrs, '. This means there is no data, for any species in this year. BUGS cannot run if there is a year with no data. Quitting...')
      else
        error_msg <- paste0('There are ', length(missing_yrs), ' years with no visits, including ', missing_yrs[1], '. This means there is no data, for any species in these years. BUGS cannot run if any year has no data. Quitting...')
      stop(error_msg)
    }

    # year and site need to be numeric starting from 1 to length of them.
    # This is due to the way the bugs code is written
    nsite <- length(unique(occDetdata$site))
    site_match <- data.frame(name = unique(occDetdata$site), id = 1:nsite)
    occDetdata <- merge(occDetdata, site_match, by.x = 'site', by.y = "name")

    # need to get a measure of whether the species was on that site in that year, unequivocally, in zst
    zst <- acast(occDetdata, id ~ factor(TP), value.var = 'focal', max, fill = 0) # initial values for the latent state = observed state

    # Calculate min year. We're doing it now as it's fine if we've dropped the
    # first year(s) of data and nothing in the middle
    min_year <- min(occDetdata$TP)
    if(min_year != min_year_original){
      warning(paste0('\nThe first year of the data has changed, as the first years of data were dropped.\n',
                     'The original first year was ', min_year_original, '. It is now ', min_year, '\n'))
    }

    # if the max_year is not null, edit the zst table to add the additional years required
    if(!is.null(max_year)){
      # check that max_year is a numeric value
      if(!is.numeric(max_year)) stop('max_year should be a numeric value')
      # check that max_year is greater than the final year of the dataset
      if(max_year <= max(occDetdata$TP)) stop('max_year should be greater than the final year of available data')
      nTP <- max_year - min_year + 1
      # if nyear is greater than the number of years due to different specification of max_year,
      # add on additional columns to zst so that initial values can be created for these years.
      if(nTP > ncol(zst)){
        # work out how many columns need to be added
        to_add <- nTP - ncol(zst)
        zst <- cbind(zst, matrix(0, ncol = to_add, nrow = nrow(zst)))
        # add column names
        colnames(zst) <- 1:nTP
      }
      # if a value has not been selected for max_year then continue as before
    }else{
      # record the max year
      max_year <- max(occDetdata$TP)
      nTP <- max_year - min_year + 1
    }

    # look for time periods with no data
    if(length(unique(occDetdata$TP)) != nTP) stop('It looks like you have time periods with no data. This will crash BUGS')

    # TP and site need to be numeric starting from 1 to length of them. This is due to the way the bugs code is written
    occDetdata$TP <- occDetdata$TP - min(occDetdata$TP) + 1

    # Parameters you wish to monitor, shown in the output
    parameters <- c("psi.fs", "tau2", "tau.lp", "alpha.p", "a")

    # If ranwalk + halfcauchy monitor mu.lp
    if(all(c('ranwalk', 'halfcauchy') %in% modeltype)){
      if(!'centering' %in% tolower(modeltype) & !'intercept' %in% tolower(modeltype)){
        parameters <- c(parameters, "mu.lp")
      }
    }
    # If sparta monitor mu.lp
    if('sparta' %in% tolower(modeltype)) {
      parameters <- c(parameters, "mu.lp")
    }

    # Add user specified parameters if given
    if(!is.null(additional.parameters)) parameters <- c(parameters, additional.parameters)

    # Add parameters for each of the model types
    for(ptype in modeltype){
      parameters <- getParameters(parameters, modeltype = ptype)
    }

    # add parameters for regions
    if(!is.null(regional_codes)){
      # remove spaces from region names, then extract them
      colnames(regional_codes)[-1] <- gsub(' ', '_', colnames(regional_codes)[-1])
      region_names <- colnames(regional_codes)[-1]
      parameters <- c(parameters,
                      paste0("psi.fs.r_", region_names),
                      paste0("a_", region_names))
      # ignore some parameters not used in regions model
      parameters <- parameters[!parameters %in% c('a')]
    }

    # add parameters for regional aggregates
    if(!is.null(region_aggs)){
      parameters <- c(parameters, paste0('psi.fs.r_', names(region_aggs)))
    }

    # now assemble the bugs_data and related objects
    # HERE IS THE BUGS DATA
    bugs_data <- with(occDetdata,
                      list(y = as.numeric(focal), Year = TP, Site = id,
                           nyear = nTP, nsite = nsite, nvisit = nrow(occDetdata)))

    # temporary test
    # BUGFIX: added a separator between the two numbers and fixed the
    # 'idenitifier' typo so the message reads correctly
    if(max(occDetdata$id) != bugs_data$nsite) stop(paste0("Site identifier exceeds nsite (",
                                                          max(occDetdata$id), ", ", nsite, ")"))

    for(btype in modeltype){ # one call per element of modeltype: each adds a section
      bugs_data <- getBugsData(bugs_data, modeltype = btype,
                               occDetData = occDetdata)
    }

    # Add additional elements if specified
    if(!is.null(additional.BUGS.elements)){
      if(!is.list(additional.BUGS.elements)){
        stop("additional.BUGS.elements must be a list")
      } else {
        bugs_data <- c(bugs_data, additional.BUGS.elements)
      }
    }

    # make a copy of the bugs_data to calculate metadata from
    bugs_data_copy <- with(occDetdata, data.frame(y = as.numeric(focal), year = TP, site = site))
    BD_MD <- list()

    # Add regional elements to bugs data
    if(!is.null(regional_codes)){
      # remove unwanted bugs elements
      bugs_data <- bugs_data[!names(bugs_data) %in% c('psi0.a', 'psi0.b')]

      # expand the lookup table to include regions
      regional_lookup <- merge(regional_codes, site_match, by.y = "name", by.x = "site")

      zero_sites <- NULL
      for(region in region_names){
        if(sum(regional_codes[, region]) != 0){
          bugs_data[paste0('r_', region)] <- list(regional_lookup[order(regional_lookup$id), region])
          bugs_data[paste0('nsite_r_', region)] <- sum(regional_codes[, region])
        } else {
          zero_sites <- c(zero_sites, region)
        }
      }

      if(!is.null(zero_sites)){
        warning(paste('The following regions have no data and should not be modelled:',
                      paste(zero_sites, collapse = ', '),
                      '- These regions will not be included in the model'))
        # remove parameters
        parameters <- parameters[!parameters %in% c(paste0("psi.fs.r_", zero_sites),
                                                    paste0("a_", zero_sites))]
        # remove regions from regional_codes
        regional_codes <- regional_codes[, !colnames(regional_codes) %in% zero_sites]
        region_names <- setdiff(region_names, zero_sites)
        # remove region aggregates
        if(rem_aggs_with_missing_regions){
          rem_aggs <- unlist(lapply(region_aggs, FUN = function(x) any(zero_sites %in% x)))
          rem_aggs_names <- names(region_aggs)[rem_aggs]
          # remove aggs if you need to
          if(length(rem_aggs_names) > 0){
            warning(paste('The following region aggregates have to be removed as they contain a region with no data:',
                          paste(rem_aggs_names, collapse = ', '),
                          '- These region aggregates will not be included in the model\n',
                          'If you want to keep aggregates with one or more missing regions,',
                          'set rem_aggs_with_missing_regions=FALSE'))
            region_aggs <- region_aggs[!names(region_aggs) %in% rem_aggs_names]
            parameters <- parameters[!parameters %in% paste0('psi.fs.r_', rem_aggs_names)]
          }
        } else {
          rem_aggs <- unlist(lapply(region_aggs, FUN = function(x) all(x %in% zero_sites)))
          rem_aggs_names <- names(region_aggs)[rem_aggs]
          edit_aggs <- unlist(lapply(region_aggs, FUN = function(x) any(zero_sites %in% x)))
          edit_aggs_names <- names(region_aggs)[edit_aggs & !(rem_aggs)]
          # edit aggs if you need to
          if(length(edit_aggs_names) > 0){
            warning(paste('The following region aggregates have to be edited as they contain regions with no data:',
                          paste(edit_aggs_names, collapse = ', '),
                          '\n- These region aggregates will still be included in the model\n'))
            # Recreate aggs, removing regions without data
            region_aggs_new <- lapply(region_aggs, FUN = function(agg){
              agg[!(agg %in% zero_sites)]
            })
            names(region_aggs_new) <- names(region_aggs)
            region_aggs <- region_aggs_new
          }
          # remove aggs completely if you need to
          if(length(rem_aggs_names) > 0){
            warning(paste('The following region aggregates have to be removed as all regions within them have no data:',
                          paste(rem_aggs_names, collapse = ', '),
                          '- These region aggregates will not be included in the model'))
            region_aggs <- region_aggs[!names(region_aggs) %in% rem_aggs_names]
            parameters <- parameters[!parameters %in% paste0('psi.fs.r_', rem_aggs_names)]
          }
        }
      }

      regions_years <- list()
      regions_nobs <- list()
      regions_sites <- list()
      bugs_data_copy <- merge(bugs_data_copy, regional_codes, all.x = TRUE)

      # add regional codes to this copy and get n_obs, max and min years and year gaps for each region
      for(region_name in region_names){
        regions_nobs[paste0('n_obs_', 'r_', region_name)] <- sum(bugs_data_copy$y * bugs_data_copy[, region_name])
        regions_sites[paste0('n_sites_', 'r_', region_name)] <- sum(bugs_data_copy[, region_name])
        current_r <- bugs_data_copy$y * bugs_data_copy[, region_name] * bugs_data_copy$year
        current_r <- subset(current_r, current_r != 0)
        # BUGFIX: the first branch was '> 2', which left regions with exactly
        # two records without any year metadata at all
        if(length(current_r) >= 2){
          current_rmin <- (min_year - 1) + min(current_r)
          current_rmax <- (min_year - 1) + max(current_r)
          regions_years[paste0('min_year_data_', 'r_', region_name)] <- current_rmin
          regions_years[paste0('max_year_data_', 'r_', region_name)] <- current_rmax
          current_datagaps <- dataGaps(current_r, min_year, max_year, current_rmin, current_rmax)
          regions_years[paste0('gap_start_', 'r_', region_name)] <- current_datagaps$gap_start
          regions_years[paste0('gap_end_', 'r_', region_name)] <- current_datagaps$gap_end
          regions_years[paste0('gap_middle_', 'r_', region_name)] <- current_datagaps$gap_middle
        } else if(length(current_r) == 1) {
          current_rmin <- (min_year - 1) + min(current_r)
          current_rmax <- (min_year - 1) + max(current_r)
          regions_years[paste0('min_year_data_', 'r_', region_name)] <- current_rmin
          regions_years[paste0('max_year_data_', 'r_', region_name)] <- current_rmax
          current_datagaps <- dataGaps(current_r, min_year, max_year, current_rmin, current_rmax)
          regions_years[paste0('gap_start_', 'r_', region_name)] <- current_datagaps$gap_start
          regions_years[paste0('gap_end_', 'r_', region_name)] <- current_datagaps$gap_end
          # a single record gives no meaningful 'middle' gap
          regions_years[paste0('gap_middle_', 'r_', region_name)] <- NA
        } else {
          # no records at all for this region
          regions_years[paste0('min_year_data_', 'r_', region_name)] <- NA
          regions_years[paste0('max_year_data_', 'r_', region_name)] <- NA
          regions_years[paste0('gap_start_', 'r_', region_name)] <- NA
          regions_years[paste0('gap_end_', 'r_', region_name)] <- NA
          regions_years[paste0('gap_middle_', 'r_', region_name)] <- NA
        }
      }
    }

    # add max and min data years for the whole dataset
    all_years_data <- bugs_data_copy$y * bugs_data_copy$year
    all_years_data <- subset(all_years_data, all_years_data != 0)
    BD_MD$min_year_data <- (min_year - 1) + min(all_years_data)
    BD_MD$max_year_data <- (min_year - 1) + max(all_years_data)
    # use these to find year gap data
    BD_MD$yeargaps <- dataGaps(all_years_data, min_year, max_year, BD_MD$min_year_data, BD_MD$max_year_data)

    # build one set of initial values for a single MCMC chain
    initiate <- function(z, nTP, bugs_data) {
      init <- list(z = z,
                   alpha.p = rep(runif(1, -2, 2),
                                 nTP),
                   a = rep(runif(1, -2, 2), nTP),
                   eta = rep(runif(1, -2, 2), bugs_data$nsite))

      # add extra init values if needed
      for(itype in modeltype){
        init <- getInitValues(init, modeltype = itype)
      }

      # if ranwalk + centering, a -> aa
      if(all(c('ranwalk', 'centering') %in% modeltype)){
        names(init)[names(init) == 'a'] <- 'aa'
      }

      # add user specified values
      if(!is.null(additional.init.values)){
        if(!is.list(additional.init.values)){
          # BUGFIX: the message previously referred to additional.BUGS.elements
          stop("additional.init.values must be a list")
        } else {
          init <- c(init, additional.init.values)
        }
      }
      return(init)
    }

    # set the initial values...
    init.vals <- replicate(n_chains, initiate(z = zst,
                                              nTP = nTP,
                                              bugs_data = bugs_data),
                           simplify = FALSE)

    # modify initial values for regional model
    if(!is.null(regional_codes)){
      # remove initial values for a and psi
      init.vals <- lapply(init.vals, FUN = function(x){
        x[!names(x) %in% c('psi0', 'a')]
      })
    }

    # Select the correct model file
    if(is.null(model.function)){
      model.file <- getModelFile(modeltype,
                                 regional_codes = regional_codes,
                                 region_aggs = region_aggs)
    } else {
      cat('Using user model.function')
      model.file <- model.function
    }

    # keep a copy of the model code for the output object
    # ROBUSTNESS: a user-supplied model.function is a function, not a file
    # path, so readLines() would fail on it
    modelcode <- if(is.function(model.file)){
      paste(deparse(model.file), collapse = '\n')
    } else {
      paste(readLines(model.file), collapse = '\n')
    }

    ### REVIEW CODE
    cat('#### PLEASE REVIEW THE BELOW ####\n\n')
    cat('Your model settings:', paste(modeltype, collapse = ', '))
    cat('\n\nModel File:\n\n')
    cat(modelcode)
    cat('\n\nbugs_data:\n\n')
    cat(str(bugs_data))
    cat('\n\ninit.vals:\n\n')
    cat(str(init.vals))
    cat('\n\nparameters:\n\n')
    cat(parameters)
    ###

    if(identical(model.file, occDetBUGScode)){
      warning("You have selected a formulation with potentially informative priors that are subject to boundary effects (See Outhwaite et al 2018 for details).
            This option is retained within sparta for backwards compatibility only: we strongly recommend that you do
            not use this option for inferring changes in species' distributions")
    }

    error_status <- try(
      out <- R2jags::jags(bugs_data, init.vals, parameters, model.file = model.file,
                          n.chains = n_chains, n.iter = n_iterations, n.thin = thinning,
                          n.burnin = burnin, DIC = TRUE)
    )

    dir.create(path = output_dir, showWarnings = FALSE) # create the top results folder
    # BUGFIX: use inherits() rather than class(x) == "try-error"
    if (inherits(error_status, "try-error")){
      # BUGFIX: paste the message parts so words are not run together
      warning(paste('JAGS returned an error when modelling', taxa_name,
                    'error:', error_status[1]))
      return(NULL)
    }
  } # end of "if(proceed)"

  ########################################## Add metadata

  # calculate number of site:year combinations with repeat visits (across the whole dataset)
  temp <- as.data.frame(with(occDetdata, table(site, TP)))$Freq
  prop_visits_repeated <- mean(temp[temp > 0] > 1)

  if(is.null(attributes(occDetdata))){
    metadata <- list()
  } else if('metadata' %in% names(attributes(occDetdata))){
    metadata <- attr(occDetdata, 'metadata')
  } else {
    metadata <- list()
  }

  # get the sessionInfo and coerce into a useable format
  # ROBUSTNESS: index by name rather than position; the positions of
  # 'otherPkgs'/'loadedOnly' within sessionInfo() vary between R versions
  session.info <- sessionInfo()
  packages <- c(sapply(session.info$otherPkgs, function(x) x$Version),
                sapply(session.info$loadedOnly, function(x) x$Version))

  MD <- list(method = 'sparta::occDetFunc',
             call = match.call(),
             date = Sys.Date(),
             user = Sys.info()['user'],
             summary = list(species = taxa_name,
                            n_sites = length(unique(occDetdata$site)),
                            n_years = length(unique(occDetdata$TP)),
                            n_visits = nrow(occDetdata),
                            n_obs = sum(occDetdata$focal),
                            # BUGFIX: was 'n_species_sites <- ...' (an unnamed list
                            # element) using subset(occDetdata, focal=TRUE), which
                            # silently ignored the condition; the focal column only
                            # exists when a model was run, hence the guard
                            n_species_sites = if("focal" %in% names(occDetdata)){
                              length(unique(subset(occDetdata, focal == TRUE)$site))
                            } else NA,
                            min_year_model = min_year,
                            max_year_model = max_year),
             # BUGFIX: ifelse() cannot return a list and would evaluate
             # BD_MD$... even when BD_MD is NA; use if/else instead
             gaps = if(is.list(BD_MD)) list(
               min_year_data = BD_MD$min_year_data,
               max_year_data = BD_MD$max_year_data,
               gap_start = BD_MD$yeargaps$gap_start,
               gap_end = BD_MD$yeargaps$gap_end,
               gap_middle = BD_MD$yeargaps$gap_middle) else NA,
             spp_Metrics = as.list(data_Metrics),
             dataset_Metrics = list(# dataset properties
               totalObservations = sum(occDetdata$L),
               propRepeats = prop_visits_repeated),
             provenance = provenance,
             # BUGFIX: report the same path that save() below actually uses;
             # prepending getwd() doubled the path when output_dir is absolute
             output_path = if(write_results){
               file.path(output_dir, paste(taxa_name, ".rdata", sep = ""))
             } else NA,
             session_info = list(session.info[!names(session.info) %in% c("otherPkgs", "loadedOnly")],
                                 packages)
  )

  # add regional elements if applicable (scalar condition, so use &&)
  if(!is.null(regional_codes) && proceed){
    MD$summary$region_nobs <- regions_nobs
    MD$summary$region_years <- regions_years
    MD$summary$region_nsite <- regions_sites
  }else{
    MD$summary$region_nobs <- NA
    MD$summary$region_years <- NA
    MD$summary$region_nsite <- NA
  }

  # If the data coming in is the result of analysis we want to
  # append this data under a unique name
  name <- 'analysis'
  i <- 1
  while(name %in% names(metadata)){
    name <- paste0(name, i)
    i <- i + 1
  }
  metadata[name] <- list(MD)
  attr(out, 'metadata') <- metadata

  if(!saveMatrix) out$BUGSoutput$sims.matrix <- NULL

  out$SPP_NAME <- taxa_name
  out$min_year <- min_year
  out$max_year <- max_year
  # BUGFIX: ifelse() would return only the first column of site_match;
  # use if/else so the whole lookup table is kept
  out$sites_included <- if(proceed) site_match else NA
  out$nsites <- bugs_data$nsite
  out$nvisits <- bugs_data$nvisit
  out$species_observations <- sum(bugs_data$y)
  out$sparta_version <- packages["sparta"]
  # BUGFIX: region_names only exists when a model was run, so guard on proceed
  if(!is.null(regional_codes) && proceed) out$regions <- region_names
  if(!is.null(region_aggs)) out$region_aggs <- region_aggs
  if(return_data) out$bugs_data <- bugs_data
  attr(out, 'modeltype') <- modeltype
  attr(out, 'modelcode') <- modelcode
  class(out) <- 'occDet'

  if(write_results) save(out, file = file.path(output_dir, paste(taxa_name, ".rdata", sep = "")))
  return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_output.R
\name{spectrum_output_table}
\alias{spectrum_output_table}
\title{Table to write CSV outputs for Spectrum}
\usage{
spectrum_output_table(mod, fp)
}
\value{
a data.frame to write to CSV file for ingestion into Spectrum
}
\description{
Table to write CSV outputs for Spectrum
}
\details{
Presently this returns point estimates for age 15+ population by sex:
\itemize{
\item Number PLHIV,
\item Ever tested among PLHIV
\item Aware of HIV+ status
\item On ART
}
PLHIV is mid-year estimate. All other outcomes are end of year estimate.
}
\examples{
\dontrun{
write.csv(spectrum_output_table(mod, fp),
"~/Downloads/Malawi-shiny90-example-output.csv",
row.names = FALSE)
}
}
| /man/spectrum_output_table.Rd | permissive | mrc-ide/first90release | R | false | true | 787 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_output.R
\name{spectrum_output_table}
\alias{spectrum_output_table}
\title{Table to write CSV outputs for Spectrum}
\usage{
spectrum_output_table(mod, fp)
}
\value{
a data.frame to write to CSV file for ingestion into Spectrum
}
\description{
Table to write CSV outputs for Spectrum
}
\details{
Presently this returns point estimates for age 15+ population by sex:
\itemize{
\item Number PLHIV,
\item Ever tested among PLHIV
\item Aware of HIV+ status
\item On ART
}
PLHIV is mid-year estimate. All other outcomes are end of year estimate.
}
\examples{
\dontrun{
write.csv(spectrum_output_table(mod, fp),
"~/Downloads/Malawi-shiny90-example-output.csv",
row.names = FALSE)
}
}
|
# Load data
input_data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")

# Change Date column to Date
input_data$Date <- as.Date(input_data$Date, format = "%d/%m/%Y")

# Get a subset of data covering the two days of interest
date_from <- as.Date("2007-02-01", format = "%Y-%m-%d")
date_to <- as.Date("2007-02-02", format = "%Y-%m-%d")
# BUGFIX: use an explicit range test; `Date %in% range(from, to)` only matched
# dates exactly equal to the two endpoints and would break for longer ranges
used_data <- subset(input_data, Date >= date_from & Date <= date_to)

# Get Global Active Power and turn to numeric for plotting
global_active_power <- as.numeric(as.character(used_data$Global_active_power))

# Plot straight to the png device and close it.
# BUGFIX: previously the plot was drawn on the screen device, then a fresh
# (blank) png device was opened and dev.copy() copied that blank device,
# so plot2.png was written empty
datetime <- as.POSIXct(paste(used_data$Date, used_data$Time), format = "%Y-%m-%d %H:%M:%S")
png("plot2.png", width = 480, height = 480)
plot(x = datetime, y = global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)")
dev.off()
| /plot2.R | no_license | Apple2000/ExData_Plotting1 | R | false | false | 798 | r | # Load data
# Load data
input_data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")

# Change Date column to Date
input_data$Date <- as.Date(input_data$Date, format = "%d/%m/%Y")

# Get a subset of data covering the two days of interest
date_from <- as.Date("2007-02-01", format = "%Y-%m-%d")
date_to <- as.Date("2007-02-02", format = "%Y-%m-%d")
# BUGFIX: use an explicit range test; `Date %in% range(from, to)` only matched
# dates exactly equal to the two endpoints and would break for longer ranges
used_data <- subset(input_data, Date >= date_from & Date <= date_to)

# Get Global Active Power and turn to numeric for plotting
global_active_power <- as.numeric(as.character(used_data$Global_active_power))

# Plot straight to the png device and close it.
# BUGFIX: previously the plot was drawn on the screen device, then a fresh
# (blank) png device was opened and dev.copy() copied that blank device,
# so plot2.png was written empty
datetime <- as.POSIXct(paste(used_data$Date, used_data$Time), format = "%Y-%m-%d %H:%M:%S")
png("plot2.png", width = 480, height = 480)
plot(x = datetime, y = global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
#' Simple function for mirroring all existing data.
get_mirror_values <- function(data,
importFlowCode = 1,
exportFlowCode = 2,
return.direct = TRUE) {
require(dplyr)
# Mirroring bilateral trade
datam <-
data %>%
select_(.dots = str_c("-", names(.)[names(.) == "Classification"])) %>%
filter(Partner.Code != 0) %>%
mutate(
Type = "Mirror",
Trade.Flow.Code =
ifelse(
Trade.Flow.Code == importFlowCode,
exportFlowCode,
ifelse(Trade.Flow.Code == exportFlowCode,
importFlowCode,
NA)
),
Reporter.Code_m = Partner.Code,
Partner.Code = Reporter.Code,
Reporter.Code = Reporter.Code_m
) %>%
select(-Reporter.Code_m) %>%
agg_world(aggReps = F, aggPart = T, returnAll = T) %>%
tbl_df()
if (return.direct) {
datam <-
# Aggregating World for reporters
bind_rows(
datam,
mutate(data %>%
select_(.dots = str_c("-", names(.)[names(.) == "Classification"])), Type = "Direct")) %>%
tbl_df()
}
return(datam)
} | /R/get_mirror_values.R | no_license | EBukin/tradeAnalysis-pack | R | false | false | 1,202 | r | #' Simple functiont for mirroing all existing data.
#' Mirror bilateral trade flows: for every reported flow, synthesise the
#' partner's view of the same transaction (swap import/export codes and
#' reporter/partner), then re-aggregate "World" partner totals. When
#' return.direct is TRUE the original records are kept alongside, tagged
#' Type = "Direct"; mirrored rows are tagged Type = "Mirror".
#'
#' NOTE(review): uses the deprecated dplyr underscore verb select_() and
#' calls str_c() from stringr without attaching it -- presumably the caller
#' loads stringr; confirm before reuse.
get_mirror_values <- function(data,
                              importFlowCode = 1,
                              exportFlowCode = 2,
                              return.direct = TRUE) {
  # NOTE(review): require() inside a function is an anti-pattern; library()
  # at the top level (or dplyr:: prefixes) would be safer
  require(dplyr)
  # Mirroring bilateral trade
  datam <-
    data %>%
    # drop the Classification column, if present
    select_(.dots = str_c("-", names(.)[names(.) == "Classification"])) %>%
    # the "World" partner (code 0) cannot be mirrored
    filter(Partner.Code != 0) %>%
    mutate(
      Type = "Mirror",
      # swap the flow direction: imports become exports and vice versa;
      # any other flow code becomes NA
      Trade.Flow.Code =
        ifelse(
          Trade.Flow.Code == importFlowCode,
          exportFlowCode,
          ifelse(Trade.Flow.Code == exportFlowCode,
                 importFlowCode,
                 NA)
        ),
      # swap reporter and partner via a temporary column
      Reporter.Code_m = Partner.Code,
      Partner.Code = Reporter.Code,
      Reporter.Code = Reporter.Code_m
    ) %>%
    select(-Reporter.Code_m) %>%
    # re-aggregate partner totals to "World" (project helper)
    agg_world(aggReps = F, aggPart = T, returnAll = T) %>%
    tbl_df()
  if (return.direct) {
    datam <-
      # Aggregating World for reporters
      bind_rows(
        datam,
        mutate(data %>%
                 select_(.dots = str_c("-", names(.)[names(.) == "Classification"])), Type = "Direct")) %>%
      tbl_df()
  }
  return(datam)
}
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/native_dv.R
\name{create_dataverse}
\alias{create_dataverse}
\title{Create Dataverse}
\usage{
create_dataverse(dataverse, key = Sys.getenv("DATAVERSE_KEY"),
server = Sys.getenv("DATAVERSE_SERVER"), ...)
}
\arguments{
\item{dataverse}{A character string specifying a Dataverse name or an object of class \dQuote{dataverse}.}
\item{key}{A character string specifying a Dataverse server API key. If one is not specified, functions calling authenticated API endpoints will fail. Keys can be specified atomically or globally using \code{Sys.setenv("DATAVERSE_KEY" = "examplekey")}.}
\item{server}{A character string specifying a Dataverse server. There are multiple Dataverse installations, but the defaults is to use the Harvard Dataverse. This can be modified atomically or globally using \code{Sys.setenv("DATAVERSE_SERVER" = "dataverse.example.com")}.}
\item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}, \code{\link[httr]{POST}}, or \code{\link[httr]{DELETE}}.}
}
\value{
A list.
}
\description{
Create a new Dataverse
}
\details{
This function can create a new Dataverse. In the language of Dataverse, a user has a \dQuote{root} Dataverse into which they can create further nested Dataverses and/or \dQuote{datasets} that contain, for example, a set of files for a specific project. Creating a new Dataverse can therefore be a useful way to organize other related Dataverses or sets of related datasets.
For example, if one were involved in an ongoing project that generated monthly data. One may want to store each month's data and related files in a separate \dQuote{dataset}, so that each has its own persistent identifier (e.g., DOI), but keep all of these datasets within a named Dataverse so that the project's files are kept separate the user's personal Dataverse records. The flexible nesting of Dataverses allows for a number of possible organizational approaches.
}
\examples{
\dontrun{}
}
\seealso{
\code{\link{get_dataverse}}, \code{\link{delete_dataverse}}, \code{\link{publish_dataverse}}, \code{\link{create_dataset}}
}
| /man/create_dataverse.Rd | no_license | thehyve/dataverse-client-r | R | false | false | 2,179 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/native_dv.R
\name{create_dataverse}
\alias{create_dataverse}
\title{Create Dataverse}
\usage{
create_dataverse(dataverse, key = Sys.getenv("DATAVERSE_KEY"),
server = Sys.getenv("DATAVERSE_SERVER"), ...)
}
\arguments{
\item{dataverse}{A character string specifying a Dataverse name or an object of class \dQuote{dataverse}.}
\item{key}{A character string specifying a Dataverse server API key. If one is not specified, functions calling authenticated API endpoints will fail. Keys can be specified atomically or globally using \code{Sys.setenv("DATAVERSE_KEY" = "examplekey")}.}
\item{server}{A character string specifying a Dataverse server. There are multiple Dataverse installations, but the defaults is to use the Harvard Dataverse. This can be modified atomically or globally using \code{Sys.setenv("DATAVERSE_SERVER" = "dataverse.example.com")}.}
\item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}, \code{\link[httr]{POST}}, or \code{\link[httr]{DELETE}}.}
}
\value{
A list.
}
\description{
Create a new Dataverse
}
\details{
This function can create a new Dataverse. In the language of Dataverse, a user has a \dQuote{root} Dataverse into which they can create further nested Dataverses and/or \dQuote{datasets} that contain, for example, a set of files for a specific project. Creating a new Dataverse can therefore be a useful way to organize other related Dataverses or sets of related datasets.
For example, if one were involved in an ongoing project that generated monthly data. One may want to store each month's data and related files in a separate \dQuote{dataset}, so that each has its own persistent identifier (e.g., DOI), but keep all of these datasets within a named Dataverse so that the project's files are kept separate the user's personal Dataverse records. The flexible nesting of Dataverses allows for a number of possible organizational approaches.
}
\examples{
\dontrun{}
}
\seealso{
\code{\link{get_dataverse}}, \code{\link{delete_dataverse}}, \code{\link{publish_dataverse}}, \code{\link{create_dataset}}
}
|
## version: 1.30
## method: post
## path: /secrets/create
## code: 201
## response: {"ID":"ktnbjxoalbkvbvedmg1urrz8h"}
list(id = "ktnbjxoalbkvbvedmg1urrz8h")
| /tests/testthat/sample_responses/v1.30/secret_create.R | no_license | cran/stevedore | R | false | false | 158 | r | ## version: 1.30
## method: post
## path: /secrets/create
## code: 201
## response: {"ID":"ktnbjxoalbkvbvedmg1urrz8h"}
list(id = "ktnbjxoalbkvbvedmg1urrz8h")
|
library(rpart)

# Data frame preparation: use the first 300 rows as the training set
data <- read.csv("C:\\Users\\coep\\Desktop\\eNSEMBLE\\DATA\\default_credit_card.csv", header = TRUE)
data1 <- data
sample1 <- seq(from = 1, to = 300, by = 1)
#sample1 <- sample(nrow(data1), 300, replace = FALSE)
data <- data1[sample1, ]
x_train <- data[, -25]
y_train <- data[, 25]
x <- cbind(x_train, y_train)
x$y_train <- as.factor(x$y_train)

# Model creation
# BUGFIX: fit on `x`, which contains the factor response, rather than
# `x_train`, which lacks the y_train column entirely
dt <- rpart(y_train ~ ., data = x, method = "class")
# BUGFIX: summarise the fitted tree `dt` ('rfModel' was never defined)
summary(dt)

# Accuracy check on a held-out slice of rows
sample2 <- seq(from = 1000, to = 1500, by = 1)
#sample2 <- sample(nrow(data1), 300, replace = FALSE)
dataTest <- data1[sample2, ]
x_test <- dataTest[, -25]
y_test <- dataTest[, 25]
fitted.results <- predict(dt, x_test, type = 'class')
misClasificError <- mean(fitted.results != y_test)
print(paste('Accuracy', 1 - misClasificError))
| /DecisionTree.R | no_license | ABurkin/Machine-Learning-Algorithms-in-R | R | false | false | 841 | r | library(rpart)
# ---- Data preparation ----
# NOTE(review): path is machine-specific; adjust before running.
data <- read.csv("C:\\Users\\coep\\Desktop\\eNSEMBLE\\DATA\\default_credit_card.csv", header = TRUE)
data1 <- data
# Training subset: the first 300 rows (column 25 holds the class label).
sample1 <- seq(from = 1, to = 300, by = 1)
#sample1 <- sample(nrow(data1), 300, replace = FALSE)
data <- data1[sample1, ]
x_train <- data[, -25]
y_train <- data[, 25]
x <- cbind(x_train, y_train)
x$y_train <- as.factor(x$y_train)  # classification needs a factor response

# ---- Model creation ----
# Fix: train on `x`, which actually contains the (factor) response column;
# the original passed `x_train`, which lacks `y_train`.
dt <- rpart(y_train ~ ., data = x, method = "class")
# Fix: summarise the fitted tree `dt`; the original referenced an
# undefined object `rfModel` (copy-paste from a random-forest script).
summary(dt)

# ---- Accuracy check on a held-out slice ----
sample2 <- seq(from = 1000, to = 1500, by = 1)
#sample2 <- sample(nrow(data1), 300, replace = FALSE)
dataTest <- data1[sample2, ]
x_test <- dataTest[, -25]
y_test <- dataTest[, 25]
fitted.results <- predict(dt, x_test, type = 'class')
misClasificError <- mean(fitted.results != y_test)
print(paste('Accuracy', 1 - misClasificError))
|
# Exercise 1: calling built-in functions
# Author: Jueyao Liu

# Create a variable `my_name` that contains your name
my_name <- "Jueyao"

# Create a variable `name_length` that holds how many letters (including spaces)
# are in your name (use the `nchar()` function)
name_length <- nchar(my_name)

# Print the number of letters in your name
name_length

# Create a variable `now_doing` that is your name followed by "is programming!"
# (use the `paste()` function)
now_doing <- paste(my_name, "is programming!")

# Make the `now_doing` variable upper case
# Fix: assign the result back; a bare `toupper(now_doing)` only prints the
# upper-cased string and leaves the variable unchanged.
now_doing <- toupper(now_doing)

### Bonus

# Pick two of your favorite numbers (between 1 and 100) and assign them to
# variables `fav_1` and `fav_2`
fav_1 <- 19
fav_2 <- 6

# Divide each number by the square root of 201 and save the new value in the
# original variable
fav_1 <- fav_1 / sqrt(201)
fav_2 <- fav_2 / sqrt(201)

# Create a variable `raw_sum` that is the sum of the two variables. Use the
# `sum()` function for practice.
raw_sum <- sum(fav_1, fav_2)

# Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place.
# Use the `round()` function.
round_sum <- round(raw_sum, 1)

# Create two new variables `round_1` and `round_2` that are your `fav_1` and
# `fav_2` variables rounded to 1 decimal place
round_1 <- round(fav_1, 1)
round_2 <- round(fav_2, 1)

# Create a variable `sum_round` that is the sum of the rounded values
sum_round <- sum(round_1, round_2)

# Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!)
max(round_sum, sum_round)
| /exercise_6-1.R | no_license | jueyaoyao/w2_exercises | R | false | false | 1,548 | r | # Exercise 1: calling built-in functions
# Author: Jueyao Liu

# Create a variable `my_name` that contains your name
my_name <- "Jueyao"

# Create a variable `name_length` that holds how many letters (including spaces)
# are in your name (use the `nchar()` function)
name_length <- nchar(my_name)

# Print the number of letters in your name
name_length

# Create a variable `now_doing` that is your name followed by "is programming!"
# (use the `paste()` function)
now_doing <- paste(my_name, "is programming!")

# Make the `now_doing` variable upper case
# Fix: assign the result back; a bare `toupper(now_doing)` only prints the
# upper-cased string and leaves the variable unchanged.
now_doing <- toupper(now_doing)

### Bonus

# Pick two of your favorite numbers (between 1 and 100) and assign them to
# variables `fav_1` and `fav_2`
fav_1 <- 19
fav_2 <- 6

# Divide each number by the square root of 201 and save the new value in the
# original variable
fav_1 <- fav_1 / sqrt(201)
fav_2 <- fav_2 / sqrt(201)

# Create a variable `raw_sum` that is the sum of the two variables. Use the
# `sum()` function for practice.
raw_sum <- sum(fav_1, fav_2)

# Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place.
# Use the `round()` function.
round_sum <- round(raw_sum, 1)

# Create two new variables `round_1` and `round_2` that are your `fav_1` and
# `fav_2` variables rounded to 1 decimal place
round_1 <- round(fav_1, 1)
round_2 <- round(fav_2, 1)

# Create a variable `sum_round` that is the sum of the rounded values
sum_round <- sum(round_1, round_2)

# Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!)
max(round_sum, sum_round)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{db_append}
\alias{db_append}
\title{Append rows into a database table}
\usage{
db_append(df, table, db, ...)
}
\arguments{
\item{df}{data.frame}
\item{table}{character vector of an optional schema and table name}
\item{db}{database object returned by \code{\link{db_connect}}}
\item{...}{further parameters passed to \code{dbWriteTable}, eg to modify \code{row.names} or \code{append} (depends on driver)}
}
\description{
This is a wrapper around \code{\link{db_insert}} with the default parameters set to append to a table.
}
| /man/db_append.Rd | no_license | tdeenes/dbr | R | false | true | 619 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{db_append}
\alias{db_append}
\title{Append rows into a database table}
\usage{
db_append(df, table, db, ...)
}
\arguments{
\item{df}{data.frame}
\item{table}{character vector of an optional schema and table name}
\item{db}{database object returned by \code{\link{db_connect}}}
\item{...}{further parameters passed to \code{dbWriteTable}, eg to modify \code{row.names} or \code{append} (depends on driver)}
}
\description{
This is a wrapper around \code{\link{db_insert}} with the default parameters set to append to a table.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_model.R
\name{.get_model}
\alias{.get_model}
\title{Create Formula for Call to Model}
\usage{
.get_model(y, smooth_terms, linear_terms, shape_type)
}
\arguments{
\item{y}{- a string of the name of the predictor variable}
\item{smooth_terms}{- a vector of strings of the names of the spline terms}
\item{linear_terms}{- a vector of strings of the names of the linear terms}
\item{shape_type}{- a vector of strings containing the shape constraints for the spline (in the same order as smooth_terms). Can only contain shape constraints that the scam function supports}
}
\value{
- string of call to model
}
\description{
Create Formula for Call to Model
}
\examples{
y = "house_price"
smooth_terms = c("sq_ft", "distance_from_work")
linear_terms = c("num_bedrooms", "num_bathrooms")
shape_type = c("mpi", "mpd")
.get_model(y, smooth_terms, linear_terms, shape_type)
}
| /man/dot-get_model.Rd | no_license | christithomp/scamplot | R | false | true | 951 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_model.R
\name{.get_model}
\alias{.get_model}
\title{Create Formula for Call to Model}
\usage{
.get_model(y, smooth_terms, linear_terms, shape_type)
}
\arguments{
\item{y}{- a string of the name of the predictor variable}
\item{smooth_terms}{- a vector of strings of the names of the spline terms}
\item{linear_terms}{- a vector of strings of the names of the linear terms}
\item{shape_type}{- a vector of strings containing the shape constraints for the spline (in the same order as smooth_terms). Can only contain shape constraints that the scam function supports}
}
\value{
- string of call to model
}
\description{
Create Formula for Call to Model
}
\examples{
y = "house_price"
smooth_terms = c("sq_ft", "distance_from_work")
linear_terms = c("num_bedrooms", "num_bathrooms")
shape_type = c("mpi", "mpd")
.get_model(y, smooth_terms, linear_terms, shape_type)
}
|
# Fit, plot, prune and cross-validate a classification tree (package {tree})
# for Titanic survival. Expects `tit_new` (training) and `test_new` (test)
# data frames to exist in the workspace, with the class label in column 1.
# Make new datasets just from a set of columns (ignore columns 3, 8, 10 and 11)
# NOTE(review): the copies below keep every column; the column removal
# described above is presumably done upstream when building
# tit_new/test_new -- confirm.
tit3<-tit_new
test3<-test_new
# train tree on a tit3 set (training set)
t1<-tree(survived~.,data=tit3)
plot(t1)
text(t1,pretty=1)
# accuracy rate: share of correctly classified training rows
1-sum(tit3[[1]]!=predict(t1,tit3,type="class"))/nrow(tit3)
#draw.tree(t1)
#################################
# apply tree to test set
# NOTE(review): t2 (tree pruned to 6 leaves) is never used afterwards --
# t3 predicts from the unpruned tree t1; confirm which was intended.
t2<-prune.tree(t1,best=6)
t3<-predict(t1,newdata=test3,type="class")
# merge prediction with test set (overwrite the label column with predictions)
test3[[1]]<-t3
# Go to write results
# results_v1.csv
# ---------------------------
# CV tree: choose tree size by cross-validated misclassification rate
cv<-cv.tree(t1,FUN=prune.tree,method="misclass")
plot(cv)
pt1 <- prune.tree(t1,best=9)
misclass.tree(pt1)
summary(pt1)
#pdf(file="./figures/pruned_tree.pdf",height=5,width=5)
plot(pt1)
text(pt1)
| /Code/Raw/classification_tree.R | no_license | astronerma/Titanic | R | false | false | 764 | r | # Make new datasets just from a set of columns (ignore columns 3, 8, 10 and 11)
# Fit, plot, prune and cross-validate a classification tree (package {tree})
# for Titanic survival. Expects `tit_new` (training) and `test_new` (test)
# data frames to exist in the workspace, with the class label in column 1.
tit3<-tit_new
test3<-test_new
# train tree on a tit3 set (training set)
t1<-tree(survived~.,data=tit3)
plot(t1)
text(t1,pretty=1)
# accuracy rate: share of correctly classified training rows
1-sum(tit3[[1]]!=predict(t1,tit3,type="class"))/nrow(tit3)
#draw.tree(t1)
#################################
# apply tree to test set
# NOTE(review): t2 (tree pruned to 6 leaves) is never used afterwards --
# t3 predicts from the unpruned tree t1; confirm which was intended.
t2<-prune.tree(t1,best=6)
t3<-predict(t1,newdata=test3,type="class")
# merge prediction with test set (overwrite the label column with predictions)
test3[[1]]<-t3
# Go to write results
# results_v1.csv
# ---------------------------
# CV tree: choose tree size by cross-validated misclassification rate
cv<-cv.tree(t1,FUN=prune.tree,method="misclass")
plot(cv)
pt1 <- prune.tree(t1,best=9)
misclass.tree(pt1)
summary(pt1)
#pdf(file="./figures/pruned_tree.pdf",height=5,width=5)
plot(pt1)
text(pt1)
|
\name{sctest.default}
\alias{sctest.default}
\title{Structural Change Tests in Parametric Models}
\description{
Performs model-based tests for structural change (or parameter instability)
in parametric models.
}
\usage{
\method{sctest}{default}(x, order.by = NULL, functional = maxBB,
vcov = NULL, scores = estfun, decorrelate = TRUE, sandwich = TRUE,
parm = NULL, plot = FALSE, from = 0.1, to = NULL, nobs = NULL,
nrep = 50000, width = 0.15, xlab = NULL, \dots)
}
\arguments{
\item{x}{a model object. The model class can in principle be arbitrary
but needs to provide suitable methods for extracting the \code{scores}
and associated variance-covariance matrix \code{vcov}.}
\item{order.by}{either a vector \code{z} or a formula with a single explanatory
variable like \code{~ z}. The observations in the model
are ordered by the size of \code{z}. If set to \code{NULL} (the
default) the observations are assumed to be ordered (e.g., a
time series).}
\item{functional}{either a character specification of the functional
to be used or an \code{\link{efpFunctional}} object. For a list
of functionals see the details.}
\item{vcov}{a function to extract the covariance matrix
for the coefficients of the fitted model:
\code{vcov(x, order.by = order.by, data = data)}.
Alternatively, the character string \code{"info"}, for details see
below.}
\item{scores}{a function which extracts the scores or estimating
function from the fitted object: \code{scores(x)}, by default
this is \code{\link[sandwich]{estfun}}.}
\item{decorrelate}{logical. Should the process be decorrelated?}
\item{sandwich}{logical. Is the function \code{vcov} the full sandwich
estimator or only the meat?}
\item{parm}{integer or character specifying the component of the estimating
functions which should be used (by default all components are used).}
\item{plot}{logical. Should the result of the test also be visualized?}
\item{from, to}{numeric. In case the \code{functional} is \code{"supLM"}
(or equivalently \code{"maxLM"}), \code{from} and \code{to} can be
passed to the \code{\link{supLM}} functional.}
\item{nobs, nrep}{numeric. In case the \code{functional} is \code{"maxLMo"},
\code{nobs} and \code{nrep} are passed to the \code{\link{catL2BB}} functional.}
\item{width}{numeric. In case the \code{functional} is \code{"MOSUM"},
the bandwidth \code{width} is passed to the \code{\link{maxMOSUM}}
functional.}
\item{xlab, \dots}{graphical parameters passed to the plot method (in case
\code{plot = TRUE}).}
}
\details{
\code{sctest.default} is a convenience interface to \code{\link{gefp}} for
structural change tests (or parameter instability tests) in general
parametric models. It proceeds in the following steps:
\enumerate{
\item The generalized empirical fluctuation process (or score-based CUSUM process)
is computed via \code{scus <- gefp(x, fit = NULL, \dots)} where \code{\dots}
comprises the arguments \code{order.by}, \code{vcov}, \code{scores}, \code{decorrelate},
\code{sandwich}, \code{parm} that are simply passed on to \code{\link{gefp}}.
\item The empirical fluctuation process is visualized (if \code{plot = TRUE}) via
\code{plot(scus, functional = functional, \dots)}.
\item The empirical fluctuation is assessed by the corresponding significance test
via \code{sctest(scus, functional = functional)}.
}
The main motivation for providing the convenience interface is that these three
steps can be easily carried out in one go along with two convenience options:
\enumerate{
\item By default, the covariance is computed by an outer-product of gradients
estimator just as in \code{gefp}. This is always available based on the \code{scores}.
Additionally, by setting \code{vcov = "info"}, the corresponding information
matrix can be used. Then the average information is assumed to be provided by
the \code{vcov} method for the model class. (Note that this is only sensible
for models estimated by maximum likelihood.)
\item Instead of providing the \code{functional} by an \code{\link{efpFunctional}}
object, the test labels employed by Merkle and Zeileis (2013) and Merkle, Fan,
and Zeileis (2013) can be used for convenience. Namely, for continuous numeric
orderings, the following functionals are available:
\code{functional = "DM"} or \code{"dmax"} provides the double-maximum test (\code{\link{maxBB}}).
\code{"CvM"} is the Cramer-von Mises functional \code{\link{meanL2BB}}.
\code{"supLM"} or equivalently \code{"maxLM"} is Andrews' supLM test
(\code{\link{supLM}}). \code{"MOSUM"} or \code{"maxMOSUM"} is the MOSUM
functional (\code{\link{maxMOSUM}}), and \code{"range"} is the range
functional \code{\link{rangeBB}}. Furthermore, several functionals suitable
for (ordered) categorical \code{order.by} variables are provided:
\code{"LMuo"} is the unordered LM test (\code{\link{catL2BB}}),
\code{"WDMo"} is the weighted double-maximum test for ordered variables
(\code{\link{ordwmax}}), and \code{"maxLMo"} is the maxLM test for
ordered variables (\code{\link{ordL2BB}}).
}
The theoretical model class is introduced in Zeileis and Hornik (2007) with a
unifying view in Zeileis (2005), especially from an econometric perspective.
Zeileis (2006) introduces the underlying computational tools \code{gefp} and
\code{efpFunctional}.
Merkle and Zeileis (2013) discuss the methods in the context of measurement
invariance which is particularly relevant to psychometric models for cross section
data. Merkle, Fan, and Zeileis (2013) extend the results to ordered categorical
variables.
Zeileis, Shah, and Patnaik (2013) provide a unifying discussion in the context
of time series methods, specifically in financial econometrics.
}
\value{
An object of class \code{"htest"} containing:
\item{statistic}{the test statistic,}
\item{p.value}{the corresponding p value,}
\item{method}{a character string with the method used,}
\item{data.name}{a character string with the data name.}
}
\references{
Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups:
A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82.
doi:10.1007/S11336-012-9302-4
Merkle E.C., Fan J., Zeileis A. (2013), Testing for Measurement Invariance with
Respect to an Ordinal Variable. \emph{Psychometrika}, Forthcoming.
Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on
ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24},
445--466. doi:10.1080/07474930500406053.
Zeileis A. (2006), Implementing a Class of Structural Change Tests: An
Econometric Computing Approach. \emph{Computational Statistics & Data Analysis},
\bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001.
Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter
Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508.
doi:10.1111/j.1467-9574.2007.00371.x.
Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural
Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis},
Forthcoming. doi:10.1016/j.csda.2009.12.005.
}
\seealso{\code{\link{gefp}}, \code{\link{efpFunctional}}}
\examples{
## Zeileis and Hornik (2007), Section 5.3, Figure 6
data("Grossarl")
m <- glm(cbind(illegitimate, legitimate) ~ 1, family = binomial, data = Grossarl,
subset = time(fraction) <= 1800)
sctest(m, order.by = 1700:1800, functional = "CvM")
}
\keyword{htest}
| /man/sctest.default.Rd | no_license | mengluchu/strucchange | R | false | false | 7,563 | rd | \name{sctest.default}
\alias{sctest.default}
\title{Structural Change Tests in Parametric Models}
\description{
Performs model-based tests for structural change (or parameter instability)
in parametric models.
}
\usage{
\method{sctest}{default}(x, order.by = NULL, functional = maxBB,
vcov = NULL, scores = estfun, decorrelate = TRUE, sandwich = TRUE,
parm = NULL, plot = FALSE, from = 0.1, to = NULL, nobs = NULL,
nrep = 50000, width = 0.15, xlab = NULL, \dots)
}
\arguments{
\item{x}{a model object. The model class can in principle be arbitrary
but needs to provide suitable methods for extracting the \code{scores}
and associated variance-covariance matrix \code{vcov}.}
\item{order.by}{either a vector \code{z} or a formula with a single explanatory
variable like \code{~ z}. The observations in the model
are ordered by the size of \code{z}. If set to \code{NULL} (the
default) the observations are assumed to be ordered (e.g., a
time series).}
\item{functional}{either a character specification of the functional
to be used or an \code{\link{efpFunctional}} object. For a list
of functionals see the details.}
\item{vcov}{a function to extract the covariance matrix
for the coefficients of the fitted model:
\code{vcov(x, order.by = order.by, data = data)}.
Alternatively, the character string \code{"info"}, for details see
below.}
\item{scores}{a function which extracts the scores or estimating
function from the fitted object: \code{scores(x)}, by default
this is \code{\link[sandwich]{estfun}}.}
\item{decorrelate}{logical. Should the process be decorrelated?}
\item{sandwich}{logical. Is the function \code{vcov} the full sandwich
estimator or only the meat?}
\item{parm}{integer or character specifying the component of the estimating
functions which should be used (by default all components are used).}
\item{plot}{logical. Should the result of the test also be visualized?}
\item{from, to}{numeric. In case the \code{functional} is \code{"supLM"}
(or equivalently \code{"maxLM"}), \code{from} and \code{to} can be
passed to the \code{\link{supLM}} functional.}
\item{nobs, nrep}{numeric. In case the \code{functional} is \code{"maxLMo"},
\code{nobs} and \code{nrep} are passed to the \code{\link{catL2BB}} functional.}
\item{width}{numeric. In case the \code{functional} is \code{"MOSUM"},
the bandwidth \code{width} is passed to the \code{\link{maxMOSUM}}
functional.}
\item{xlab, \dots}{graphical parameters passed to the plot method (in case
\code{plot = TRUE}).}
}
\details{
\code{sctest.default} is a convenience interface to \code{\link{gefp}} for
structural change tests (or parameter instability tests) in general
parametric models. It proceeds in the following steps:
\enumerate{
\item The generalized empirical fluctuation process (or score-based CUSUM process)
is computed via \code{scus <- gefp(x, fit = NULL, \dots)} where \code{\dots}
comprises the arguments \code{order.by}, \code{vcov}, \code{scores}, \code{decorrelate},
\code{sandwich}, \code{parm} that are simply passed on to \code{\link{gefp}}.
\item The empirical fluctuation process is visualized (if \code{plot = TRUE}) via
\code{plot(scus, functional = functional, \dots)}.
\item The empirical fluctuation is assessed by the corresponding significance test
via \code{sctest(scus, functional = functional)}.
}
The main motivation for providing the convenience interface is that these three
steps can be easily carried out in one go along with two convenience options:
\enumerate{
\item By default, the covariance is computed by an outer-product of gradients
estimator just as in \code{gefp}. This is always available based on the \code{scores}.
Additionally, by setting \code{vcov = "info"}, the corresponding information
matrix can be used. Then the average information is assumed to be provided by
the \code{vcov} method for the model class. (Note that this is only sensible
for models estimated by maximum likelihood.)
\item Instead of providing the \code{functional} by an \code{\link{efpFunctional}}
object, the test labels employed by Merkle and Zeileis (2013) and Merkle, Fan,
and Zeileis (2013) can be used for convenience. Namely, for continuous numeric
orderings, the following functionals are available:
\code{functional = "DM"} or \code{"dmax"} provides the double-maximum test (\code{\link{maxBB}}).
\code{"CvM"} is the Cramer-von Mises functional \code{\link{meanL2BB}}.
\code{"supLM"} or equivalently \code{"maxLM"} is Andrews' supLM test
(\code{\link{supLM}}). \code{"MOSUM"} or \code{"maxMOSUM"} is the MOSUM
functional (\code{\link{maxMOSUM}}), and \code{"range"} is the range
functional \code{\link{rangeBB}}. Furthermore, several functionals suitable
for (ordered) categorical \code{order.by} variables are provided:
\code{"LMuo"} is the unordered LM test (\code{\link{catL2BB}}),
\code{"WDMo"} is the weighted double-maximum test for ordered variables
(\code{\link{ordwmax}}), and \code{"maxLMo"} is the maxLM test for
ordered variables (\code{\link{ordL2BB}}).
}
The theoretical model class is introduced in Zeileis and Hornik (2007) with a
unifying view in Zeileis (2005), especially from an econometric perspective.
Zeileis (2006) introduces the underlying computational tools \code{gefp} and
\code{efpFunctional}.
Merkle and Zeileis (2013) discuss the methods in the context of measurement
invariance which is particularly relevant to psychometric models for cross section
data. Merkle, Fan, and Zeileis (2013) extend the results to ordered categorical
variables.
Zeileis, Shah, and Patnaik (2013) provide a unifying discussion in the context
of time series methods, specifically in financial econometrics.
}
\value{
An object of class \code{"htest"} containing:
\item{statistic}{the test statistic,}
\item{p.value}{the corresponding p value,}
\item{method}{a character string with the method used,}
\item{data.name}{a character string with the data name.}
}
\references{
Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups:
A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82.
doi:10.1007/S11336-012-9302-4
Merkle E.C., Fan J., Zeileis A. (2013), Testing for Measurement Invariance with
Respect to an Ordinal Variable. \emph{Psychometrika}, Forthcoming.
Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on
ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24},
445--466. doi:10.1080/07474930500406053.
Zeileis A. (2006), Implementing a Class of Structural Change Tests: An
Econometric Computing Approach. \emph{Computational Statistics & Data Analysis},
\bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001.
Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter
Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508.
doi:10.1111/j.1467-9574.2007.00371.x.
Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural
Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis},
Forthcoming. doi:10.1016/j.csda.2009.12.005.
}
\seealso{\code{\link{gefp}}, \code{\link{efpFunctional}}}
\examples{
## Zeileis and Hornik (2007), Section 5.3, Figure 6
data("Grossarl")
m <- glm(cbind(illegitimate, legitimate) ~ 1, family = binomial, data = Grossarl,
subset = time(fraction) <= 1800)
sctest(m, order.by = 1700:1800, functional = "CvM")
}
\keyword{htest}
|
#Copyright © 2016 RTE Réseau de transport d’électricité
#' Change the timestep of an output
#'
#' This function changes the timestep of a table or an \code{antaresData} object
#' and performs the required aggregation or desaggregation. We can specify
#' (des)aggregate functions by columns, see the param `fun`.
#'
#' @param x
#' data.table with a column "timeId" or an object of class "antaresDataList"
#' @param newTimeStep
#' Desired time step.The possible values are hourly, daily, weekly,
#' monthly and annual.
#' @param oldTimeStep
#' Current time step of the data. This argument is optional for an object of
#' class \code{antaresData} because the time step of the data is stored inside
#' the object
#' @param fun
#' Character vector with one element per column to (des)aggregate indicating
#' the function to use ("sum", "mean", "min" or "max") for this column. It can
#' be a single element, in that case the same function is applied to every
#' columns.
#' @inheritParams readAntares
#'
#' @return
#' Either a data.table or an object of class "antaresDataList" depending on the
#' class of \code{x}
#'
#' @examples
#' \dontrun{
#' setSimulationPath()
#'
#' areasH <- readAntares(select = "LOAD", synthesis = FALSE, mcYears = 1)
#' areasD <- readAntares(select = "LOAD", synthesis = FALSE, mcYears = 1, timeStep ="daily")
#'
#' areasDAgg <- changeTimeStep(areasH, "daily", "hourly")
#'
#' all.equal(areasDAgg$LOAD, areasD$LOAD)
#'
#' # Use different aggregation functions
#' mydata <- readAntares(select = c("LOAD", "MRG. PRICE"), timeStep = "monthly")
#' changeTimeStep(mydata, "annual", fun = c("sum", "mean"))
#' }
#'
#' @export
#'
changeTimeStep <- function(x, newTimeStep, oldTimeStep, fun = "sum", opts=simOptions()) {
# Validate the aggregation function name(s); one name per value column is
# allowed (several.ok), or a single name applied to all columns.
fun <- match.arg(fun, c("sum", "mean", "min", "max"), several.ok = TRUE)
# Aggregation functions, one per supported name in `fun`.
# "sum" coerces to numeric first to avoid integer overflow on large totals.
funs <- list(sum = function(x) sum(as.numeric(x)), mean = mean, min = min, max = max)
# Disaggregation functions: given a value and the number n of sub-periods,
# "sum" spreads the value evenly while "mean"/"min"/"max" repeat it.
ifuns <- list(sum = function(x, n) {x / n},
mean = function(x, n) {x},
min = function(x, n) {x},
max = function(x, n) {x})
# For antaresData objects the simulation options and the current time step
# are stored as attributes; they override the corresponding arguments.
if (is(x, "antaresData")) {
opts <- simOptions(x)
oldTimeStep <- attr(x, "timeStep")
}
# Nothing to do when the requested time step is the current one.
if (newTimeStep == oldTimeStep) return(x)
# Emits a warning when a true aggregation (fine -> coarse) is requested.
.checkIfWeWantToAggregate(newTimeStep, oldTimeStep)
# antaresDataList: recurse on each element, then update the attribute.
if (is(x, "antaresDataList")) {
for (i in 1:length(x)) {
x[[i]] <- changeTimeStep(x[[i]], newTimeStep, oldTimeStep, fun, opts)
}
attr(x, "timeStep") <- newTimeStep
return(x)
}
# Keep a copy of attributes to put them back at the end of the function
synthesis <- attr(x, "synthesis")
type <- attr(x, "type")
# Should we re-add date-time columns at the end?
addDateTimeCol <- !is.null(x[["time"]])
# Suppress time variables; they are recomputed for the new time step.
if (!is.null(x[["time"]])) x$time <- NULL
if (!is.null(x$hour)) x$hour <- NULL
if (!is.null(x$day)) x$day <- NULL
if (!is.null(x$week)) x$week <- NULL
if (!is.null(x$month)) x$month <- NULL
# Strategy: if oldTimeStep is not hourly, first disaggregate data at hourly
# level. Then, in all cases aggregate hourly data at the desired level.
# `refTime` maps each old timeId to the corresponding new timeId, built from
# the full hourly range of the simulation.
refTime <- data.table(
oldTimeId = .getTimeId(opts$timeIdMin:opts$timeIdMax, oldTimeStep, opts),
timeId = .getTimeId(opts$timeIdMin:opts$timeIdMax, newTimeStep, opts)
)
x <- copy(x)
setnames(x, "timeId", "oldTimeId")
# allow.cartesian: one old row can map to many hourly rows (disaggregation).
x <- merge(x, refTime, by = "oldTimeId", allow.cartesian=TRUE)
# Disaggregation step (old time step coarser than hourly)
if (oldTimeStep != "hourly") {
idVars <- c(.idCols(x), "oldTimeId")
idVars <- idVars[idVars != "timeId"]
by <- parse(text = sprintf("list(%s)", paste(idVars, collapse = ", ")))
# Code below is a bit hacky: we want to use a window function on all variables but one (timeId).
# Instead of writing a loop on the columns, we separate the timeId column
# from the table, use the syntax of data.table to perform the window function
# and finally put back the timeId in the table.
setorderv(x, idVars)
timeId <- x$timeId
x$timeId <- NULL
if (length(fun) == 1) fun <- rep(fun, ncol(x) - length(idVars))
x <- x[, mapply(function(x, f) {f(x, .N)}, x = .SD, f = ifuns[fun], SIMPLIFY=FALSE), by = eval(by)]
x$timeId <- timeId
.reorderCols(x)
}
x$oldTimeId <- NULL
# Aggregation step (new time step coarser than hourly)
if (newTimeStep != "hourly") {
idVars <- .idCols(x)
if (length(fun) == 1) fun <- rep(fun, ncol(x) - length(idVars))
x <- x[, mapply(function(x, f) {f(x)}, x = .SD, f = funs[fun], SIMPLIFY=FALSE), keyby=idVars]
}
# Restore class/attributes, then optionally re-add the date-time columns.
x <- .addClassAndAttributes(x, synthesis, newTimeStep, opts, type = type)
if(addDateTimeCol) addDateTimeColumns(x)
x
}
# Convert hourly time ids into time ids at the granularity `timeStep`.
#
# hourId:   integer vector of hourly time ids (1-based).
# timeStep: one of "hourly", "daily", "weekly", "monthly", "annual".
# opts:     simulation options; `start` and `firstWeekday` are used for the
#           weekly/monthly cases.
#
# NOTE(review): an unrecognised `timeStep` falls through every branch and
# returns NULL -- callers are expected to pass a valid value; confirm.
.getTimeId <- function(hourId, timeStep, opts) {
# Easy cases: direct arithmetic, no calendar needed.
if (timeStep == "hourly") return(hourId)
if (timeStep == "daily") {
return( (hourId - 1) %/% 24 + 1 )
}
if (timeStep == "annual") {
return(rep(1L, length(hourId)))
}
# Hard cases
# Create a correlation table between hourIds and actual dates and compute new
# timeIds based on the actual dates (one row per hour of a 365-day year).
timeRef <- data.table(hourId = 1:(24*365))
tmp <- as.POSIXct(opts$start)
lubridate::hour(tmp) <- lubridate::hour(tmp) + timeRef$hourId - 1
timeRef$hour <- tmp
if (timeStep == "weekly") {
timeRef$wday <- lubridate::wday(timeRef$hour)
# Index (1 = Sunday ... 7 = Saturday) of the weekday on which a week starts.
startWeek <- which(c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday") == opts$firstWeekday)
# A new week begins on the first hour of the starting weekday.
timeRef[, change := wday == startWeek & wday != shift(wday)]
timeRef$change[1] <- TRUE
timeRef[, timeId := cumsum(change)]
return(timeRef$timeId[hourId])
}
if (timeStep == "monthly") {
timeRef$month <- lubridate::month(timeRef$hour)
# A new month begins on the first hour whose month differs from the previous one.
timeRef[, change := month != shift(month)]
timeRef$change[1] <- TRUE
timeRef[, timeId := cumsum(change)]
return(timeRef$timeId[hourId])
}
}
# Warn when the requested conversion is a true aggregation, i.e. moving from
# a finer time step to a coarser one (hourly < daily < weekly < monthly <
# annual). Aggregating the rounded values written by ANTARES loses precision,
# which the warning points out. Disaggregation and unknown combinations are
# silent.
.checkIfWeWantToAggregate <- function(newTimeStep, oldTimeStep) {
  # Dispatch on the current time step; each branch tests whether the target
  # step is strictly coarser. The unnamed last argument is the default
  # (unknown old time step -> no warning), matching the original behaviour.
  aggregating <- switch(oldTimeStep,
    hourly  = oldTimeStep != newTimeStep,
    daily   = newTimeStep %in% c("weekly", "monthly", "annual"),
    weekly  = newTimeStep %in% c("monthly", "annual"),
    monthly = newTimeStep == "annual",
    FALSE)
  if (aggregating) {
    warning('Aggregation will be perform approximatively because optimization variables in ANTARES are doubles but ANTARES write only integers in TXT files, with this transformation we lose precision. If you want accurate data then you must import the corresponding data with `readAntares`')
  }
}
| /R/changeTimeStep.R | no_license | cran/antaresRead | R | false | false | 7,083 | r | #Copyright © 2016 RTE Réseau de transport d’électricité
#' Change the timestep of an output
#'
#' This function changes the timestep of a table or an \code{antaresData} object
#' and performs the required aggregation or desaggregation. We can specify
#' (des)aggregate functions by columns, see the param `fun`.
#'
#' @param x
#' data.table with a column "timeId" or an object of class "antaresDataList"
#' @param newTimeStep
#' Desired time step.The possible values are hourly, daily, weekly,
#' monthly and annual.
#' @param oldTimeStep
#' Current time step of the data. This argument is optional for an object of
#' class \code{antaresData} because the time step of the data is stored inside
#' the object
#' @param fun
#' Character vector with one element per column to (des)aggregate indicating
#' the function to use ("sum", "mean", "min" or "max") for this column. It can
#' be a single element, in that case the same function is applied to every
#' columns.
#' @inheritParams readAntares
#'
#' @return
#' Either a data.table or an object of class "antaresDataList" depending on the
#' class of \code{x}
#'
#' @examples
#' \dontrun{
#' setSimulationPath()
#'
#' areasH <- readAntares(select = "LOAD", synthesis = FALSE, mcYears = 1)
#' areasD <- readAntares(select = "LOAD", synthesis = FALSE, mcYears = 1, timeStep ="daily")
#'
#' areasDAgg <- changeTimeStep(areasH, "daily", "hourly")
#'
#' all.equal(areasDAgg$LOAD, areasD$LOAD)
#'
#' # Use different aggregation functions
#' mydata <- readAntares(select = c("LOAD", "MRG. PRICE"), timeStep = "monthly")
#' changeTimeStep(mydata, "annual", fun = c("sum", "mean"))
#' }
#'
#' @export
#'
changeTimeStep <- function(x, newTimeStep, oldTimeStep, fun = "sum", opts=simOptions()) {
  # Convert 'x' from 'oldTimeStep' to 'newTimeStep', aggregating or
  # disaggregating as needed. 'fun' can name one function per value column
  # ("sum", "mean", "min", "max") or a single name recycled to all columns.
  fun <- match.arg(fun, c("sum", "mean", "min", "max"), several.ok = TRUE)
  # Aggregation functions. 'sum' coerces to double first so that large
  # integer sums do not overflow to NA.
  funs <- list(sum = function(x) sum(as.numeric(x)), mean = mean, min = min, max = max)
  # Disaggregation functions: a summed value is spread evenly over the n
  # finer periods; mean/min/max are simply repeated as-is.
  ifuns <- list(sum = function(x, n) {x / n},
                mean = function(x, n) {x},
                min = function(x, n) {x},
                max = function(x, n) {x})
  # For antaresData objects, the time step and options are stored in the
  # object itself and override the arguments.
  if (is(x, "antaresData")) {
    opts <- simOptions(x)
    oldTimeStep <- attr(x, "timeStep")
  }
  # Nothing to do if the data is already at the requested time step.
  if (newTimeStep == oldTimeStep) return(x)
  .checkIfWeWantToAggregate(newTimeStep, oldTimeStep)
  # An antaresDataList is processed element by element, recursively.
  if (is(x, "antaresDataList")) {
    for (i in 1:length(x)) {
      x[[i]] <- changeTimeStep(x[[i]], newTimeStep, oldTimeStep, fun, opts)
    }
    attr(x, "timeStep") <- newTimeStep
    return(x)
  }
  # Keep a copy of attributes to put them back at the end of the function
  synthesis <- attr(x, "synthesis")
  type <- attr(x, "type")
  # Should we add the date-time columns back at the end?
  addDateTimeCol <- !is.null(x[["time"]])
  # Suppress time variables; they will be recomputed for the new time step.
  if (!is.null(x[["time"]])) x$time <- NULL
  if (!is.null(x$hour)) x$hour <- NULL
  if (!is.null(x$day)) x$day <- NULL
  if (!is.null(x$week)) x$week <- NULL
  if (!is.null(x$month)) x$month <- NULL
  # Strategy: if oldTimeStep is not hourly, first desagregate data at hourly
  # level. Then, in all cases aggregate hourly data at the desired level.
  # 'refTime' maps every hourly time id to its id in the old and new steps.
  refTime <- data.table(
    oldTimeId = .getTimeId(opts$timeIdMin:opts$timeIdMax, oldTimeStep, opts),
    timeId = .getTimeId(opts$timeIdMin:opts$timeIdMax, newTimeStep, opts)
  )
  # Copy so the caller's data.table is not modified by reference.
  x <- copy(x)
  setnames(x, "timeId", "oldTimeId")
  # allow.cartesian: one old row maps to many hourly rows when disaggregating.
  x <- merge(x, refTime, by = "oldTimeId", allow.cartesian=TRUE)
  # Desagregation
  if (oldTimeStep != "hourly") {
    idVars <- c(.idCols(x), "oldTimeId")
    idVars <- idVars[idVars != "timeId"]
    by <- parse(text = sprintf("list(%s)", paste(idVars, collapse = ", ")))
    # Code below is a bit hacky: we want to use a window function on all variables but one (timeId).
    # Instead of writing a loop on the columns, we separate the timeId column
    # from the table, use the syntax of data.table to perform the window function
    # and finally put back the timeId in the table.
    setorderv(x, idVars)
    timeId <- x$timeId
    x$timeId <- NULL
    if (length(fun) == 1) fun <- rep(fun, ncol(x) - length(idVars))
    # .N is the group size, i.e. the number of hourly rows per old period.
    x <- x[, mapply(function(x, f) {f(x, .N)}, x = .SD, f = ifuns[fun], SIMPLIFY=FALSE), by = eval(by)]
    x$timeId <- timeId
    .reorderCols(x)
  }
  x$oldTimeId <- NULL
  # Aggregation
  if (newTimeStep != "hourly") {
    idVars <- .idCols(x)
    if (length(fun) == 1) fun <- rep(fun, ncol(x) - length(idVars))
    x <- x[, mapply(function(x, f) {f(x)}, x = .SD, f = funs[fun], SIMPLIFY=FALSE), keyby=idVars]
  }
  # Restore class and attributes saved above, under the new time step.
  x <- .addClassAndAttributes(x, synthesis, newTimeStep, opts, type = type)
  if(addDateTimeCol) addDateTimeColumns(x)
  x
}
.getTimeId <- function(hourId, timeStep, opts) {
  # Map hourly time ids onto time ids of the requested time step.
  #
  # hourId:   integer vector of hourly time ids (1-based over the year)
  # timeStep: "hourly", "daily", "weekly", "monthly" or "annual"
  # opts:     simulation options; 'start' and 'firstWeekday' are used for the
  #           calendar-dependent steps (weekly, monthly)
  # Returns a vector of time ids, same length as 'hourId'.
  # Easy cases
  if (timeStep == "hourly") return(hourId)
  if (timeStep == "daily") {
    # 24 hours per day; ids are 1-based, hence the -1/+1 shift.
    return( (hourId - 1) %/% 24 + 1 )
  }
  if (timeStep == "annual") {
    # Everything falls in the single yearly period.
    return(rep(1L, length(hourId)))
  }
  # Hard cases
  # Create a correlation table between hourIds and actual dates and compute new
  # timeIds based on the actual dates
  timeRef <- data.table(hourId = 1:(24*365))
  # Build the actual timestamp of each hour by offsetting from the study start.
  tmp <- as.POSIXct(opts$start)
  lubridate::hour(tmp) <- lubridate::hour(tmp) + timeRef$hourId - 1
  timeRef$hour <- tmp
  if (timeStep == "weekly") {
    # lubridate::wday: 1 = Sunday ... 7 = Saturday by default.
    timeRef$wday <- lubridate::wday(timeRef$hour)
    startWeek <- which(c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday") == opts$firstWeekday)
    # A new week begins on the first hour of the configured first weekday;
    # cumsum over the change flags yields consecutive week ids.
    timeRef[, change := wday == startWeek & wday != shift(wday)]
    timeRef$change[1] <- TRUE
    timeRef[, timeId := cumsum(change)]
    return(timeRef$timeId[hourId])
  }
  if (timeStep == "monthly") {
    timeRef$month <- lubridate::month(timeRef$hour)
    # A new month begins whenever the month number changes.
    timeRef[, change := month != shift(month)]
    timeRef$change[1] <- TRUE
    timeRef[, timeId := cumsum(change)]
    return(timeRef$timeId[hourId])
  }
}
# Warn the user when the requested conversion is an aggregation, i.e. going
# from a finer time step to a coarser one. ANTARES writes integer-rounded
# values in its TXT outputs, so aggregating them loses precision compared to
# importing the coarser data directly with `readAntares`.
#
# newTimeStep: target time step ("hourly", "daily", "weekly", "monthly", "annual")
# oldTimeStep: current time step of the data
# Called for its warning side effect only.
.checkIfWeWantToAggregate<-function(newTimeStep, oldTimeStep){
  # Rank the time steps from finest to coarsest; moving to a strictly higher
  # rank is an aggregation. This replaces the original redundant if/else
  # chain with a single comparison.
  stepRank <- c(hourly = 1L, daily = 2L, weekly = 3L, monthly = 4L, annual = 5L)
  yesWeWantToAggregate <- !is.na(stepRank[oldTimeStep]) &&
    !is.na(stepRank[newTimeStep]) &&
    stepRank[[newTimeStep]] > stepRank[[oldTimeStep]]
  if(yesWeWantToAggregate){
    # Message grammar fixed ("will be perform approximatively" in the original).
    warning('Aggregation will be performed approximately because optimization variables in ANTARES are doubles but ANTARES writes only integers in TXT files; with this transformation we lose precision. If you want accurate data then you must import the corresponding data with `readAntares`')
  }
}
|
# Gierzwaluw (common swift) analysis using osmenrich
# Last edited 2021-02-16 by @vankesteren
# CC-BY ODISSEI SoDa team
# Packages ----
# Data
library(tidyverse)
library(sf)
library(osmenrich)
# Modeling
library(mgcv)
# Plotting
library(ggspatial)
library(firatheme) # remotes::install_github("vankesteren/firatheme")
library(ggeffects)
# Optional: use local version of osm
# osmdata::set_overpass_url("http://localhost:8888/api/interpreter")
# Data loading ----
data_url <- "https://ckan.dataplatform.nl/dataset/8ceaae10-fb90-4ec0-961c-ef02691bb861/resource/baae4cde-cf33-416b-aa4e-d0fba160eed9/download/gierzwaluwinventarisatie2014.csv"
bird_sf <-
read_csv(data_url) %>%
drop_na(latitude, longitude, `aantal nesten`) %>%
st_as_sf(coords = c("longitude", "latitude"), crs = 4326) %>%
select(nestcount = "aantal nesten", geometry)
# Plot data
plot_bird <-
ggplot(bird_sf, aes(colour = nestcount, size = nestcount)) +
annotation_map_tile(zoomin = 0) +
geom_sf() +
theme_fira() +
scale_colour_viridis_c() +
scale_size(range = c(1, 3), guide = FALSE) +
theme(axis.text.x = element_text(angle = 90)) +
labs(title = "Common swift nests", colour = "")
plot_bird
# Data augmentation ----
# randomly sample 200 non-bird sites:
# centroids of grid cells where nests were not found
# ASSUMPTION: The common swift inventarisation is comprehensive over the grid.
set.seed(45)
bird_grid <- st_make_grid(bird_sf, n = c(40, 60))
has_nests <- apply(st_contains(bird_grid, bird_sf, sparse = FALSE), 1, any)
zerosample <- st_centroid(sample(bird_grid[!has_nests], 200))
nonbird_sf <- st_sf(geometry = zerosample) %>% mutate(nestcount = 0)
bird_sf <- bind_rows(bird_sf, nonbird_sf)
# Plot augmented data
plot_bird +
geom_sf(data = nonbird_sf, colour = "black", bg = "black", size = 1, shape = 22)
# Data enrichment ----
# using osmenrich for computing the following two features:
# The number of shops as a proxy of commercial activity
# The number of trees as a proxy of natural material availability
bird_sf <-
bird_sf %>%
enrich_osm(
name = "commercial_activity_1km",
key = "shop",
kernel = "gaussian",
r = 1000
) %>%
enrich_osm(
name = "tree_1km",
key = "natural",
value = "tree",
kernel = "gaussian",
r = 1000
)
# We can do the same for a grid covering the bounding box of the data
grid_sf_c <-
st_centroid(bird_grid) %>%
st_sf() %>%
enrich_osm(
name = "commercial_activity_1km",
key = "shop",
kernel = "gaussian",
r = 1000,
.verbose = TRUE
) %>%
enrich_osm(
name = "tree_1km",
key = "natural",
value = "tree",
kernel = "gaussian",
r = 1000,
.verbose = TRUE
)
grid_sf_p <- grid_sf_c %>% st_set_geometry(bird_grid)
# Plot predictors on the grid
plot_commercial_activity <-
ggplot() +
annotation_map_tile(zoomin = 0) +
geom_sf(data = grid_sf_p, aes(fill = commercial_activity_1km), colour = NA, alpha = 0.7) +
geom_sf(data = bird_sf %>% filter(nestcount > 0), colour = "black") +
theme_fira() +
scale_fill_viridis_c() +
labs(title = "Commercial activity", fill = "") +
theme(axis.text.x = element_text(angle = 90))
plot_commercial_activity
plot_trees <-
ggplot() +
annotation_map_tile(zoomin = 0) +
geom_sf(data = grid_sf_p, aes(fill = tree_1km), colour = NA, alpha = 0.7) +
geom_sf(data = bird_sf %>% filter(nestcount > 0), colour = "black") +
theme_fira() +
scale_fill_viridis_c() +
labs(title = "Trees", fill = "") +
theme(axis.text.x = element_text(angle = 90))
plot_trees
# Modeling ----
# zero-inflated poisson spatial generalized additive model
# We need a function to turn sf with coordinates into df with x and y
# Convert an sf object into a plain tibble: drop the geometry list-column and
# append the point coordinates as numeric "x" and "y" columns.
sf_to_df <- function(sf) {
  attrs <- select(as_tibble(sf), -geometry)
  coords <- set_names(as_tibble(st_coordinates(sf)), c("x", "y"))
  bind_cols(attrs, coords)
}
# generalized additive model with mgcv
fit <- gam(
nestcount ~
te(x, y, bs = "ts") + # bivariate regularized thin plate spline
poly(commercial_activity_1km, 2) + # quadratic effect of commercial activity
tree_1km, # natural material availability proxy
family = "ziP", # zero-inflated poisson
data = sf_to_df(bird_sf)
)
# Interpretation and prediction ----
# effects of spline, commercial activity, and natural material availability
summary(fit)
# plot the marginal effect of commercial activity
values <- seq(0, 0.10, 0.01)
plot_marginal <-
ggpredict(fit, terms = "commercial_activity_1km [values]",
condition = c(x = 5.10, y = 52.125)) %>%
ggplot(aes(x = x, y = predicted, ymin = conf.low, ymax = conf.high)) +
geom_ribbon(alpha = 0.4, fill = firaCols[3]) +
geom_line(colour = firaCols[3], size = 1) +
theme_fira() +
labs(y = "Predicted nests", x = "Commercial activity",
title = "Marginal effect of commercial activity")
plot_marginal
# create predictions over previously defined grid
pred <- predict(fit, newdata = sf_to_df(grid_sf_c), type = "response", se = TRUE)
# plot the predictions over the grid
grid_sf_p$pred <- pred$fit
grid_sf_p$se <- pred$se.fit
plot_pred <-
ggplot() +
annotation_map_tile(zoomin = 0) +
geom_sf(data = grid_sf_p, aes(fill = pred), colour = NA, alpha = 0.7) +
geom_sf(data = bird_sf %>% filter(nestcount > 0), colour = "black") +
theme_fira() +
scale_fill_viridis_c() +
labs(title = "Nest count", fill = "") +
theme(axis.text.x = element_text(angle = 90))
plot_pred
# Plot the spline only
# Zero out the model covariates so predictions reflect the spatial spline only.
# BUG FIX: the GAM's covariates are commercial_activity_1km and tree_1km; the
# original code zeroed grass_300m/tree_300m, which are not model terms, so the
# tree effect still leaked into the "spline-only" predictions.
grid_sf_c2 <-
  grid_sf_c %>%
  mutate(
    commercial_activity_1km = 0,
    tree_1km = 0
  )
pred2 <- predict(fit, newdata = sf_to_df(grid_sf_c2), type = "response", se = TRUE)
grid_sf_p$pred2 <- pred2$fit
plot_pred_smooth_only <-
ggplot() +
annotation_map_tile(zoomin = 0) +
geom_sf(data = grid_sf_p, aes(fill = pred2), colour = NA, alpha = 0.7) +
geom_sf(data = bird_sf %>% filter(nestcount > 0), colour = "black") +
theme_fira() +
scale_fill_viridis_c() +
labs(title = "Spline-only predictions", fill = "") +
theme(axis.text.x = element_text(angle = 90))
plot_pred_smooth_only
| /gierzwaluw_analysis.R | no_license | sodascience/presentation-osmenrich-sig | R | false | false | 6,120 | r | # Gierzwaluw (common swift) analysis using osmenrich
# Last edited 2021-02-16 by @vankesteren
# CC-BY ODISSEI SoDa team
# Packages ----
# Data
library(tidyverse)
library(sf)
library(osmenrich)
# Modeling
library(mgcv)
# Plotting
library(ggspatial)
library(firatheme) # remotes::install_github("vankesteren/firatheme")
library(ggeffects)
# Optional: use local version of osm
# osmdata::set_overpass_url("http://localhost:8888/api/interpreter")
# Data loading ----
data_url <- "https://ckan.dataplatform.nl/dataset/8ceaae10-fb90-4ec0-961c-ef02691bb861/resource/baae4cde-cf33-416b-aa4e-d0fba160eed9/download/gierzwaluwinventarisatie2014.csv"
bird_sf <-
read_csv(data_url) %>%
drop_na(latitude, longitude, `aantal nesten`) %>%
st_as_sf(coords = c("longitude", "latitude"), crs = 4326) %>%
select(nestcount = "aantal nesten", geometry)
# Plot data
plot_bird <-
ggplot(bird_sf, aes(colour = nestcount, size = nestcount)) +
annotation_map_tile(zoomin = 0) +
geom_sf() +
theme_fira() +
scale_colour_viridis_c() +
scale_size(range = c(1, 3), guide = FALSE) +
theme(axis.text.x = element_text(angle = 90)) +
labs(title = "Common swift nests", colour = "")
plot_bird
# Data augmentation ----
# randomly sample 200 non-bird sites:
# centroids of grid cells where nests were not found
# ASSUMPTION: The common swift inventarisation is comprehensive over the grid.
set.seed(45)
bird_grid <- st_make_grid(bird_sf, n = c(40, 60))
has_nests <- apply(st_contains(bird_grid, bird_sf, sparse = FALSE), 1, any)
zerosample <- st_centroid(sample(bird_grid[!has_nests], 200))
nonbird_sf <- st_sf(geometry = zerosample) %>% mutate(nestcount = 0)
bird_sf <- bind_rows(bird_sf, nonbird_sf)
# Plot augmented data
plot_bird +
geom_sf(data = nonbird_sf, colour = "black", bg = "black", size = 1, shape = 22)
# Data enrichment ----
# using osmenrich for computing the following two features:
# The number of shops as a proxy of commercial activity
# The number of trees as a proxy of natural material availability
bird_sf <-
bird_sf %>%
enrich_osm(
name = "commercial_activity_1km",
key = "shop",
kernel = "gaussian",
r = 1000
) %>%
enrich_osm(
name = "tree_1km",
key = "natural",
value = "tree",
kernel = "gaussian",
r = 1000
)
# We can do the same for a grid covering the bounding box of the data
grid_sf_c <-
st_centroid(bird_grid) %>%
st_sf() %>%
enrich_osm(
name = "commercial_activity_1km",
key = "shop",
kernel = "gaussian",
r = 1000,
.verbose = TRUE
) %>%
enrich_osm(
name = "tree_1km",
key = "natural",
value = "tree",
kernel = "gaussian",
r = 1000,
.verbose = TRUE
)
grid_sf_p <- grid_sf_c %>% st_set_geometry(bird_grid)
# Plot predictors on the grid
plot_commercial_activity <-
ggplot() +
annotation_map_tile(zoomin = 0) +
geom_sf(data = grid_sf_p, aes(fill = commercial_activity_1km), colour = NA, alpha = 0.7) +
geom_sf(data = bird_sf %>% filter(nestcount > 0), colour = "black") +
theme_fira() +
scale_fill_viridis_c() +
labs(title = "Commercial activity", fill = "") +
theme(axis.text.x = element_text(angle = 90))
plot_commercial_activity
plot_trees <-
ggplot() +
annotation_map_tile(zoomin = 0) +
geom_sf(data = grid_sf_p, aes(fill = tree_1km), colour = NA, alpha = 0.7) +
geom_sf(data = bird_sf %>% filter(nestcount > 0), colour = "black") +
theme_fira() +
scale_fill_viridis_c() +
labs(title = "Trees", fill = "") +
theme(axis.text.x = element_text(angle = 90))
plot_trees
# Modeling ----
# zero-inflated poisson spatial generalized additive model
# We need a function to turn sf with coordinates into df with x and y
# Convert an sf object to a plain data frame: keep all non-geometry columns
# and append the point coordinates as numeric "x" and "y" columns, as
# expected by the mgcv model formula below.
sf_to_df <- function(sf) {
  bind_cols(
    as_tibble(sf) %>% select(-geometry),
    st_coordinates(sf) %>% as_tibble() %>% set_names(c("x", "y"))
  )
}
# generalized additive model with mgcv
fit <- gam(
nestcount ~
te(x, y, bs = "ts") + # bivariate regularized thin plate spline
poly(commercial_activity_1km, 2) + # quadratic effect of commercial activity
tree_1km, # natural material availability proxy
family = "ziP", # zero-inflated poisson
data = sf_to_df(bird_sf)
)
# Interpretation and prediction ----
# effects of spline, commercial activity, and natural material availability
summary(fit)
# plot the marginal effect of commercial activity
values <- seq(0, 0.10, 0.01)
plot_marginal <-
ggpredict(fit, terms = "commercial_activity_1km [values]",
condition = c(x = 5.10, y = 52.125)) %>%
ggplot(aes(x = x, y = predicted, ymin = conf.low, ymax = conf.high)) +
geom_ribbon(alpha = 0.4, fill = firaCols[3]) +
geom_line(colour = firaCols[3], size = 1) +
theme_fira() +
labs(y = "Predicted nests", x = "Commercial activity",
title = "Marginal effect of commercial activity")
plot_marginal
# create predictions over previously defined grid
pred <- predict(fit, newdata = sf_to_df(grid_sf_c), type = "response", se = TRUE)
# plot the predictions over the grid
grid_sf_p$pred <- pred$fit
grid_sf_p$se <- pred$se.fit
plot_pred <-
ggplot() +
annotation_map_tile(zoomin = 0) +
geom_sf(data = grid_sf_p, aes(fill = pred), colour = NA, alpha = 0.7) +
geom_sf(data = bird_sf %>% filter(nestcount > 0), colour = "black") +
theme_fira() +
scale_fill_viridis_c() +
labs(title = "Nest count", fill = "") +
theme(axis.text.x = element_text(angle = 90))
plot_pred
# Plot the spline only
# Zero out the model covariates so predictions reflect the spatial spline only.
# BUG FIX: the GAM's covariates are commercial_activity_1km and tree_1km; the
# original code zeroed grass_300m/tree_300m, which are not model terms, so the
# tree effect still leaked into the "spline-only" predictions.
grid_sf_c2 <-
  grid_sf_c %>%
  mutate(
    commercial_activity_1km = 0,
    tree_1km = 0
  )
pred2 <- predict(fit, newdata = sf_to_df(grid_sf_c2), type = "response", se = TRUE)
grid_sf_p$pred2 <- pred2$fit
plot_pred_smooth_only <-
ggplot() +
annotation_map_tile(zoomin = 0) +
geom_sf(data = grid_sf_p, aes(fill = pred2), colour = NA, alpha = 0.7) +
geom_sf(data = bird_sf %>% filter(nestcount > 0), colour = "black") +
theme_fira() +
scale_fill_viridis_c() +
labs(title = "Spline-only predictions", fill = "") +
theme(axis.text.x = element_text(angle = 90))
plot_pred_smooth_only
|
#********************************Uber project***********************************************************************
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
require(lubridate)
library(stringr)
library(reshape)
#getwd()
#set working directory
#setwd("C:/Users/abhishek.saxena02/Documents/BI-OLAP-LABSESSION/INPUT")
# Loading dataset
uber <- read.csv("Uber Request Data.csv",stringsAsFactors = F)
summary(uber)
str(uber)
#****************************************************************************************************************************
#Data Cleaning (Suitable date format of R), converting the multiple date format into the
#default date format and saving into the Request.timestamp, Drop.timestamp respectively
#replacing the old Request.timestamp, drop.timestamp columns with formatted dated values.
#Handling of the NA values
#****************************************************************************************************************************
# Parse request timestamps, which appear in two formats in the raw file:
# "dd-mm-YYYY HH:MM:SS" and "dd/mm/YYYY HH:MM". Each attempt returns NA for
# rows in the other format.
Ra<-as.POSIXct(uber$Request.timestamp, format = "%d-%m-%Y %H:%M:%S", tz = "GMT")
Rb<-as.POSIXct(uber$Request.timestamp, format = "%d/%m/%Y %H:%M", tz = "GMT")
# NOTE(review): this assumes every row matches exactly one of the two formats,
# so the NA positions of Ra are exactly the non-NA positions of Rb — if any
# row matched neither format the values would be misaligned. TODO confirm.
Ra[is.na(Ra)] <- Rb[!is.na(Rb)]
typeof(Ra)
uber$Request.timestamp<-Ra
# Drop timestamps: only the first 2831 rows have a drop time in this dataset;
# the remaining rows are trips that were cancelled or had no cars available.
# NOTE(review): the indices 2831 / 2832:6745 are hard-coded for this specific
# file and will break on a different extract.
dummy_Drop.timestamp<-uber$Drop.timestamp[1:2831]
Da<-as.POSIXct(dummy_Drop.timestamp, format = "%d-%m-%Y %H:%M:%S", tz = "GMT" )
Db<-as.POSIXct(dummy_Drop.timestamp, format = "%d/%m/%Y %H:%M", tz = "GMT")
Da[is.na(Da)] <- Db[!is.na(Db)]
nrow(uber)
# Pad the unfinished trips with NA so the column matches the table length.
Da[2832:6745]<-NA
uber$Drop.timestamp<-Da
View(uber)
#****************************************************************************************************************************
# Request_hour, Derived a new column to get the insights on the specific time of the day
#****************************************************************************************************************************
uber$Request_hour<-format(uber$Request.timestamp, "%H")
uber$Request_hour<-as.numeric(uber$Request_hour)
#****************************************************************************************************************************
#weekday, Derived a new column to get the insights on the day of the week
#****************************************************************************************************************************
uber$Request_weekday<-weekdays(uber$Request.timestamp)
#****************************************************************************************************************************
#Funtion to derive the new column type of request
#Type of Request, from Airport-city or City-Airport
#****************************************************************************************************************************
# Label a pickup point with its implied trip direction:
# "Airport" -> "Airport-City"; any other value -> "<value>-Airport".
Request_type <- function(a) {
  destination <- if (a == "Airport") "City" else "Airport"
  paste(a, destination, sep = "-")
}
uber$type_of_request <-sapply(uber$Pickup.point, Request_type)
#****************************************************************************************************************************
#Function to derive the peak hour column based on the hour of the day
#Hour_of_the_day
#****************************************************************************************************************************
# Classify an hour of the day (0-23) into demand buckets:
# hours 5-9 are the morning peak, 17-22 the evening peak, everything else
# counts as normal hours.
hour_fun <- function(a) {
  if (a >= 5 && a < 10) {
    "morning_peak_hrs"
  } else if (a >= 17 && a <= 22) {
    "Evening_peak_hrs"
  } else {
    "Normal_hrs"
  }
}
uber$Hour_of_the_day<-sapply(uber$Request_hour, hour_fun)
#****************************************************************************************************************************
#Univariate analysis
#For Status column
#percentage of cancelled request
#percentage of No cars available request
#percentage of No cars available request
#pie chart representation for the request
#****************************************************************************************************************************
select(uber, Status)%>% group_by(Status)%>%summarise(count=n())->Total_Status
View(Total_Status)
cancelled_percent<-(Total_Status$count[which(Total_Status$Status== "Cancelled")]/sum(Total_Status$count))*100
No_Cars_Available_percent<-(Total_Status$count[which(Total_Status$Status== "No Cars Available")]/sum(Total_Status$count))*100
Trip_Completed_percent<-(Total_Status$count[which(Total_Status$Status== "Trip Completed")]/sum(Total_Status$count))*100
pie(Total_Status$count, labels = Total_Status$Status, main = "Pie chart for the request")
#****************************************************************************************************************************
#PLOT:1 Plot of pickup point vs frequency or count of the request and with status
#****************************************************************************************************************************
ggplot(uber,aes(x=Pickup.point) )+geom_bar()
#****************************************************************************************************************************
#PLOT:2 plot to visualise the frequency of requests that get cancelled, show 'no cars available' and Trip completed.
#****************************************************************************************************************************
ggplot(uber, aes(x=Status))+geom_bar(position = "stack")
#shows that No cars available is the major issue and as from the graph we can see that count of cancelled trip is mostly half of the no cars available
#****************************************************************************************************************************
#Bivariate Analysis
#Analysis of Pickup point and Status
#****************************************************************************************************************************
uber_summary<-aggregate(Request.id~Pickup.point+Status,uber,length)
#****************************************************************************************************************************
#PLOT:3 Plot Pickup.point vs count of request id with status
#****************************************************************************************************************************
ggplot(uber, aes(x=Pickup.point, fill=Status)) +geom_bar()
ggplot(uber, aes(x=Pickup.point, fill=Status)) +geom_bar(position = "fill")
#****************************************************************************************************************************
#PLOT:4 identify the most problematic types of requests (city to airport / airport to city etc.)
#****************************************************************************************************************************
ggplot(uber, aes(x=Status, fill = type_of_request))+geom_bar(position = "stack")
ggplot(uber, aes(x=Status, fill = type_of_request))+geom_bar(position = "fill")
# From Airport to city there are very less cancellations. But MAJOR PROBLEM is CARS UNAVAILABLE at Airport for pickup
# City to Airport: Lot of cancellations, But other hand trips completed to airport is high
#****************************************************************************************************************************
#PLOT:5 Plot of the weekday vs status
#****************************************************************************************************************************
ggplot(uber, aes(x=Request_weekday))+geom_bar(position = "stack")
ggplot(uber, aes(x=Request_weekday , fill=Status)) + geom_bar(position = "stack")
#****************************************************************************************************************************
#PLOT:6 Plot request_hour vs frequency, identify the most problematic types of requests over the Requested hour
#****************************************************************************************************************************
ggplot(uber, aes(x=Request_hour)) + geom_bar(position = "stack")
ggplot(uber, aes(x=Request_hour , fill=Status)) + geom_bar(position = "stack")
#In evenings the majority of the times problem is no cars available where in morning there is the increase in the number of the cancellations
#After observing the plot 6 we can conclude that the peak hour in morning are from 5 to 10am, in evening from 5 to 10pm there is huge increase in demand
# when we take plot 3 & plot 4 in consideration then it is observed that from the graphs that most of the trips got cancelled where the type of request
# is from city to airport, and from plot 4 we can see that most cancellations of trips occur in the morning,
# which implies that there are more cancellations of City-Airport trips, and that the no-cars-available situation arises more often in the evenings.
#****************************************************************************************************************************
#PLOT:7 Plot hour_of_the_day vs frequency of the request with Status
#****************************************************************************************************************************
ggplot(uber, aes(x=Hour_of_the_day)) + geom_bar(position = "stack")
ggplot(uber, aes(x=Hour_of_the_day, fill= Status)) + geom_bar(position = "stack")
#****************************************************************************************************************************
#PLOT:8 Plot hour_of_the_day vs frequency of the request with type of request
#****************************************************************************************************************************
ggplot(uber, aes(x=Hour_of_the_day, fill= type_of_request))+geom_bar(position = "stack")
#****************************************************************************************************************************
# Supply demand gap analysis
#
# Part- 1 Total Supply and demand
#****************************************************************************************************************************
Supply<-Total_Status$count[which(Total_Status$Status== "Trip Completed")]
demand<-sum(Total_Status$count)
Supply_demand_gap<-demand-Supply
SD_ratio<-Supply/demand
# The supply-demand ratio is 0.42, i.e. only 42% of requested trips are completed; the company is losing revenue on the remaining 58% of requests.
#****************************************************************************************************************************
#Part 2 (Pickup point-city)
#****************************************************************************************************************************
city_pickup<-subset(uber, Pickup.point=="City")
Supply_city<-nrow(subset(city_pickup, Status=="Trip Completed"))
demand_city<-nrow(city_pickup)
Supply_demand_gap_city<-demand_city-Supply_city
SD_ratio_city<-Supply_city/demand_city
#****************************************************************************************************************************
#Part 3 (Pickup point-Airport)
#****************************************************************************************************************************
Airport_pickup<-subset(uber, Pickup.point=="Airport")
Supply_airport<-nrow(subset(city_pickup, Status=="Trip Completed"))
demand_airport<-nrow(Airport_pickup)
Supply_demand_gap_airport<-demand_airport-Supply_airport
SD_ratio_Airport<-Supply_airport/demand_airport
#****************************************************************************************************************************
# Part 4: Total supply demand gap during peak_hrs(Evening_peak_hrs, morning_peak_hrs)
#****************************************************************************************************************************
peak_hrs<-subset(uber, Hour_of_the_day!="Normal_hrs")
Supply_peak_hrs<-nrow(subset(peak_hrs, Status=="Trip Completed"))
demand_peak_hrs<-nrow(peak_hrs)
Supply_demand_gap_peak_hrs<-demand_peak_hrs-Supply_peak_hrs
SD_ratio_peak_hrs<-Supply_peak_hrs/demand_peak_hrs
#****************************************************************************************************************************
#Part 5 supply demand gap during peak_hrs(Evening_peak_hrs, morning_peak_hrs) from City
#****************************************************************************************************************************
peak_hrs_city<-subset(city_pickup,Hour_of_the_day!="Normal_hrs")
Supply_peak_hrs_city<-nrow(subset(peak_hrs_city,Status=="Trip Completed"))
demand_peak_hrs_city<-nrow(peak_hrs_city)
Supply_demand_gap_peak_hrs_city<-demand_peak_hrs_city-Supply_peak_hrs_city
SD_ratio_peak_hrs_city<-Supply_peak_hrs_city/demand_peak_hrs_city
#****************************************************************************************************************************
#Part 6 supply demand gap during peak_hrs(Evening_peak_hrs, morning_peak_hrs) from Airport
#****************************************************************************************************************************
peak_hrs_airport<-subset(Airport_pickup,Hour_of_the_day!="Normal_hrs")
Supply_peak_hrs_airport<-nrow(subset(peak_hrs_airport,Status=="Trip Completed"))
demand_peak_hrs_airport<-nrow(peak_hrs_airport)
Supply_demand_gap_peak_hrs_airport<-demand_peak_hrs_airport-Supply_peak_hrs_airport
SD_ratio_peak_hrs_airport<-Supply_peak_hrs_airport/demand_peak_hrs_airport
#****************************************************************************************************************************
#TO convert the dataset into CSV format for the use in tableau
#****************************************************************************************************************************
write.csv(uber, file="C:\\Users\\abhishek.saxena02\\Documents\\BI-OLAP-LABSESSION\\INPUT\\uber.csv")
| /Uber_case_study.r | no_license | abhisheksaxenainfy/Data-science-projects | R | false | false | 13,885 | r | #********************************Uber project***********************************************************************
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
require(lubridate)
library(stringr)
library(reshape)
#getwd()
#set working directory
#setwd("C:/Users/abhishek.saxena02/Documents/BI-OLAP-LABSESSION/INPUT")
# Loading dataset
uber <- read.csv("Uber Request Data.csv",stringsAsFactors = F)
summary(uber)
str(uber)
#****************************************************************************************************************************
#Data Cleaning (Suitable date format of R), converting the multiple date format into the
#default date format and saving into the Request.timestamp, Drop.timestamp respectively
#replacing the old Request.timestamp, drop.timestamp columns with formatted dated values.
#Handling of the NA values
#****************************************************************************************************************************
Ra<-as.POSIXct(uber$Request.timestamp, format = "%d-%m-%Y %H:%M:%S", tz = "GMT")
Rb<-as.POSIXct(uber$Request.timestamp, format = "%d/%m/%Y %H:%M", tz = "GMT")
Ra[is.na(Ra)] <- Rb[!is.na(Rb)]
typeof(Ra)
uber$Request.timestamp<-Ra
dummy_Drop.timestamp<-uber$Drop.timestamp[1:2831]
Da<-as.POSIXct(dummy_Drop.timestamp, format = "%d-%m-%Y %H:%M:%S", tz = "GMT" )
Db<-as.POSIXct(dummy_Drop.timestamp, format = "%d/%m/%Y %H:%M", tz = "GMT")
Da[is.na(Da)] <- Db[!is.na(Db)]
nrow(uber)
Da[2832:6745]<-NA
uber$Drop.timestamp<-Da
View(uber)
#****************************************************************************************************************************
# Derived column: Request_hour -- hour of day (0-23) of each request, used to
# study demand by time of day.
#****************************************************************************************************************************
uber$Request_hour <- as.numeric(format(uber$Request.timestamp, "%H"))
#****************************************************************************************************************************
# Derived column: Request_weekday -- day of the week of each request.
#****************************************************************************************************************************
uber$Request_weekday <- weekdays(uber$Request.timestamp)
#****************************************************************************************************************************
#Funtion to derive the new column type of request
#Type of Request, from Airport-city or City-Airport
#****************************************************************************************************************************
# Request_type(): label a pickup point with its trip direction.
#   "Airport" -> "Airport-City"; any other point -> "<point>-Airport".
# Vectorized via ifelse(), so it now accepts a whole column as well as a
# scalar (the original scalar if/else with the odd `a[]` indexing required
# sapply()); scalar calls behave exactly as before.
Request_type <- function(a) {
  ifelse(a == "Airport",
         paste(a, "City", sep = "-"),
         paste(a, "Airport", sep = "-"))
}
# Label each request with its direction of travel (Airport-City / City-Airport).
uber$type_of_request <-sapply(uber$Pickup.point, Request_type)
#****************************************************************************************************************************
# Function to derive the peak-hour column (Hour_of_the_day) from the hour of
# the request: morning peak 05-09, evening peak 17-22, everything else normal.
#****************************************************************************************************************************
# hour_fun(): classify a single hour of the day (0-23) into demand buckets.
#   05-09      -> "morning_peak_hrs"
#   17-22      -> "Evening_peak_hrs"
#   otherwise  -> "Normal_hrs"
# Returns the label directly instead of smuggling it out through a throwaway
# assignment, drops the odd `a[]` indexing, and uses scalar `&&` (this is a
# per-element classifier applied via sapply()).
hour_fun <- function(a) {
  if (a >= 5 && a < 10) {
    "morning_peak_hrs"
  } else if (a >= 17 && a <= 22) {
    "Evening_peak_hrs"
  } else {
    "Normal_hrs"
  }
}
# Classify every request into morning peak / evening peak / normal hours.
uber$Hour_of_the_day <- sapply(uber$Request_hour, hour_fun)
#****************************************************************************************************************************
# Univariate analysis of the Status column:
#   - request counts per status
#   - percentage of Cancelled / No Cars Available / Trip Completed requests
#   - pie-chart representation of the request mix
#****************************************************************************************************************************
# Count requests per status. Left-assignment with `<-` replaces the original
# right-assignment (`... -> Total_Status`), which is easy to overlook when
# reading; the leading select() was redundant with group_by()/summarise().
Total_Status <- uber %>%
  group_by(Status) %>%
  summarise(count = n())
View(Total_Status)

# status_percent(): share (in %) of all requests that have the given status.
# Factors out the expression the original repeated three times.
status_percent <- function(status) {
  100 * Total_Status$count[Total_Status$Status == status] / sum(Total_Status$count)
}
cancelled_percent         <- status_percent("Cancelled")
No_Cars_Available_percent <- status_percent("No Cars Available")
Trip_Completed_percent    <- status_percent("Trip Completed")

pie(Total_Status$count, labels = Total_Status$Status, main = "Pie chart for the request")
#****************************************************************************************************************************
# PLOT 1: request count by pickup point (Airport vs City).
#****************************************************************************************************************************
ggplot(uber,aes(x=Pickup.point) )+geom_bar()
#****************************************************************************************************************************
# PLOT 2: frequency of each request outcome (Cancelled / No Cars Available /
# Trip Completed).
#****************************************************************************************************************************
ggplot(uber, aes(x=Status))+geom_bar(position = "stack")
# Observation: "No Cars Available" is the dominant failure mode; cancellations
# are roughly half as frequent as car unavailability.
#****************************************************************************************************************************
# Bivariate analysis: pickup point vs status.
# Aggregate request counts for each (Pickup.point, Status) combination.
#****************************************************************************************************************************
uber_summary<-aggregate(Request.id~Pickup.point+Status,uber,length)
#****************************************************************************************************************************
# PLOT 3: pickup point vs request count, split by status
# (absolute counts, then proportions).
#****************************************************************************************************************************
ggplot(uber, aes(x=Pickup.point, fill=Status)) +geom_bar()
ggplot(uber, aes(x=Pickup.point, fill=Status)) +geom_bar(position = "fill")
#****************************************************************************************************************************
# PLOT 4: identify the most problematic request types
# (City-Airport vs Airport-City), absolute then proportional.
#****************************************************************************************************************************
ggplot(uber, aes(x=Status, fill = type_of_request))+geom_bar(position = "stack")
ggplot(uber, aes(x=Status, fill = type_of_request))+geom_bar(position = "fill")
# Observation: Airport-to-City trips see very few cancellations, but cars are
# frequently UNAVAILABLE at the airport. City-to-Airport trips see many
# cancellations, although completed trips to the airport are also high.
#****************************************************************************************************************************
# PLOT 5: request count by weekday, then split by status.
#****************************************************************************************************************************
ggplot(uber, aes(x=Request_weekday))+geom_bar(position = "stack")
ggplot(uber, aes(x=Request_weekday , fill=Status)) + geom_bar(position = "stack")
#****************************************************************************************************************************
# PLOT 6: request count by hour of day, then split by status, to locate the
# most problematic times of day.
#****************************************************************************************************************************
ggplot(uber, aes(x=Request_hour)) + geom_bar(position = "stack")
ggplot(uber, aes(x=Request_hour , fill=Status)) + geom_bar(position = "stack")
# Observations: in the evening the dominant problem is "No Cars Available",
# while the morning shows a spike in cancellations. Demand peaks roughly
# 05:00-10:00 and 17:00-22:00. Combined with plots 3 and 4, most cancelled
# trips are City-to-Airport requests in the morning, whereas evenings suffer
# mainly from cars being unavailable (Airport pickups).
#****************************************************************************************************************************
# PLOT 7: request count by Hour_of_the_day bucket, then split by status.
#****************************************************************************************************************************
ggplot(uber, aes(x=Hour_of_the_day)) + geom_bar(position = "stack")
ggplot(uber, aes(x=Hour_of_the_day, fill= Status)) + geom_bar(position = "stack")
#****************************************************************************************************************************
# PLOT 8: request count by Hour_of_the_day bucket, split by request direction.
#****************************************************************************************************************************
ggplot(uber, aes(x=Hour_of_the_day, fill= type_of_request))+geom_bar(position = "stack")
#****************************************************************************************************************************
# Supply-demand gap analysis
#   supply = completed trips, demand = all requests
#
# Part 1: overall supply and demand
#****************************************************************************************************************************
Supply <- Total_Status$count[which(Total_Status$Status == "Trip Completed")]
demand <- sum(Total_Status$count)
Supply_demand_gap <- demand - Supply
SD_ratio <- Supply / demand
# SD_ratio is ~0.42, i.e. only ~42% of requested trips complete; the company
# is losing out on revenue for the remaining ~58% of requests.
#****************************************************************************************************************************
# Part 2: pickup point = City
#****************************************************************************************************************************
city_pickup <- subset(uber, Pickup.point == "City")
Supply_city <- nrow(subset(city_pickup, Status == "Trip Completed"))
demand_city <- nrow(city_pickup)
Supply_demand_gap_city <- demand_city - Supply_city
SD_ratio_city <- Supply_city / demand_city
#****************************************************************************************************************************
# Part 3: pickup point = Airport
#****************************************************************************************************************************
Airport_pickup <- subset(uber, Pickup.point == "Airport")
# BUG FIX: the original counted completed trips from `city_pickup` here, so
# the airport supply (and therefore the gap and ratio below) silently reused
# the CITY numbers. Count from `Airport_pickup` instead.
Supply_airport <- nrow(subset(Airport_pickup, Status == "Trip Completed"))
demand_airport <- nrow(Airport_pickup)
Supply_demand_gap_airport <- demand_airport - Supply_airport
SD_ratio_Airport <- Supply_airport / demand_airport
#****************************************************************************************************************************
# Part 4: total supply-demand gap during peak hours (morning + evening peaks)
#****************************************************************************************************************************
peak_hrs <- subset(uber, Hour_of_the_day != "Normal_hrs")
Supply_peak_hrs <- nrow(subset(peak_hrs, Status == "Trip Completed"))
demand_peak_hrs <- nrow(peak_hrs)
Supply_demand_gap_peak_hrs <- demand_peak_hrs - Supply_peak_hrs
SD_ratio_peak_hrs <- Supply_peak_hrs / demand_peak_hrs
#****************************************************************************************************************************
# Part 5: peak-hour supply-demand gap for pickups from the City
#****************************************************************************************************************************
peak_hrs_city <- subset(city_pickup, Hour_of_the_day != "Normal_hrs")
Supply_peak_hrs_city <- nrow(subset(peak_hrs_city, Status == "Trip Completed"))
demand_peak_hrs_city <- nrow(peak_hrs_city)
Supply_demand_gap_peak_hrs_city <- demand_peak_hrs_city - Supply_peak_hrs_city
SD_ratio_peak_hrs_city <- Supply_peak_hrs_city / demand_peak_hrs_city
#****************************************************************************************************************************
# Part 6: peak-hour supply-demand gap for pickups from the Airport
#****************************************************************************************************************************
peak_hrs_airport <- subset(Airport_pickup, Hour_of_the_day != "Normal_hrs")
Supply_peak_hrs_airport <- nrow(subset(peak_hrs_airport, Status == "Trip Completed"))
demand_peak_hrs_airport <- nrow(peak_hrs_airport)
Supply_demand_gap_peak_hrs_airport <- demand_peak_hrs_airport - Supply_peak_hrs_airport
SD_ratio_peak_hrs_airport <- Supply_peak_hrs_airport / demand_peak_hrs_airport
#****************************************************************************************************************************
# Export the enriched dataset as CSV for use in Tableau.
# NOTE(review): this absolute, user-specific Windows path will fail on any
# other machine -- consider writing to a relative path instead.
#****************************************************************************************************************************
write.csv(uber, file="C:\\Users\\abhishek.saxena02\\Documents\\BI-OLAP-LABSESSION\\INPUT\\uber.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/5.pipelines.R
\name{pipeline_list_project}
\alias{pipeline_list_project}
\title{Returns a list of all the pipelines in project}
\usage{
pipeline_list_project(auth_token = NULL, project_id = NULL, ...)
}
\arguments{
\item{auth_token}{auth token}
\item{project_id}{ID of a project you want to access.}
\item{...}{parameters passed to sbgapi function}
}
\value{
parsed list of the returned json
}
\description{
Returns a list of all the pipelines in project.
}
\examples{
token = '420b4672ebfc43bab48dc0d18a32fb6f'
\donttest{req = pipeline_list_project(token,
project_id = 'b0b3a611-6bb0-47e5-add7-a83402cf7858')}
}
| /man/pipeline_list_project.Rd | permissive | seandavi/sbgr | R | false | true | 706 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/5.pipelines.R
\name{pipeline_list_project}
\alias{pipeline_list_project}
\title{Returns a list of all the pipelines in project}
\usage{
pipeline_list_project(auth_token = NULL, project_id = NULL, ...)
}
\arguments{
\item{auth_token}{auth token}
\item{project_id}{ID of a project you want to access.}
\item{...}{parameters passed to sbgapi function}
}
\value{
parsed list of the returned json
}
\description{
Returns a list of all the pipelines in project.
}
\examples{
token = '420b4672ebfc43bab48dc0d18a32fb6f'
\donttest{req = pipeline_list_project(token,
project_id = 'b0b3a611-6bb0-47e5-add7-a83402cf7858')}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vault.R
\name{Vault.files}
\alias{Vault.files}
\title{Vault.files}
\usage{
Vault.files(id, ...)
}
\arguments{
\item{id}{The ID of the vault.}
\item{...}{(optional) Additional query parameters (e.g. limit, offset).}
}
\description{
Retrieves all files in a specific vault.
}
\examples{
\dontrun{
vault = Vault.get_personal_vault()
Vault.files(vault$id)
}
}
\references{
\url{https://docs.solvebio.com/}
}
| /man/Vault.files.Rd | no_license | stevekm/solvebio-r | R | false | true | 484 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vault.R
\name{Vault.files}
\alias{Vault.files}
\title{Vault.files}
\usage{
Vault.files(id, ...)
}
\arguments{
\item{id}{The ID of the vault.}
\item{...}{(optional) Additional query parameters (e.g. limit, offset).}
}
\description{
Retrieves all files in a specific vault.
}
\examples{
\dontrun{
vault = Vault.get_personal_vault()
Vault.files(vault$id)
}
}
\references{
\url{https://docs.solvebio.com/}
}
|
#' Create a new Special
#'
#' @description
#' Describes the result of uploading an image resource
#'
#' @docType class
#' @title Special
#' @description Special Class
#' @format An \code{R6Class} generator object
#' @field set_test list(character) [optional]
#' @field item_self integer [optional]
#' @field item_private character [optional]
#' @field item_super character [optional]
#' @field 123_number character [optional]
#' @field array[test] character [optional]
#' @field empty_string character [optional]
#' @field _field_list a list of fields list(character)
#' @field additional_properties additional properties list(character) [optional]
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
Special <- R6::R6Class(
  "Special",
  public = list(
    `set_test` = NULL,
    `item_self` = NULL,
    `item_private` = NULL,
    `item_super` = NULL,
    `123_number` = NULL,
    `array[test]` = NULL,
    `empty_string` = NULL,
    # R field names; note that `item_self`/`item_private`/`item_super` are
    # sanitized names -- the serialized JSON uses "self"/"private"/"super".
    `_field_list` = c("set_test", "item_self", "item_private", "item_super", "123_number", "array[test]", "empty_string"),
    `additional_properties` = list(),
    #' Initialize a new Special class.
    #'
    #' @description
    #' Initialize a new Special class.
    #'
    #' @param set_test set_test
    #' @param item_self item_self
    #' @param item_private item_private
    #' @param item_super item_super
    #' @param 123_number 123_number
    #' @param array[test] array[test]
    #' @param empty_string empty_string
    #' @param additional_properties additional properties (optional)
    #' @param ... Other optional arguments.
    #' @export
    initialize = function(
        `set_test` = NULL, `item_self` = NULL, `item_private` = NULL, `item_super` = NULL, `123_number` = NULL, `array[test]` = NULL, `empty_string` = NULL, additional_properties = NULL, ...
    ) {
      if (!is.null(`set_test`)) {
        stopifnot(is.vector(`set_test`), length(`set_test`) != 0)
        sapply(`set_test`, function(x) stopifnot(is.character(x)))
        # JSON "set" semantics: items must be unique.
        if (!identical(`set_test`, unique(`set_test`))) {
          stop("Error! Items in `set_test` are not unique.")
        }
        self$`set_test` <- `set_test`
      }
      if (!is.null(`item_self`)) {
        stopifnot(is.numeric(`item_self`), length(`item_self`) == 1)
        self$`item_self` <- `item_self`
      }
      if (!is.null(`item_private`)) {
        stopifnot(is.character(`item_private`), length(`item_private`) == 1)
        self$`item_private` <- `item_private`
      }
      if (!is.null(`item_super`)) {
        stopifnot(is.character(`item_super`), length(`item_super`) == 1)
        self$`item_super` <- `item_super`
      }
      if (!is.null(`123_number`)) {
        stopifnot(is.character(`123_number`), length(`123_number`) == 1)
        self$`123_number` <- `123_number`
      }
      if (!is.null(`array[test]`)) {
        stopifnot(is.character(`array[test]`), length(`array[test]`) == 1)
        self$`array[test]` <- `array[test]`
      }
      if (!is.null(`empty_string`)) {
        stopifnot(is.character(`empty_string`), length(`empty_string`) == 1)
        self$`empty_string` <- `empty_string`
      }
      if (!is.null(additional_properties)) {
        for (key in names(additional_properties)) {
          self$additional_properties[[key]] <- additional_properties[[key]]
        }
      }
    },
    #' To JSON string
    #'
    #' @description
    #' Convert to a (nested) list suitable for jsonlite serialization.
    #'
    #' @return Special in JSON format
    #' @export
    toJSON = function() {
      SpecialObject <- list()
      if (!is.null(self$`set_test`)) {
        SpecialObject[["set_test"]] <-
          self$`set_test`
      }
      # Fields below serialize under their original JSON names
      # ("self"/"private"/"super"), not the sanitized R field names.
      if (!is.null(self$`item_self`)) {
        SpecialObject[["self"]] <-
          self$`item_self`
      }
      if (!is.null(self$`item_private`)) {
        SpecialObject[["private"]] <-
          self$`item_private`
      }
      if (!is.null(self$`item_super`)) {
        SpecialObject[["super"]] <-
          self$`item_super`
      }
      if (!is.null(self$`123_number`)) {
        SpecialObject[["123_number"]] <-
          self$`123_number`
      }
      if (!is.null(self$`array[test]`)) {
        SpecialObject[["array[test]"]] <-
          self$`array[test]`
      }
      if (!is.null(self$`empty_string`)) {
        SpecialObject[["empty_string"]] <-
          self$`empty_string`
      }
      for (key in names(self$additional_properties)) {
        SpecialObject[[key]] <- self$additional_properties[[key]]
      }
      SpecialObject
    },
    #' Deserialize JSON string into an instance of Special
    #'
    #' @description
    #' Deserialize JSON string into an instance of Special
    #'
    #' @param input_json the JSON input
    #' @return the instance of Special
    #' @export
    fromJSON = function(input_json) {
      this_object <- jsonlite::fromJSON(input_json)
      if (!is.null(this_object$`set_test`)) {
        self$`set_test` <- ApiClient$new()$deserializeObj(this_object$`set_test`, "set[character]", loadNamespace("petstore"))
        if (!identical(self$`set_test`, unique(self$`set_test`))) {
          stop("Error! Items in `set_test` are not unique.")
        }
      }
      # Renamed fields are read from their original JSON keys.
      if (!is.null(this_object$`self`)) {
        self$`item_self` <- this_object$`self`
      }
      if (!is.null(this_object$`private`)) {
        self$`item_private` <- this_object$`private`
      }
      if (!is.null(this_object$`super`)) {
        self$`item_super` <- this_object$`super`
      }
      if (!is.null(this_object$`123_number`)) {
        self$`123_number` <- this_object$`123_number`
      }
      if (!is.null(this_object$`array[test]`)) {
        self$`array[test]` <- this_object$`array[test]`
      }
      if (!is.null(this_object$`empty_string`)) {
        self$`empty_string` <- this_object$`empty_string`
      }
      # Process additional properties/fields in the payload. Skip both the R
      # field names and the serialized JSON names of renamed fields, so keys
      # already consumed above ("self"/"private"/"super") are not duplicated
      # into additional_properties (a bug in the original).
      for (key in names(this_object)) {
        if (!(key %in% self$`_field_list`) && !(key %in% c("self", "private", "super"))) {
          self$additional_properties[[key]] <- this_object[[key]]
        }
      }
      self
    },
    #' To JSON string
    #'
    #' @description
    #' To JSON String
    #'
    #' @return Special in JSON format
    #' @export
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`set_test`)) {
          sprintf(
          '"set_test":
             [%s]
          ',
          paste(unlist(lapply(self$`set_test`, function(x) paste0('"', x, '"'))), collapse = ",")
          )
        },
        if (!is.null(self$`item_self`)) {
          sprintf(
          '"self":
            %d
                    ',
          self$`item_self`
          )
        },
        if (!is.null(self$`item_private`)) {
          sprintf(
          '"private":
            "%s"
                    ',
          self$`item_private`
          )
        },
        if (!is.null(self$`item_super`)) {
          sprintf(
          '"super":
            "%s"
                    ',
          self$`item_super`
          )
        },
        if (!is.null(self$`123_number`)) {
          sprintf(
          '"123_number":
            "%s"
                    ',
          self$`123_number`
          )
        },
        if (!is.null(self$`array[test]`)) {
          sprintf(
          '"array[test]":
            "%s"
                    ',
          self$`array[test]`
          )
        },
        if (!is.null(self$`empty_string`)) {
          sprintf(
          '"empty_string":
            "%s"
                    ',
          self$`empty_string`
          )
        }
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      json_string <- as.character(jsonlite::minify(paste("{", jsoncontent, "}", sep = "")))
      json_obj <- jsonlite::fromJSON(json_string)
      for (key in names(self$additional_properties)) {
        json_obj[[key]] <- self$additional_properties[[key]]
      }
      json_string <- as.character(jsonlite::minify(jsonlite::toJSON(json_obj, auto_unbox = TRUE, digits = NA)))
      # Return the JSON string explicitly (the original ended on an
      # assignment, which returns its value invisibly).
      json_string
    },
    #' Deserialize JSON string into an instance of Special
    #'
    #' @description
    #' Deserialize JSON string into an instance of Special
    #'
    #' @param input_json the JSON input
    #' @return the instance of Special
    #' @export
    fromJSONString = function(input_json) {
      this_object <- jsonlite::fromJSON(input_json)
      self$`set_test` <- ApiClient$new()$deserializeObj(this_object$`set_test`, "set[character]", loadNamespace("petstore"))
      if (!identical(self$`set_test`, unique(self$`set_test`))) {
        stop("Error! Items in `set_test` are not unique.")
      }
      # BUG FIX: the serialized JSON uses the original property names
      # ("self", "private", "super"), not the sanitized R field names
      # ("item_self", ...). The original read `this_object$item_self` etc.,
      # so these fields were silently dropped on a
      # toJSONString() -> fromJSONString() round-trip (the sibling fromJSON()
      # already read the correct keys).
      self$`item_self` <- this_object$`self`
      self$`item_private` <- this_object$`private`
      self$`item_super` <- this_object$`super`
      self$`123_number` <- this_object$`123_number`
      self$`array[test]` <- this_object$`array[test]`
      self$`empty_string` <- this_object$`empty_string`
      # Process additional properties/fields in the payload, skipping both the
      # R field names and the consumed JSON names of renamed fields.
      for (key in names(this_object)) {
        if (!(key %in% self$`_field_list`) && !(key %in% c("self", "private", "super"))) {
          self$additional_properties[[key]] <- this_object[[key]]
        }
      }
      self
    },
    #' Validate JSON input with respect to Special
    #'
    #' @description
    #' Validate JSON input with respect to Special and throw an exception if invalid
    #'
    #' @param input the JSON input
    #' @export
    validateJSON = function(input) {
      input_json <- jsonlite::fromJSON(input)
    },
    #' To string (JSON format)
    #'
    #' @description
    #' To string (JSON format)
    #'
    #' @return String representation of Special
    #' @export
    toString = function() {
      self$toJSONString()
    },
    #' Return true if the values in all fields are valid.
    #'
    #' @description
    #' Return true if the values in all fields are valid.
    #'
    #' @return true if the values in all fields are valid.
    #' @export
    isValid = function() {
      TRUE
    },
    #' Return a list of invalid fields (if any).
    #'
    #' @description
    #' Return a list of invalid fields (if any).
    #'
    #' @return A list of invalid fields (if any).
    #' @export
    getInvalidFields = function() {
      invalid_fields <- list()
      invalid_fields
    },
    #' Print the object
    #'
    #' @description
    #' Print the object
    #'
    #' @export
    print = function() {
      print(jsonlite::prettify(self$toJSONString()))
      invisible(self)
    }),
  # Lock the class to prevent modifications to the method or field
  lock_class = TRUE
)
## Uncomment below to unlock the class to allow modifications of the method or field
#Special$unlock()
#
## Below is an example to define the print fnuction
#Special$set("public", "print", function(...) {
# print(jsonlite::prettify(self$toJSONString()))
# invisible(self)
#})
## Uncomment below to lock the class to prevent modifications to the method or field
#Special$lock()
| /samples/client/petstore/R-httr2-wrapper/R/special.R | permissive | padamstx/openapi-generator | R | false | false | 11,005 | r | #' Create a new Special
#'
#' @description
#' Describes the result of uploading an image resource
#'
#' @docType class
#' @title Special
#' @description Special Class
#' @format An \code{R6Class} generator object
#' @field set_test list(character) [optional]
#' @field item_self integer [optional]
#' @field item_private character [optional]
#' @field item_super character [optional]
#' @field 123_number character [optional]
#' @field array[test] character [optional]
#' @field empty_string character [optional]
#' @field _field_list a list of fields list(character)
#' @field additional_properties additional properties list(character) [optional]
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
Special <- R6::R6Class(
"Special",
public = list(
`set_test` = NULL,
`item_self` = NULL,
`item_private` = NULL,
`item_super` = NULL,
`123_number` = NULL,
`array[test]` = NULL,
`empty_string` = NULL,
`_field_list` = c("set_test", "item_self", "item_private", "item_super", "123_number", "array[test]", "empty_string"),
`additional_properties` = list(),
#' Initialize a new Special class.
#'
#' @description
#' Initialize a new Special class.
#'
#' @param set_test set_test
#' @param item_self item_self
#' @param item_private item_private
#' @param item_super item_super
#' @param 123_number 123_number
#' @param array[test] array[test]
#' @param empty_string empty_string
#' @param additional_properties additonal properties (optional)
#' @param ... Other optional arguments.
#' @export
initialize = function(
`set_test` = NULL, `item_self` = NULL, `item_private` = NULL, `item_super` = NULL, `123_number` = NULL, `array[test]` = NULL, `empty_string` = NULL, additional_properties = NULL, ...
) {
if (!is.null(`set_test`)) {
stopifnot(is.vector(`set_test`), length(`set_test`) != 0)
sapply(`set_test`, function(x) stopifnot(is.character(x)))
if (!identical(`set_test`, unique(`set_test`))) {
stop("Error! Items in `set_test` are not unique.")
}
self$`set_test` <- `set_test`
}
if (!is.null(`item_self`)) {
stopifnot(is.numeric(`item_self`), length(`item_self`) == 1)
self$`item_self` <- `item_self`
}
if (!is.null(`item_private`)) {
stopifnot(is.character(`item_private`), length(`item_private`) == 1)
self$`item_private` <- `item_private`
}
if (!is.null(`item_super`)) {
stopifnot(is.character(`item_super`), length(`item_super`) == 1)
self$`item_super` <- `item_super`
}
if (!is.null(`123_number`)) {
stopifnot(is.character(`123_number`), length(`123_number`) == 1)
self$`123_number` <- `123_number`
}
if (!is.null(`array[test]`)) {
stopifnot(is.character(`array[test]`), length(`array[test]`) == 1)
self$`array[test]` <- `array[test]`
}
if (!is.null(`empty_string`)) {
stopifnot(is.character(`empty_string`), length(`empty_string`) == 1)
self$`empty_string` <- `empty_string`
}
if (!is.null(additional_properties)) {
for (key in names(additional_properties)) {
self$additional_properties[[key]] <- additional_properties[[key]]
}
}
},
#' To JSON string
#'
#' @description
#' To JSON String
#'
#' @return Special in JSON format
#' @export
toJSON = function() {
SpecialObject <- list()
if (!is.null(self$`set_test`)) {
SpecialObject[["set_test"]] <-
self$`set_test`
}
if (!is.null(self$`item_self`)) {
SpecialObject[["self"]] <-
self$`item_self`
}
if (!is.null(self$`item_private`)) {
SpecialObject[["private"]] <-
self$`item_private`
}
if (!is.null(self$`item_super`)) {
SpecialObject[["super"]] <-
self$`item_super`
}
if (!is.null(self$`123_number`)) {
SpecialObject[["123_number"]] <-
self$`123_number`
}
if (!is.null(self$`array[test]`)) {
SpecialObject[["array[test]"]] <-
self$`array[test]`
}
if (!is.null(self$`empty_string`)) {
SpecialObject[["empty_string"]] <-
self$`empty_string`
}
for (key in names(self$additional_properties)) {
SpecialObject[[key]] <- self$additional_properties[[key]]
}
SpecialObject
},
#' Deserialize JSON string into an instance of Special
#'
#' @description
#' Deserialize JSON string into an instance of Special
#'
#' @param input_json the JSON input
#' @return the instance of Special
#' @export
fromJSON = function(input_json) {
this_object <- jsonlite::fromJSON(input_json)
if (!is.null(this_object$`set_test`)) {
self$`set_test` <- ApiClient$new()$deserializeObj(this_object$`set_test`, "set[character]", loadNamespace("petstore"))
if (!identical(self$`set_test`, unique(self$`set_test`))) {
stop("Error! Items in `set_test` are not unique.")
}
}
if (!is.null(this_object$`self`)) {
self$`item_self` <- this_object$`self`
}
if (!is.null(this_object$`private`)) {
self$`item_private` <- this_object$`private`
}
if (!is.null(this_object$`super`)) {
self$`item_super` <- this_object$`super`
}
if (!is.null(this_object$`123_number`)) {
self$`123_number` <- this_object$`123_number`
}
if (!is.null(this_object$`array[test]`)) {
self$`array[test]` <- this_object$`array[test]`
}
if (!is.null(this_object$`empty_string`)) {
self$`empty_string` <- this_object$`empty_string`
}
# process additional properties/fields in the payload
for (key in names(this_object)) {
if (!(key %in% self$`_field_list`)) { # json key not in list of fields
self$additional_properties[[key]] <- this_object[[key]]
}
}
self
},
#' To JSON string
#'
#' @description
#' To JSON String
#'
#' @return Special in JSON format
#' @export
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`set_test`)) {
sprintf(
'"set_test":
[%s]
',
paste(unlist(lapply(self$`set_test`, function(x) paste0('"', x, '"'))), collapse = ",")
)
},
if (!is.null(self$`item_self`)) {
sprintf(
'"self":
%d
',
self$`item_self`
)
},
if (!is.null(self$`item_private`)) {
sprintf(
'"private":
"%s"
',
self$`item_private`
)
},
if (!is.null(self$`item_super`)) {
sprintf(
'"super":
"%s"
',
self$`item_super`
)
},
if (!is.null(self$`123_number`)) {
sprintf(
'"123_number":
"%s"
',
self$`123_number`
)
},
if (!is.null(self$`array[test]`)) {
sprintf(
'"array[test]":
"%s"
',
self$`array[test]`
)
},
if (!is.null(self$`empty_string`)) {
sprintf(
'"empty_string":
"%s"
',
self$`empty_string`
)
}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
json_string <- as.character(jsonlite::minify(paste("{", jsoncontent, "}", sep = "")))
json_obj <- jsonlite::fromJSON(json_string)
for (key in names(self$additional_properties)) {
json_obj[[key]] <- self$additional_properties[[key]]
}
json_string <- as.character(jsonlite::minify(jsonlite::toJSON(json_obj, auto_unbox = TRUE, digits = NA)))
},
#' Deserialize JSON string into an instance of Special
#'
#' @description
#' Deserialize JSON string into an instance of Special
#'
#' @param input_json the JSON input
#' @return the instance of Special
#' @export
fromJSONString = function(input_json) {
this_object <- jsonlite::fromJSON(input_json)
self$`set_test` <- ApiClient$new()$deserializeObj(this_object$`set_test`, "set[character]", loadNamespace("petstore"))
if (!identical(self$`set_test`, unique(self$`set_test`))) {
stop("Error! Items in `set_test` are not unique.")
}
self$`item_self` <- this_object$`item_self`
self$`item_private` <- this_object$`item_private`
self$`item_super` <- this_object$`item_super`
self$`123_number` <- this_object$`123_number`
self$`array[test]` <- this_object$`array[test]`
self$`empty_string` <- this_object$`empty_string`
# process additional properties/fields in the payload
for (key in names(this_object)) {
if (!(key %in% self$`_field_list`)) { # json key not in list of fields
self$additional_properties[[key]] <- this_object[[key]]
}
}
self
},
#' Validate JSON input with respect to Special
#'
#' @description
#' Validate JSON input with respect to Special and throw an exception if invalid
#'
#' @param input the JSON input
#' @export
validateJSON = function(input) {
input_json <- jsonlite::fromJSON(input)
},
#' To string (JSON format)
#'
#' @description
#' To string (JSON format)
#'
#' @return String representation of Special
#' @export
toString = function() {
self$toJSONString()
},
#' Return true if the values in all fields are valid.
#'
#' @description
#' Return true if the values in all fields are valid.
#'
#' @return true if the values in all fields are valid.
#' @export
isValid = function() {
TRUE
},
#' Return a list of invalid fields (if any).
#'
#' @description
#' Return a list of invalid fields (if any).
#'
#' @return A list of invalid fields (if any).
#' @export
getInvalidFields = function() {
invalid_fields <- list()
invalid_fields
},
#' Print the object
#'
#' @description
#' Print the object
#'
#' @export
print = function() {
print(jsonlite::prettify(self$toJSONString()))
invisible(self)
}),
# Lock the class to prevent modifications to the method or field
lock_class = TRUE
)
## Uncomment below to unlock the class to allow modifications of the method or field
#Special$unlock()
#
## Below is an example to define the print fnuction
#Special$set("public", "print", function(...) {
# print(jsonlite::prettify(self$toJSONString()))
# invisible(self)
#})
## Uncomment below to lock the class to prevent modifications to the method or field
#Special$lock()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/plotLfcGC.R
\docType{methods}
\name{plotLfcGC}
\alias{plotLfcGC}
\title{Scatter plot between the GLM log2FC estimates and GC content.}
\usage{
plotLfcGC(sep, bsgenome = NULL, txdb = NULL, save_pdf_prefix = NULL,
fragment_length = 100, binding_length = 25, effective_GC = FALSE,
save_dir = ".")
\S4method{plotLfcGC}{SummarizedExomePeak}(sep, bsgenome = NULL,
txdb = NULL, save_pdf_prefix = NULL, fragment_length = 100,
binding_length = 25, effective_GC = FALSE, save_dir = ".")
}
\arguments{
\item{sep}{a \code{\link{summarizedExomePeak}} object.}
\item{bsgenome}{a \code{\link{BSgenome}} object for the genome sequence, it could be the name of the reference genome recognized by \code{\link{getBSgenome}}.}
\item{txdb}{a \code{\link{TxDb}} object for the transcript annotation, it could be the name of the reference genome recognized by \code{\link{makeTxDbFromUCSC}}.}
\item{save_pdf_prefix}{a \code{character}, if provided, a pdf file with the given name will be saved under the current directory; Default \code{= NULL}.}
\item{fragment_length}{a \code{numeric} value for the expected fragment length in the RNA-seq library; Default \code{= 100}.}
\item{binding_length}{a \code{numeric} value for the expected antibody binding length in IP samples; Default \code{= 25}.}
\item{save_dir}{a \code{character} for the directory to save the plot; default ".".}
\item{effective_GC}{a \code{logical} value of whether to calculate the weighted GC content by the probability of reads alignment; default \code{= FALSE}.}
}
\value{
a \code{ggplot} object.
}
\description{
\code{plotLfcGC} plot the scatter plot between GC content and the (differential) modification LFCs.
}
\details{
By default, this function will generate a scatter plot between GC content and the log2FC value.
The significant modification sites will be labeled in different colours.
}
\examples{
\dontrun{
sep #a SummarizedExomePeak object
plotLfcGC(sep)
}
}
| /man/plotLfcGC.Rd | no_license | sunhaifeng123/exomePeak2 | R | false | true | 2,032 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/plotLfcGC.R
\docType{methods}
\name{plotLfcGC}
\alias{plotLfcGC}
\title{Scatter plot between the GLM log2FC estimates and GC content.}
\usage{
plotLfcGC(sep, bsgenome = NULL, txdb = NULL, save_pdf_prefix = NULL,
fragment_length = 100, binding_length = 25, effective_GC = FALSE,
save_dir = ".")
\S4method{plotLfcGC}{SummarizedExomePeak}(sep, bsgenome = NULL,
txdb = NULL, save_pdf_prefix = NULL, fragment_length = 100,
binding_length = 25, effective_GC = FALSE, save_dir = ".")
}
\arguments{
\item{sep}{a \code{\link{summarizedExomePeak}} object.}
\item{bsgenome}{a \code{\link{BSgenome}} object for the genome sequence, it could be the name of the reference genome recognized by \code{\link{getBSgenom}}.}
\item{txdb}{a \code{\link{TxDb}} object for the transcript annotation, it could be the name of the reference genome recognized by \code{\link{makeTxDbFromUCSC}}.}
\item{save_pdf_prefix}{a \code{character}, if provided, a pdf file with the given name will be saved under the current directory; Default \code{= NULL}.}
\item{fragment_length}{a \code{numeric} value for the expected fragment length in the RNA-seq library; Default \code{= 100}.}
\item{binding_length}{a \code{numeric} value for the expected antibody binding length in IP samples; Default \code{= 25}.}
\item{save_dir}{a \code{character} for the directory to save the plot; default ".".}
\item{effective_GC}{a \code{logical} value of whether to calculate the weighted GC content by the probability of reads alignment; default \code{= FALSE}.}
}
\value{
a \code{ggplot} object.
}
\description{
\code{plotLfcGC} plot the scatter plot between GC content and the (differential) modification LFCs.
}
\details{
By default, this function will generate a scatter plot between GC content and the log2FC value.
The significant modification sites will be labeled in different colours.
}
\examples{
\dontrun{
sep #a SummarizedExomePeak object
plotLfcGC(sep)
}
}
|
# Example usage of plotly_survival() (defined in plotly-survival.R) on the
# survival::lung data set.
library(survival)
library(survminer)
# library(plotly)
# library(dplyr)
library(ggthemes) # for colour palettes
source("plotly-survival.R")
# Three alternative fits; only the last assignment to gg_p is converted below.
# (The earlier two are kept as ready-to-swap strata examples.)
# Fix: use TRUE instead of the reassignable shorthand T.
gg_p <- ggsurvplot(survfit(Surv(time, status) ~sex, conf.type="plain", conf.int=0.95, data = lung), conf.int = TRUE)
gg_p <- ggsurvplot(survfit(Surv(time, status) ~1, conf.type="plain", conf.int=0.95, data = lung), conf.int = TRUE)
gg_p <- ggsurvplot(survfit(Surv(time, status) ~ph.ecog, conf.type="plain", conf.int=0.95, data = lung), conf.int = TRUE)
# Convert the ggsurvplot object into an interactive plotly figure with
# shaded confidence bands and visible censor marks.
plotly_survival(gg_p, strata_colours = tableau_color_pal("Tableau 10")(4),
                plot_title="Survival of Something",
                xaxis=list(title="days"),
                yaxis=list(title="surv", hoverformat=".2f"),
                plot_CIs = TRUE,
                CI_opacity = 0.25,
                CI_linewidth = 0,
                censor_size = 10,
                showlegend_all=TRUE)
# test edit | /examples.R | permissive | jakeybob/plotly-survival | R | false | false | 980 | r | library(survival)
library(survminer)
# library(plotly)
# library(dplyr)
library(ggthemes) # for colour palettes
source("plotly-survival.R")
gg_p <- ggsurvplot(survfit(Surv(time, status) ~sex, conf.type="plain", conf.int=0.95, data = lung), conf.int = T)
gg_p <- ggsurvplot(survfit(Surv(time, status) ~1, conf.type="plain", conf.int=0.95, data = lung), conf.int = T)
gg_p <- ggsurvplot(survfit(Surv(time, status) ~ph.ecog, conf.type="plain", conf.int=0.95, data = lung), conf.int = T)
plotly_survival(gg_p, strata_colours = tableau_color_pal("Tableau 10")(4),
plot_title="Survival of Something",
xaxis=list(title="days"),
yaxis=list(title="surv", hoverformat=".2f"),
plot_CIs = T,
CI_opacity = 0.25,
CI_linewidth = 0,
censor_size = 10,
showlegend_all=T)
# test edit |
## Multi-PMT microarray scan diagnostics: load one replicate of the NCK1
## scan data and assemble signal-intensity (SI) and saturation-flag matrices
## with one column per PMT voltage, highest first.
source("function_miao.R")
t0=read.table("NCK1_data.txt",header=T)
t1=read.table("NCK1_background.txt",header=T)
i=1
##choose the array with replicate i
replicate_fore=t0[,9]
replicate_back=t1[,16]
t=t0[replicate_fore==i,]
t_b=t1[replicate_back==i,]
##select out the signal intensity from multiple PMT and match the names
## NOTE(review): column positions below assume the exact export layout of
## NCK1_data.txt / NCK1_background.txt -- confirm if the files are regenerated.
SI_fore=t[,c(25,23,21,19,17,15)]
SI_back=t_b[,c(15,13,11,9,7,5)]
sat_fore=t[,c(24,22,20,18,16,14)]
sat_back=t_b[,c(14,12,10,8,6,4)]
names(SI_back)=names(SI_fore)
names(sat_back)=names(sat_fore)
##combine the foreground and background SI from the same array
SI=rbind(SI_fore,SI_back)
sat=rbind(sat_fore,sat_back)
########### Linear Scatter Uncorrected ###############################
################# lowest/hightest PMT Pairs###############################
par(mfrow=c(2,2))
## SI columns run from the highest PMT voltage (col 1) to the lowest (col 6).
plot(SI[,6],SI[,5],cex=0.5,xlab="PMT 450",ylab="PMT 550",main="Lowest PMT Pair (original scale)")
## NOTE(review): the red overlay for the lowest pair draws columns 2/1
## (highest PMTs) rather than 6/5 -- confirm this is intentional.
points(SI[sat[,5]>=1,2],SI[sat[,5]>=1,1],cex=0.5,col="red")
plot(SI[,2],SI[,1],cex=0.5,xlab="PMT 850",ylab="PMT 950",main="Highest PMT Pair (original scale)")
points(SI[sat[,1]>=1,2],SI[sat[,1]>=1,1],cex=0.5,col="red")
########### Log-log Scatter Uncorrected ###############################
################# lowest/hightest PMT Pairs###############################
plot(SI[,6],SI[,5],cex=0.5,xlab="PMT 450",ylab="PMT 550",main="Lowest PMT Pair (log scale)",log="xy")
points(SI[sat[,5]>=1,2],SI[sat[,5]>=1,1],cex=0.5,col="red")
plot(SI[,2],SI[,1],cex=0.5,xlab="PMT 850",ylab="PMT 950",main="Highest PMT Pair (log scale)",log="xy")
points(SI[sat[,1]>=1,2],SI[sat[,1]>=1,1],cex=0.5,col="red")
## dev.copy(pdf, ...) opens a pdf device; the first dev.off() closes that
## pdf device and the second closes the on-screen device.
dev.copy(pdf,"scatter.pdf")
dev.off()
dev.off()
################# Linear Scatter Uncorrected ALL PMT Pairs#############################
plot(SI[,2],SI[,1],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="Globle Scatter Plot for ALL PMT Pairs",col=1,xlim=c(0,2*10^8),ylim=c(0,2*10^8))
## Overlay every remaining (lower, higher) PMT column pair in its own colour;
## the xlab/ylab/main arguments are redundant in points() (only plot() uses them).
for(i in 1:5){
points(SI[,6],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(SI[,5],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(SI[,4],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(SI[,3],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"scatter2.pdf")
dev.off()
dev.off()
######## Linear Scatter Uncorrected ALL PMT Pairs (Near Origin)#############
## Same overlay as above but zoomed to the near-origin region.
plot(SI[,2],SI[,1],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="Local Scatter Plot for ALL PMT Pairs",col=1,xlim=c(0,10^6),ylim=c(0,10^6))
for(i in 1:5){
points(SI[,6],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(SI[,5],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(SI[,4],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(SI[,3],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"scatter3.pdf")
dev.off()
dev.off()
#################### MA plot Uncorrected ###############################
################# lowest/hightest PMT Pairs###############################
## MA coordinates on the natural-log scale: A = mean of the two log SIs,
## M = their difference (higher PMT minus lower PMT).
par(mfrow=c(2,2))
plot(0.5*log(SI[,6])+0.5*log(SI[,5]),log(SI[,5])-log(SI[,6]),cex=0.5,xlab="A",ylab="M",main="Lowest PMT Pair")
## Saturated spots (flag in the higher-PMT column) highlighted in red.
points(0.5*log(SI[sat[,5]>=1,6])+0.5*log(SI[sat[,5]>=1,5]),log(SI[sat[,5]>=1,5])-log(SI[sat[,5]>=1,6]),cex=0.5,col="red")
plot(0.5*log(SI[,1])+0.5*log(SI[,2]),log(SI[,1])-log(SI[,2]),cex=0.5,xlab="A",ylab="M",main="Highest PMT Pair")
points(0.5*log(SI[sat[,1]>=1,1])+0.5*log(SI[sat[,1]>=1,2]),log(SI[sat[,1]>=1,1])-log(SI[sat[,1]>=1,2]),cex=0.5,col="red")
dev.copy(pdf,"MA_pair.pdf")
dev.off()
dev.off()
################# ALL PMT Pairs###############################
plot(0.5*log(SI[,2])+0.5*log(SI[,1]),log(SI[,1])-log(SI[,2]),cex=0.2,xlab="A",ylab="M",main="MA Plot for ALL PMT Pairs",col=1,xlim=c(10,20),ylim=c(-5,10))
for(i in 1:5){
points(0.5*log(SI[,6])+0.5*log(SI[,i]),log(SI[,i])-log(SI[,6]),cex=0.2,main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(0.5*log(SI[,5])+0.5*log(SI[,i]),log(SI[,i])-log(SI[,5]),cex=0.2,main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(0.5*log(SI[,4])+0.5*log(SI[,i]),log(SI[,i])-log(SI[,4]),cex=0.2,main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(0.5*log(SI[,3])+0.5*log(SI[,i]),log(SI[,i])-log(SI[,3]),cex=0.2,main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"MA_all.pdf")
dev.off()
dev.off()
############################### Offset_corrected MA Plot ######################
## Run the multi-PMT correction (multiPMT() comes from function_miao.R);
## intensities are rescaled by 10^6 before/after to match its expected range.
preset=as.matrix(SI)/10^6
saturation=(sat>=1)
scan=multiPMT(preset,saturation)
offset_correct=scan$offset_correct*10^6
####corrected MA plot (with saturation points)##########################
plot(0.5*log(offset_correct[,2])+0.5*log(offset_correct[,1]),log(offset_correct[,1])-log(offset_correct[,2]),cex=0.2,xlab="A",ylab="M",main="MA Plot for All PMT Pairs",col=1,xlim=c(10,20),ylim=c(-5,10))
for(i in 1:5){
points(0.5*log(offset_correct[,6])+0.5*log(offset_correct[,i]),log(offset_correct[,i])-log(offset_correct[,6]),cex=0.2,main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(0.5*log(offset_correct[,5])+0.5*log(offset_correct[,i]),log(offset_correct[,i])-log(offset_correct[,5]),cex=0.2,main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(0.5*log(offset_correct[,4])+0.5*log(offset_correct[,i]),log(offset_correct[,i])-log(offset_correct[,4]),cex=0.2,main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(0.5*log(offset_correct[,3])+0.5*log(offset_correct[,i]),log(offset_correct[,i])-log(offset_correct[,3]),cex=0.2,main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"MA_correct1.pdf")
dev.off()
dev.off()
############## Scatter plot after correction and imputation############################
par(mfrow=c(2,2))
plot(offset_correct[,6],offset_correct[,5],cex=0.5,xlab="PMT 450",ylab="PMT 550",main="Lowest PMT Pair (original scale)")
points(offset_correct[sat[,5]>=1,2],offset_correct[sat[,5]>=1,1],cex=0.5,col="red")
plot(offset_correct[,2],offset_correct[,1],cex=0.5,xlab="PMT 850",ylab="PMT 950",main="Highest PMT Pair (original scale)")
points(offset_correct[sat[,1]>=1,2],offset_correct[sat[,1]>=1,1],cex=0.5,col="red")
######################## on log scale############################
plot(offset_correct[,6],offset_correct[,5],cex=0.5,xlab="PMT 450",ylab="PMT 550",main="Lowest PMT Pair (log scale)",log="xy")
points(offset_correct[sat[,5]>=1,2],offset_correct[sat[,5]>=1,1],cex=0.5,col="red")
plot(offset_correct[,2],offset_correct[,1],cex=0.5,xlab="PMT 850",ylab="PMT 950",main="Highest PMT Pair (log scale)",log="xy")
points(offset_correct[sat[,1]>=1,2],offset_correct[sat[,1]>=1,1],cex=0.5,col="red")
dev.copy(pdf,"scatter_pair_corrected.pdf")
dev.off()
dev.off()
################# density #####################
## Compare log-SI densities across the three stages: raw, offset-corrected,
## and offset+scale-corrected (one curve per PMT channel).
par(mfrow=c(2,2))
plot(density(log(SI[,6])),ylim=c(0,1.5),main="density of raw SI",xlab=expression(log(SI)))
for (i in 1:5){
lines(density(log(SI[,i])))
}
plot(density(log(offset_correct[,6])),ylim=c(0,1.5),main="density of bias corrected SI",xlab=expression(log(SI-a_i)))
for (i in 1:5){
lines(density(log(offset_correct[,i])))
}
normalize=scan$normalize
plot(density(log(normalize[,6])),ylim=c(0,1.5),main="density of scale/bias corrected SI",xlab=expression(log(SI-a_i)/b_i))
for (i in 1:5){
lines(density(log(normalize[,i])))
}
dev.copy(pdf,"density.pdf")
dev.off()
dev.off()
########################## MA plot after bias/scale correction ###############################
plot(0.5*log(normalize[,2])+0.5*log(normalize[,1]),log(normalize[,1])-log(normalize[,2]),cex=0.2,xlab="A",ylab="M",main="MA Plot for ALL PMT Pairs",col=1,xlim=c(-1,10),ylim=c(-5,5))
for(i in 1:5){
points(0.5*log(normalize[,6])+0.5*log(normalize[,i]),log(normalize[,i])-log(normalize[,6]),cex=0.2,main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(0.5*log(normalize[,5])+0.5*log(normalize[,i]),log(normalize[,i])-log(normalize[,5]),cex=0.2,main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(0.5*log(normalize[,4])+0.5*log(normalize[,i]),log(normalize[,i])-log(normalize[,4]),cex=0.2,main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(0.5*log(normalize[,3])+0.5*log(normalize[,i]),log(normalize[,i])-log(normalize[,3]),cex=0.2,main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"MA_offset_bias_corrected.pdf")
dev.off()
dev.off()
###################inter-array correction##############################
##before
## NOTE: t0 is re-assigned here -- the NCK1 table loaded earlier is replaced
## by the CBD data for the remaining inter/intra-array sections.
t0=read.table("CBD_data.txt",header=T)
replicate=t0[,8]
BSV=t0[,27]
par(mfrow=c(2,2))
boxplot(BSV~replicate,log="y",main="before inter-array normalization")
## Pairwise replicate agreement; abline(0,1) is the identity reference line.
plot(BSV[replicate==1],BSV[replicate==2],log="xy",cex=0.5,xlab="replicate 1",ylab="replicate 2")
abline(0,1)
plot(BSV[replicate==1],BSV[replicate==3],log="xy",cex=0.5,xlab="replicate 1",ylab="replicate 3")
abline(0,1)
plot(BSV[replicate==2],BSV[replicate==3],log="xy",cex=0.5,xlab="replicate 2",ylab="replicate 3")
abline(0,1)
dev.copy(pdf,"CBD_inter_array.pdf")
dev.off()
dev.off()
######after
## Same panels using the inter-array-normalized column (28) of the table.
replicate=t0[,8]
BSV=t0[,28]
par(mfrow=c(2,2))
boxplot(BSV~replicate,log="y",main="after inter-array normalization")
plot(BSV[replicate==1],BSV[replicate==2],log="xy",cex=0.5,xlab="replicate 1",ylab="replicate 2")
abline(0,1)
plot(BSV[replicate==1],BSV[replicate==3],log="xy",cex=0.5,xlab="replicate 1",ylab="replicate 3")
abline(0,1)
plot(BSV[replicate==2],BSV[replicate==3],log="xy",cex=0.5,xlab="replicate 2",ylab="replicate 3")
abline(0,1)
dev.copy(pdf,"CBD_inter_array_after.pdf")
dev.off()
dev.off()
#####intra-array normalization######################################
## Relabel block levels so boxes appear in physical block order on the array.
block=t0[,1]
block=as.factor(block)
levels(block)=c(1,3,5,7,9,11,2,4,6,8,10,12)
BSV=t0[,28]
boxplot(BSV~block,log="y",main="before intra-array normalization",xlab="Block",ylab="SI")
dev.copy(pdf,"intra_array_before.pdf")
dev.off()
dev.off()
BSV=t0[,29]
boxplot(BSV~block,log="y",main="after intra-array normalization",xlab="Block",ylab="SI")
dev.copy(pdf,"intra_array_after.pdf")
dev.off()
dev.off()
######################### curve fitting #######################
## Concentration-response fitting for the Abl1 array: one peptide sequence
## per row of `names`, with its two spot IDs in the matching row of `ID`.
x=read.table("Abl1_data.txt",header=T)
## NOTE: this masks base::names() for the rest of the script.
names=c("RQLNpYIQVDLE","FESRpYQQPFED","PPALpYAEPLDS","PSSVpYVPDEWE","NGLNpYIDLDLV",
"SPGEpYVNIEFG","KEEGpYELPYNP","DDPSpYVNVQNL","EDDGpYDVPKPP","DQHDpYDSVASD","AEPQpYEEIPIY")
##corresponding ID
## Numeric placeholder matrix; assigning strings below coerces it to character.
ID=matrix(rep(0,22),nrow=11)
ID[1,]=c("B02","P19")
ID[2,]=c("A23","P16")
ID[3,]=c("A24","P17")
ID[4,]=c("B03","P20")
ID[5,]=c("A21","P14")
ID[6,]=c("A19","P12")
ID[7,]=c("A20","P13")
ID[8,]=c("A12","P09")
ID[9,]=c("A09","P07")
ID[10,]=c("B13","P24")
ID[11,]=c("A16","P10")
######################### original scale (curve) #######################
##The first ID
## Fit a two-parameter log-logistic model per peptide on log1p-transformed
## data and overlay the back-transformed fitted curves on one canvas.
## Peptides 3 and 5 are excluded for this ID set.
i=1
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration (nM)",ylab="normalized SI",main="Curve Fitting (I)")
for (j in 1:11){
if ((j!=5)&(j!=3)){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
## NOTE(review): the result of this matrix() call is discarded -- dead code?
matrix(t1[,5],nrow=3,byrow=T)
## Keep only usable spots (flag col 5 == 1) with zero in quality col 2.
y0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),4])
x0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),1])
reg2=nls(y0~a-log(1+exp(d-x0)),start=list(a=7,d=4))
##points(exp(x0),exp(y0),col=j,cex=0.5,pch=j)
a=coef(reg2)[1]
d=coef(reg2)[2]
## llog() is presumably defined in function_miao.R -- confirm.
curve(exp(llog(log(x),a,d)),from=0.5,to=5000,col=j,add=T,log="x")
}
}
legend("topright",col=c(1:2,4,6:11),lty=1,paste(names,ID[,1])[c(1:2,4,6:11)],cex=0.8)
dev.copy(pdf,"curve_fitting_1.pdf")
dev.off()
dev.off()
######################### the second ID #######################
## Same fits for the second spot ID of each peptide (only peptide 5 excluded).
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",ylab="normalized SI",main="Curve Fitting (II)")
i=2
for (j in 1:11){
if (j!=5){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
matrix(t1[,5],nrow=3,byrow=T)
y0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),4])
x0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),1])
reg2=nls(y0~a-log(1+exp(d-x0)),start=list(a=7,d=4))
##points(exp(x0),exp(y0),col=j,cex=0.5,pch=j)
a=coef(reg2)[1]
d=coef(reg2)[2]
curve(exp(llog(log(x),a,d)),from=0.5,to=5000,col=j,add=T,log="x")
}
}
legend("topright",col=c(1:2,4:11),lty=1,paste(names,ID[,2])[c(1:2,4:11)],cex=0.8)
dev.copy(pdf,"curve_fitting_2.pdf")
dev.off()
dev.off()
######################### original scale (points) #######################
## Plot the per-concentration mean SI (3 replicate spots averaged) for each
## peptide; `con` lists the dilution series in the spot layout order.
i=1
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",ylab="normalized SI",main="Average Points")
con=c(5000.0,100.0,2500.0,50.0,1000.0,10.0,750.0,5.0,500.0,1.0,250.0,0.5)
for (j in 1:11){
if ((j!=5)&(j!=3)){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
## Mask out flagged/low-quality spots before averaging.
t1[(t1[,5]==0)|(t1[,2]!=0),4]=NA
## NOTE: this local `matrix` masks base::matrix() within the script.
matrix=matrix(t1[,4],nrow=3,byrow=T)
y=colMeans(matrix,na.rm=T)
points(con,y,col=j,cex=0.5,pch=j)
}
}
legend("topright",col=c(1:2,4,6:11),pch=c(1:2,4,6:11),paste(names,ID[,1])[c(1:2,4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_points_1.pdf")
dev.off()
dev.off()
##
i=2
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",ylab="normalized SI",main="Average Points")
con=c(5000.0,100.0,2500.0,50.0,1000.0,10.0,750.0,5.0,500.0,1.0,250.0,0.5)
for (j in 1:11){
if (j!=5){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
t1[(t1[,5]==0)|(t1[,2]!=0),4]=NA
matrix=matrix(t1[,4],nrow=3,byrow=T)
y=colMeans(matrix,na.rm=T)
points(con,y,col=j,cex=0.5,pch=j)
}
}
## NOTE(review): i=2 here but the legend labels use ID[,1] -- should this
## be ID[,2]? Confirm against the second-ID curve plots above.
legend("topright",col=c(1:4,6:11),pch=c(1:4,6:11),paste(names,ID[,1])[c(1:4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_points (II).pdf")
dev.off()
dev.off()
######################### log scale (curve) #######################
## Same fitted curves as "Curve Fitting (I)" but drawn on log-log axes.
i=1
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",ylab="normalized SI",log="xy",main="fitting curve I (log scale)")
for (j in 1:11){
if ((j!=5)&(j!=3)){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
## NOTE(review): result of matrix() discarded -- dead code?
matrix(t1[,5],nrow=3,byrow=T)
y0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),4])
x0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),1])
reg2=nls(y0~a-log(1+exp(d-x0)),start=list(a=7,d=4))
##points(exp(x0),exp(y0),col=j,cex=0.5,pch=j)
a=coef(reg2)[1]
d=coef(reg2)[2]
curve(exp(llog(log(x),a,d)),from=0.5,to=5000,col=j,add=T,log="x")
}
}
legend("bottomright",col=c(1:2,4,6:11),lty=1,paste(names,ID[,1])[c(1:2,4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_curve_log_1.pdf")
dev.off()
dev.off()
## Fitted log-logistic curves for the second spot ID of each peptide (peptide
## 5 excluded), drawn on log-log axes.
## Fix: the y-axis label argument was misspelled `yalb=` (silently passed
## through `...` and never used as a label); corrected to `ylab=`.
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",log="xy",main="Fitting Curve II (log scale)",xlab="Concentration",ylab="normalized SI")
i=2
for (j in 1:11){
if (j!=5){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
## NOTE(review): result of matrix() discarded -- kept for parity with the
## sibling sections, but it looks like dead code.
matrix(t1[,5],nrow=3,byrow=TRUE)
## Keep usable spots only, then fit the two-parameter log-logistic model.
y0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),4])
x0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),1])
reg2=nls(y0~a-log(1+exp(d-x0)),start=list(a=7,d=4))
##points(exp(x0),exp(y0),col=j,cex=0.5,pch=j)
a=coef(reg2)[1]
d=coef(reg2)[2]
## llog() is presumably defined in function_miao.R -- confirm.
curve(exp(llog(log(x),a,d)),from=0.5,to=5000,col=j,add=TRUE,log="x")
}
}
legend("bottomright",col=c(1:2,4:11),lty=1,paste(names,ID[,2])[c(1:2,4:11)],cex=0.8)
dev.copy(pdf,"fitting_curve_log_2.pdf")
dev.off()
dev.off()
######################### log scale (average points) #######################
## Per-concentration mean SI points, as in the original-scale section, but on
## log-log axes.
i=1
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",log="xy",ylab="normalized SI",main="average points I (log scale)")
con=c(5000.0,100.0,2500.0,50.0,1000.0,10.0,750.0,5.0,500.0,1.0,250.0,0.5)
for (j in 1:11){
if ((j!=5)&(j!=3)){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
t1[(t1[,5]==0)|(t1[,2]!=0),4]=NA
matrix=matrix(t1[,4],nrow=3,byrow=T)
y=colMeans(matrix,na.rm=T)
points(con,y,col=j,cex=0.5,pch=j)
}
}
legend("bottomright",col=c(1:2,4,6:11),pch=c(1:2,4,6:11),paste(names,ID[,1])[c(1:2,4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_points_log_1.pdf")
dev.off()
dev.off()
######points
i=2
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",log="xy",xlab="Concentration (nM)",ylab="normalized SI",main="average points II (log scale)")
con=c(5000.0,100.0,2500.0,50.0,1000.0,10.0,750.0,5.0,500.0,1.0,250.0,0.5)
for (j in 1:11){
if (j!=5){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
t1[(t1[,5]==0)|(t1[,2]!=0),4]=NA
matrix=matrix(t1[,4],nrow=3,byrow=T)
y=colMeans(matrix,na.rm=T)
points(con,y,col=j,cex=0.5,pch=j)
}
}
## NOTE(review): i=2 here but legend labels use ID[,1] -- should it be ID[,2]?
legend("bottomright",col=c(1:4,6:11),pch=c(1:4,6:11),paste(names,ID[,1])[c(1:4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_points_log_2.pdf")
dev.off()
dev.off()
| /plot.R | no_license | bengalengel/CPCMAs | R | false | false | 16,202 | r | source("function_miao.R")
t0=read.table("NCK1_data.txt",header=T)
t1=read.table("NCK1_background.txt",header=T)
i=1
##choose the array with replicate i
replicate_fore=t0[,9]
replicate_back=t1[,16]
t=t0[replicate_fore==i,]
t_b=t1[replicate_back==i,]
##select out the signal intensity from multiple PMT and match the names
SI_fore=t[,c(25,23,21,19,17,15)]
SI_back=t_b[,c(15,13,11,9,7,5)]
sat_fore=t[,c(24,22,20,18,16,14)]
sat_back=t_b[,c(14,12,10,8,6,4)]
names(SI_back)=names(SI_fore)
names(sat_back)=names(sat_fore)
##combine the foreground and background SI from the same array
SI=rbind(SI_fore,SI_back)
sat=rbind(sat_fore,sat_back)
########### Linear Scatter Uncorrected ###############################
################# lowest/hightest PMT Pairs###############################
par(mfrow=c(2,2))
plot(SI[,6],SI[,5],cex=0.5,xlab="PMT 450",ylab="PMT 550",main="Lowest PMT Pair (original scale)")
points(SI[sat[,5]>=1,2],SI[sat[,5]>=1,1],cex=0.5,col="red")
plot(SI[,2],SI[,1],cex=0.5,xlab="PMT 850",ylab="PMT 950",main="Highest PMT Pair (original scale)")
points(SI[sat[,1]>=1,2],SI[sat[,1]>=1,1],cex=0.5,col="red")
########### Log-log Scatter Uncorrected ###############################
################# lowest/hightest PMT Pairs###############################
plot(SI[,6],SI[,5],cex=0.5,xlab="PMT 450",ylab="PMT 550",main="Lowest PMT Pair (log scale)",log="xy")
points(SI[sat[,5]>=1,2],SI[sat[,5]>=1,1],cex=0.5,col="red")
plot(SI[,2],SI[,1],cex=0.5,xlab="PMT 850",ylab="PMT 950",main="Highest PMT Pair (log scale)",log="xy")
points(SI[sat[,1]>=1,2],SI[sat[,1]>=1,1],cex=0.5,col="red")
dev.copy(pdf,"scatter.pdf")
dev.off()
dev.off()
################# Linear Scatter Uncorrected ALL PMT Pairs#############################
plot(SI[,2],SI[,1],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="Globle Scatter Plot for ALL PMT Pairs",col=1,xlim=c(0,2*10^8),ylim=c(0,2*10^8))
for(i in 1:5){
points(SI[,6],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(SI[,5],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(SI[,4],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(SI[,3],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"scatter2.pdf")
dev.off()
dev.off()
######## Linear Scatter Uncorrected ALL PMT Pairs (Near Origin)#############
plot(SI[,2],SI[,1],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="Local Scatter Plot for ALL PMT Pairs",col=1,xlim=c(0,10^6),ylim=c(0,10^6))
for(i in 1:5){
points(SI[,6],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(SI[,5],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(SI[,4],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(SI[,3],SI[,i],cex=0.2,xlab="Lower PMT",ylab="Higher PMT",main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"scatter3.pdf")
dev.off()
dev.off()
#################### MA plot Uncorrected ###############################
################# lowest/hightest PMT Pairs###############################
par(mfrow=c(2,2))
plot(0.5*log(SI[,6])+0.5*log(SI[,5]),log(SI[,5])-log(SI[,6]),cex=0.5,xlab="A",ylab="M",main="Lowest PMT Pair")
points(0.5*log(SI[sat[,5]>=1,6])+0.5*log(SI[sat[,5]>=1,5]),log(SI[sat[,5]>=1,5])-log(SI[sat[,5]>=1,6]),cex=0.5,col="red")
plot(0.5*log(SI[,1])+0.5*log(SI[,2]),log(SI[,1])-log(SI[,2]),cex=0.5,xlab="A",ylab="M",main="Highest PMT Pair")
points(0.5*log(SI[sat[,1]>=1,1])+0.5*log(SI[sat[,1]>=1,2]),log(SI[sat[,1]>=1,1])-log(SI[sat[,1]>=1,2]),cex=0.5,col="red")
dev.copy(pdf,"MA_pair.pdf")
dev.off()
dev.off()
################# ALL PMT Pairs###############################
plot(0.5*log(SI[,2])+0.5*log(SI[,1]),log(SI[,1])-log(SI[,2]),cex=0.2,xlab="A",ylab="M",main="MA Plot for ALL PMT Pairs",col=1,xlim=c(10,20),ylim=c(-5,10))
for(i in 1:5){
points(0.5*log(SI[,6])+0.5*log(SI[,i]),log(SI[,i])-log(SI[,6]),cex=0.2,main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(0.5*log(SI[,5])+0.5*log(SI[,i]),log(SI[,i])-log(SI[,5]),cex=0.2,main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(0.5*log(SI[,4])+0.5*log(SI[,i]),log(SI[,i])-log(SI[,4]),cex=0.2,main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(0.5*log(SI[,3])+0.5*log(SI[,i]),log(SI[,i])-log(SI[,3]),cex=0.2,main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"MA_all.pdf")
dev.off()
dev.off()
############################### Offset_corrected MA Plot ######################
preset=as.matrix(SI)/10^6
saturation=(sat>=1)
scan=multiPMT(preset,saturation)
offset_correct=scan$offset_correct*10^6
####corrected MA plot (with saturation points)##########################
plot(0.5*log(offset_correct[,2])+0.5*log(offset_correct[,1]),log(offset_correct[,1])-log(offset_correct[,2]),cex=0.2,xlab="A",ylab="M",main="MA Plot for All PMT Pairs",col=1,xlim=c(10,20),ylim=c(-5,10))
for(i in 1:5){
points(0.5*log(offset_correct[,6])+0.5*log(offset_correct[,i]),log(offset_correct[,i])-log(offset_correct[,6]),cex=0.2,main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(0.5*log(offset_correct[,5])+0.5*log(offset_correct[,i]),log(offset_correct[,i])-log(offset_correct[,5]),cex=0.2,main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(0.5*log(offset_correct[,4])+0.5*log(offset_correct[,i]),log(offset_correct[,i])-log(offset_correct[,4]),cex=0.2,main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(0.5*log(offset_correct[,3])+0.5*log(offset_correct[,i]),log(offset_correct[,i])-log(offset_correct[,3]),cex=0.2,main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"MA_correct1.pdf")
dev.off()
dev.off()
############## Scatter plot after correction and imputation############################
par(mfrow=c(2,2))
plot(offset_correct[,6],offset_correct[,5],cex=0.5,xlab="PMT 450",ylab="PMT 550",main="Lowest PMT Pair (original scale)")
points(offset_correct[sat[,5]>=1,2],offset_correct[sat[,5]>=1,1],cex=0.5,col="red")
plot(offset_correct[,2],offset_correct[,1],cex=0.5,xlab="PMT 850",ylab="PMT 950",main="Highest PMT Pair (original scale)")
points(offset_correct[sat[,1]>=1,2],offset_correct[sat[,1]>=1,1],cex=0.5,col="red")
######################## on log scale############################
plot(offset_correct[,6],offset_correct[,5],cex=0.5,xlab="PMT 450",ylab="PMT 550",main="Lowest PMT Pair (log scale)",log="xy")
points(offset_correct[sat[,5]>=1,2],offset_correct[sat[,5]>=1,1],cex=0.5,col="red")
plot(offset_correct[,2],offset_correct[,1],cex=0.5,xlab="PMT 850",ylab="PMT 950",main="Highest PMT Pair (log scale)",log="xy")
points(offset_correct[sat[,1]>=1,2],offset_correct[sat[,1]>=1,1],cex=0.5,col="red")
dev.copy(pdf,"scatter_pair_corrected.pdf")
dev.off()
dev.off()
################# density #####################
par(mfrow=c(2,2))
plot(density(log(SI[,6])),ylim=c(0,1.5),main="density of raw SI",xlab=expression(log(SI)))
for (i in 1:5){
lines(density(log(SI[,i])))
}
plot(density(log(offset_correct[,6])),ylim=c(0,1.5),main="density of bias corrected SI",xlab=expression(log(SI-a_i)))
for (i in 1:5){
lines(density(log(offset_correct[,i])))
}
normalize=scan$normalize
plot(density(log(normalize[,6])),ylim=c(0,1.5),main="density of scale/bias corrected SI",xlab=expression(log(SI-a_i)/b_i))
for (i in 1:5){
lines(density(log(normalize[,i])))
}
dev.copy(pdf,"density.pdf")
dev.off()
dev.off()
########################## MA plot after bias/scale correction ###############################
plot(0.5*log(normalize[,2])+0.5*log(normalize[,1]),log(normalize[,1])-log(normalize[,2]),cex=0.2,xlab="A",ylab="M",main="MA Plot for ALL PMT Pairs",col=1,xlim=c(-1,10),ylim=c(-5,5))
for(i in 1:5){
points(0.5*log(normalize[,6])+0.5*log(normalize[,i]),log(normalize[,i])-log(normalize[,6]),cex=0.2,main="ALL PMT Pairs",col=i+1)
}
for(i in 1:4){
points(0.5*log(normalize[,5])+0.5*log(normalize[,i]),log(normalize[,i])-log(normalize[,5]),cex=0.2,main="ALL PMT Pairs",col=i+6)
}
for(i in 1:3){
points(0.5*log(normalize[,4])+0.5*log(normalize[,i]),log(normalize[,i])-log(normalize[,4]),cex=0.2,main="ALL PMT Pairs",col=i+10)
}
for(i in 1:2){
points(0.5*log(normalize[,3])+0.5*log(normalize[,i]),log(normalize[,i])-log(normalize[,3]),cex=0.2,main="ALL PMT Pairs",col=i+13)
}
dev.copy(pdf,"MA_offset_bias_corrected.pdf")
dev.off()
dev.off()
###################inter-array correction##############################
##before
t0=read.table("CBD_data.txt",header=T)
replicate=t0[,8]
BSV=t0[,27]
par(mfrow=c(2,2))
boxplot(BSV~replicate,log="y",main="before inter-array normalization")
plot(BSV[replicate==1],BSV[replicate==2],log="xy",cex=0.5,xlab="replicate 1",ylab="replicate 2")
abline(0,1)
plot(BSV[replicate==1],BSV[replicate==3],log="xy",cex=0.5,xlab="replicate 1",ylab="replicate 3")
abline(0,1)
plot(BSV[replicate==2],BSV[replicate==3],log="xy",cex=0.5,xlab="replicate 2",ylab="replicate 3")
abline(0,1)
dev.copy(pdf,"CBD_inter_array.pdf")
dev.off()
dev.off()
######after
replicate=t0[,8]
BSV=t0[,28]
par(mfrow=c(2,2))
boxplot(BSV~replicate,log="y",main="after inter-array normalization")
plot(BSV[replicate==1],BSV[replicate==2],log="xy",cex=0.5,xlab="replicate 1",ylab="replicate 2")
abline(0,1)
plot(BSV[replicate==1],BSV[replicate==3],log="xy",cex=0.5,xlab="replicate 1",ylab="replicate 3")
abline(0,1)
plot(BSV[replicate==2],BSV[replicate==3],log="xy",cex=0.5,xlab="replicate 2",ylab="replicate 3")
abline(0,1)
dev.copy(pdf,"CBD_inter_array_after.pdf")
dev.off()
dev.off()
#####intra-array normalization######################################
block=t0[,1]
block=as.factor(block)
levels(block)=c(1,3,5,7,9,11,2,4,6,8,10,12)
BSV=t0[,28]
boxplot(BSV~block,log="y",main="before intra-array normalization",xlab="Block",ylab="SI")
dev.copy(pdf,"intra_array_before.pdf")
dev.off()
dev.off()
BSV=t0[,29]
boxplot(BSV~block,log="y",main="after intra-array normalization",xlab="Block",ylab="SI")
dev.copy(pdf,"intra_array_after.pdf")
dev.off()
dev.off()
######################### curve fitting #######################
# Load the Abl1 binding data; one row per spot measurement.
x=read.table("Abl1_data.txt",header=T)
# Phosphopeptide sequences to fit (pY = phosphotyrosine).
names=c("RQLNpYIQVDLE","FESRpYQQPFED","PPALpYAEPLDS","PSSVpYVPDEWE","NGLNpYIDLDLV",
"SPGEpYVNIEFG","KEEGpYELPYNP","DDPSpYVNVQNL","EDDGpYDVPKPP","DQHDpYDSVASD","AEPQpYEEIPIY")
##corresponding ID
# Probe IDs per peptide: one row per peptide, column 1 = first probe ID,
# column 2 = second probe ID. Built directly as a character matrix instead
# of overwriting a numeric placeholder row by row.
ID = matrix(c("B02", "P19",
              "A23", "P16",
              "A24", "P17",
              "B03", "P20",
              "A21", "P14",
              "A19", "P12",
              "A20", "P13",
              "A12", "P09",
              "A09", "P07",
              "B13", "P24",
              "A16", "P10"),
            nrow = 11, byrow = TRUE)
######################### original scale (curve) #######################
##The first ID
# Fit a two-parameter saturation curve per peptide (first probe ID column)
# and overlay the fitted curves on one plot, on the original SI scale.
i=1
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration (nM)",ylab="normalized SI",main="Curve Fitting (I)")
for (j in 1:11){
# Peptides 3 and 5 are skipped for this probe ID -- presumably failed
# fits or QC exclusions; confirm with the data owner.
if ((j!=5)&(j!=3)){
# Subset rows for this peptide/probe; kept columns c(5,7,13,44,39) are,
# by the filters below: concentration, treatment code, ?, normalized SI,
# use-flag -- verify against the Abl1_data.txt header.
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
# NOTE(review): result is discarded -- this line has no effect
# (leftover from interactive inspection?).
matrix(t1[,5],nrow=3,byrow=T)
# Keep flagged (col 5 == 1), untreated (col 2 == 0) rows; log1p transform.
y0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),4])
x0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),1])
# Log-logistic on the log scale: y = a - log(1 + exp(d - x)).
reg2=nls(y0~a-log(1+exp(d-x0)),start=list(a=7,d=4))
##points(exp(x0),exp(y0),col=j,cex=0.5,pch=j)
a=coef(reg2)[1]
d=coef(reg2)[2]
# llog() must be defined/sourced before running this script.
# NOTE(review): log="x" is ignored when add=TRUE; the axis scale comes
# from the existing (linear) plot.
curve(exp(llog(log(x),a,d)),from=0.5,to=5000,col=j,add=T,log="x")
}
}
# Legend indices match the plotted series (all j except 3 and 5).
legend("topright",col=c(1:2,4,6:11),lty=1,paste(names,ID[,1])[c(1:2,4,6:11)],cex=0.8)
dev.copy(pdf,"curve_fitting_1.pdf")
dev.off()
dev.off()
######################### the second ID #######################
# Same per-peptide saturation fits as above, but for the second probe ID
# column; only peptide 5 is excluded here.
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",ylab="normalized SI",main="Curve Fitting (II)")
i=2
for (j in 1:11){
if (j!=5){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
y0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),4])
x0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),1])
# Log-logistic on the log scale: y = a - log(1 + exp(d - x)).
reg2=nls(y0~a-log(1+exp(d-x0)),start=list(a=7,d=4))
##points(exp(x0),exp(y0),col=j,cex=0.5,pch=j)
a=coef(reg2)[1]
d=coef(reg2)[2]
curve(exp(llog(log(x),a,d)),from=0.5,to=5000,col=j,add=T,log="x")
}
}
# BUG FIX: the loop draws every series except j==5, so the legend must index
# c(1:4,6:11); the original c(1:2,4:11) omitted series 3 (which is plotted)
# and listed series 5 (which is not).
legend("topright",col=c(1:4,6:11),lty=1,paste(names,ID[,2])[c(1:4,6:11)],cex=0.8)
dev.copy(pdf,"curve_fitting_2.pdf")
dev.off()
dev.off()
######################### original scale (points) #######################
# Plot replicate-averaged normalized SI per concentration for the first probe
# ID column, one point series per peptide (peptides 3 and 5 skipped).
i=1
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",ylab="normalized SI",main="Average Points")
# Concentration series in on-array column order (matches the columns of the
# 3 x 12 replicate matrix built below).
con=c(5000.0,100.0,2500.0,50.0,1000.0,10.0,750.0,5.0,500.0,1.0,250.0,0.5)
for (j in 1:11){
if ((j!=5)&(j!=3)){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
# Mask unflagged (col 5 == 0) or treated (col 2 != 0) measurements.
t1[(t1[,5]==0)|(t1[,2]!=0),4]=NA
# Renamed from 'matrix' to avoid shadowing base::matrix().
si_mat=matrix(t1[,4],nrow=3,byrow=T)
y=colMeans(si_mat,na.rm=T)
points(con,y,col=j,cex=0.5,pch=j)
}
}
legend("topright",col=c(1:2,4,6:11),pch=c(1:2,4,6:11),paste(names,ID[,1])[c(1:2,4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_points_1.pdf")
dev.off()
dev.off()
##
# Replicate-averaged points for the second probe ID column (only peptide 5
# skipped).
i=2
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",ylab="normalized SI",main="Average Points")
con=c(5000.0,100.0,2500.0,50.0,1000.0,10.0,750.0,5.0,500.0,1.0,250.0,0.5)
for (j in 1:11){
if (j!=5){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
t1[(t1[,5]==0)|(t1[,2]!=0),4]=NA
# Renamed from 'matrix' to avoid shadowing base::matrix().
si_mat=matrix(t1[,4],nrow=3,byrow=T)
y=colMeans(si_mat,na.rm=T)
points(con,y,col=j,cex=0.5,pch=j)
}
}
# BUG FIX: this panel plots the second probe IDs (i=2), so the legend labels
# must use ID[,2]; the original used ID[,1] (compare the curve plot above,
# which correctly uses ID[,2] for i=2).
legend("topright",col=c(1:4,6:11),pch=c(1:4,6:11),paste(names,ID[,2])[c(1:4,6:11)],cex=0.8)
# NOTE(review): output name "fitting_points (II).pdf" is inconsistent with
# the "_1"/"_2" scheme used elsewhere; kept as-is to preserve behavior.
dev.copy(pdf,"fitting_points (II).pdf")
dev.off()
dev.off()
######################### log scale (curve) #######################
# Same fitted curves as "Curve Fitting (I)" but on a log-log plot.
i=1
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",ylab="normalized SI",log="xy",main="fitting curve I (log scale)")
for (j in 1:11){
# Peptides 3 and 5 excluded for this probe ID (same exclusions as above).
if ((j!=5)&(j!=3)){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
# NOTE(review): result is discarded -- this line has no effect.
matrix(t1[,5],nrow=3,byrow=T)
y0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),4])
x0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),1])
# Log-logistic on the log scale: y = a - log(1 + exp(d - x)).
reg2=nls(y0~a-log(1+exp(d-x0)),start=list(a=7,d=4))
##points(exp(x0),exp(y0),col=j,cex=0.5,pch=j)
a=coef(reg2)[1]
d=coef(reg2)[2]
# log="x" is ignored with add=TRUE; axes come from the existing log-log plot.
curve(exp(llog(log(x),a,d)),from=0.5,to=5000,col=j,add=T,log="x")
}
}
legend("bottomright",col=c(1:2,4,6:11),lty=1,paste(names,ID[,1])[c(1:2,4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_curve_log_1.pdf")
dev.off()
dev.off()
# Fitted curves for the second probe ID column on a log-log plot.
# BUG FIX: the y-axis label argument was misspelled 'yalb', so it was silently
# swallowed by '...' and the axis was left unlabeled; corrected to 'ylab'.
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",log="xy",main="Fitting Curve II (log scale)",xlab="Concentration",ylab="normalized SI")
i=2
for (j in 1:11){
if (j!=5){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
y0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),4])
x0=log1p(t1[(t1[,5]==1)&(t1[,2]==0),1])
# Log-logistic on the log scale: y = a - log(1 + exp(d - x)).
reg2=nls(y0~a-log(1+exp(d-x0)),start=list(a=7,d=4))
##points(exp(x0),exp(y0),col=j,cex=0.5,pch=j)
a=coef(reg2)[1]
d=coef(reg2)[2]
curve(exp(llog(log(x),a,d)),from=0.5,to=5000,col=j,add=T,log="x")
}
}
# BUG FIX: the loop draws every series except j==5, so the legend must index
# c(1:4,6:11); the original c(1:2,4:11) omitted plotted series 3 and listed
# unplotted series 5.
legend("bottomright",col=c(1:4,6:11),lty=1,paste(names,ID[,2])[c(1:4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_curve_log_2.pdf")
dev.off()
dev.off()
######################### log scale (average points) #######################
# Replicate-averaged points for the first probe ID column, log-log axes.
i=1
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",xlab="Concentration(nM)",log="xy",ylab="normalized SI",main="average points I (log scale)")
# Concentration series in on-array column order (columns of the 3 x 12
# replicate matrix built below).
con=c(5000.0,100.0,2500.0,50.0,1000.0,10.0,750.0,5.0,500.0,1.0,250.0,0.5)
for (j in 1:11){
if ((j!=5)&(j!=3)){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
# Mask unflagged (col 5 == 0) or treated (col 2 != 0) measurements.
t1[(t1[,5]==0)|(t1[,2]!=0),4]=NA
# NOTE(review): this local 'matrix' shadows base::matrix() in the global
# environment (function lookup still works; variable lookup would not).
matrix=matrix(t1[,4],nrow=3,byrow=T)
y=colMeans(matrix,na.rm=T)
points(con,y,col=j,cex=0.5,pch=j)
}
}
legend("bottomright",col=c(1:2,4,6:11),pch=c(1:2,4,6:11),paste(names,ID[,1])[c(1:2,4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_points_log_1.pdf")
dev.off()
dev.off()
######points
# Replicate-averaged points for the second probe ID column, log-log axes
# (only peptide 5 skipped).
i=2
plot(1,1,xlim=c(0.5,5000),ylim=c(10^2,2*10^8),type="n",log="xy",xlab="Concentration (nM)",ylab="normalized SI",main="average points II (log scale)")
con=c(5000.0,100.0,2500.0,50.0,1000.0,10.0,750.0,5.0,500.0,1.0,250.0,0.5)
for (j in 1:11){
if (j!=5){
t1=x[(x[,4]==names[j])&(x[,8]==ID[j,i]),c(5,7,13,44,39)]
t1[(t1[,5]==0)|(t1[,2]!=0),4]=NA
# Renamed from 'matrix' to avoid shadowing base::matrix().
si_mat=matrix(t1[,4],nrow=3,byrow=T)
y=colMeans(si_mat,na.rm=T)
points(con,y,col=j,cex=0.5,pch=j)
}
}
# BUG FIX: this panel plots the second probe IDs (i=2), so the legend labels
# must use ID[,2]; the original used ID[,1].
legend("bottomright",col=c(1:4,6:11),pch=c(1:4,6:11),paste(names,ID[,2])[c(1:4,6:11)],cex=0.8)
dev.copy(pdf,"fitting_points_log_2.pdf")
dev.off()
dev.off()
|
#### Load Data #####
# Gender imputation for authorships with unknown gender. gender == 10 marks
# an unestimated/unknown gender -- inferred from the filters below; confirm
# against the genderizer pipeline.
authorships <- read.csv("../data/authorships_with_genderizer.csv")
# Field-level characteristics: cluster id, terminal-field flag, share of
# women (pctFem), share of unestimated authors (pctUnest).
fieldTable <- read.csv("../data/fieldChar_v2_post60.csv", header = T)
# Get list of terminal fields
terminalFieldsTotal <- fieldTable$cluster[which(fieldTable$termField == 1)]
# each authorship to their terminal field
# Cluster ids are colon-separated paths (e.g. "1:4:2"); build every prefix
# of each path and keep the deepest prefix that is a terminal field.
splits <- strsplit(as.character(authorships$cluster), ":")
getSubs <- function(x){sapply(1:length(x), function(z){paste(x[1:z], collapse = ":")} )}
# NOTE(review): max(..., na.rm = T) over an all-NA match vector returns -Inf
# with a warning; this assumes every cluster path matches at least one
# terminal field -- verify on the real data.
termIndex <- sapply(sapply(sapply(splits, getSubs), match, terminalFieldsTotal), max, na.rm = T)
terminalFieldsAuthorships <- terminalFieldsTotal[termIndex]
# Authorships to impute: unknown gender AND a resolvable terminal field.
indicesToImpute <- which(authorships$gender == 10 & !is.na(terminalFieldsAuthorships))
indicesInFieldTable <- match(terminalFieldsAuthorships[indicesToImpute], fieldTable$cluster)
### Low Homophily Scenario ###
# Each authorship imputed independently with the field's share of women among
# gender-estimated authors: pctFem / (1 - pctUnest).
propForAuthorshipsLow <- fieldTable$pctFem[indicesInFieldTable] / (1 - fieldTable$pctUnest[indicesInFieldTable])
set.seed(10101)
# Ten imputation replicates; seed fixed for reproducibility.
for(i in 1:10){
imputedGenders <- rbinom(length(propForAuthorshipsLow), size = 1, prob = propForAuthorshipsLow)
genderAll <- authorships$gender
genderAll[indicesToImpute] <- imputedGenders
write.csv(genderAll, paste("lowHomophily_", i, ".csv", sep = ""))
}
### High Homophily Scenario ###
# all paperIDs with authorships to imput
pIdMissing <- unique(authorships$pID[indicesToImpute])
# If paper has at least 1 estimated author, get the m/f split
probs <- aggregate(authorships$gender ~ authorships$pID, FUN = mean, subset = authorships$pID %in% pIdMissing & authorships$gender!= 10)
# indices corresponding to at least 1 estimated author
regularInd <- indicesToImpute[which(authorships$pID[indicesToImpute] %in% probs[,1 ])]
regularProbs <- probs[match(authorships$pID[regularInd], probs[, 1]), 2]
# which indices correspond to sampling with same prob as TF and setting all to same
irregularInd <- indicesToImpute[which(!(authorships$pID[indicesToImpute] %in% probs[, 1]))]
# which papers correspond to sampling with same prob as TF and setting all to same
irregularPID <- unique(authorships$pID[irregularInd])
irregularIndicesInFieldTable <- match(terminalFieldsAuthorships[match(irregularPID, authorships$pID)], fieldTable$cluster)
irregularProbs <- fieldTable$pctFem[irregularIndicesInFieldTable] / (1 - fieldTable$pctUnest[irregularIndicesInFieldTable])
set.seed(10101)
for(i in 1:10){
regularImpute <- rbinom(length(regularProbs), size = 1, prob = regularProbs)
irregularImpute <- rbinom(length(irregularProbs), size = 1, prob = irregularProbs)
genderAll <- authorships$gender
genderAll[regularInd] <- regularImpute
# One draw per 'irregular' paper: every imputed author on that paper gets
# the same gender (maximal within-paper homophily).
genderAll[irregularInd] <- irregularImpute[match(authorships$pID[irregularInd], irregularPID)]
write.csv(genderAll, paste("highHomophily_", i, ".csv", sep = ""))
}
| /imputations.R | no_license | ysamwang/genderHomophily | R | false | false | 2,776 | r | #### Load Data #####
# Gender imputation for authorships with unknown gender (gender == 10 marks
# unknown -- inferred from the filters below; confirm).
authorships <- read.csv("../data/authorships_with_genderizer.csv")
fieldTable <- read.csv("../data/fieldChar_v2_post60.csv", header = T)
# Get list of terminal fields
terminalFieldsTotal <- fieldTable$cluster[which(fieldTable$termField == 1)]
# each authorship to their terminal field
# Cluster ids are colon-separated paths; keep the deepest prefix of each
# path that is a terminal field.
splits <- strsplit(as.character(authorships$cluster), ":")
getSubs <- function(x){sapply(1:length(x), function(z){paste(x[1:z], collapse = ":")} )}
termIndex <- sapply(sapply(sapply(splits, getSubs), match, terminalFieldsTotal), max, na.rm = T)
terminalFieldsAuthorships <- terminalFieldsTotal[termIndex]
# Authorships to impute: unknown gender with a resolvable terminal field.
indicesToImpute <- which(authorships$gender == 10 & !is.na(terminalFieldsAuthorships))
indicesInFieldTable <- match(terminalFieldsAuthorships[indicesToImpute], fieldTable$cluster)
### Low Homophily Scenario ###
# Independent Bernoulli draws with the field's share of women among
# gender-estimated authors: pctFem / (1 - pctUnest).
propForAuthorshipsLow <- fieldTable$pctFem[indicesInFieldTable] / (1 - fieldTable$pctUnest[indicesInFieldTable])
set.seed(10101)
for(i in 1:10){
imputedGenders <- rbinom(length(propForAuthorshipsLow), size = 1, prob = propForAuthorshipsLow)
genderAll <- authorships$gender
genderAll[indicesToImpute] <- imputedGenders
write.csv(genderAll, paste("lowHomophily_", i, ".csv", sep = ""))
}
### High Homophily Scenario ###
# all paperIDs with authorships to imput
pIdMissing <- unique(authorships$pID[indicesToImpute])
# If paper has at least 1 estimated author, get the m/f split
probs <- aggregate(authorships$gender ~ authorships$pID, FUN = mean, subset = authorships$pID %in% pIdMissing & authorships$gender!= 10)
# indices corresponding to at least 1 estimated author
regularInd <- indicesToImpute[which(authorships$pID[indicesToImpute] %in% probs[,1 ])]
regularProbs <- probs[match(authorships$pID[regularInd], probs[, 1]), 2]
# which indices correspond to sampling with same prob as TF and setting all to same
irregularInd <- indicesToImpute[which(!(authorships$pID[indicesToImpute] %in% probs[, 1]))]
# which papers correspond to sampling with same prob as TF and setting all to same
irregularPID <- unique(authorships$pID[irregularInd])
irregularIndicesInFieldTable <- match(terminalFieldsAuthorships[match(irregularPID, authorships$pID)], fieldTable$cluster)
irregularProbs <- fieldTable$pctFem[irregularIndicesInFieldTable] / (1 - fieldTable$pctUnest[irregularIndicesInFieldTable])
set.seed(10101)
for(i in 1:10){
regularImpute <- rbinom(length(regularProbs), size = 1, prob = regularProbs)
irregularImpute <- rbinom(length(irregularProbs), size = 1, prob = irregularProbs)
genderAll <- authorships$gender
genderAll[regularInd] <- regularImpute
# One draw per 'irregular' paper: all imputed authors on it share a gender.
genderAll[irregularInd] <- irregularImpute[match(authorships$pID[irregularInd], irregularPID)]
write.csv(genderAll, paste("highHomophily_", i, ".csv", sep = ""))
}
|
#' Distance data for the test points
#'
#' The distance data for the test points, used both for decomposition and for
#' the validation of the forecast results.
#'
#' @format An object of class dockless_dfc, containing 500 data frames of
#' subclass dockless_df, with 2117 rows each.
#' \describe{
#'   \item{bike_id}{unique identification number of the bike}
#'   \item{time}{timestamp of the observation}
#'   \item{distance}{distance to the nearest available bike, in meters}
#' }
"distancedata_testpoints"
| /R/distancedata_testpoints.R | no_license | luukvdmeer/dockless | R | false | false | 501 | r | #' Distance data for the test points
#'
#' The distance data for the test points, used both for decomposition and for
#' the validation of the forecast results.
#'
#' @format An object of class dockless_dfc, containing 500 data frames of
#' subclass dockless_df, with 2117 rows each.
#' \describe{
#'   \item{bike_id}{unique identification number of the bike}
#'   \item{time}{timestamp of the observation}
#'   \item{distance}{distance to the nearest available bike, in meters}
#' }
"distancedata_testpoints"
|
library(ggplot2)
library(tidyr)
library(dplyr)
library(rstan)
library(data.table)
library(lubridate)
library(gdata)
library(EnvStats)
library(matrixStats)
library(scales)
library(gridExtra)
library(ggpubr)
library(bayesplot)
library(cowplot)
library(gsheet)
source("geom-stepribbon.r")
#---------------------------------------------------------------------------
make_forecast_plot <- function(){
args <- commandArgs(trailingOnly = TRUE)
filename <- args[1]
load(paste0("combinedTDI/results/", filename))
filename <- strsplit(filename, "-stanfit.Rdata")[[1]][1]
for(i in 1:length(countries)){
N <- length(dates[[i]])
N2 <- N + forecast
country <- countries[[i]]
predicted_cases <- colMeans(prediction[,1:N,i])
predicted_cases_li <- colQuantiles(prediction[,1:N,i], probs=.025)
predicted_cases_ui <- colQuantiles(prediction[,1:N,i], probs=.975)
estimated_deaths <- colMeans(estimated.deaths[,1:N,i])
estimated_deaths_li <- colQuantiles(estimated.deaths[,1:N,i], probs=.025)
estimated_deaths_ui <- colQuantiles(estimated.deaths[,1:N,i], probs=.975)
estimated_deaths_forecast <- colMeans(estimated.deaths[,1:N2,i])[N:N2]
estimated_deaths_li_forecast <- colQuantiles(estimated.deaths[,1:N2,i], probs=.025)[N:N2]
estimated_deaths_ui_forecast <- colQuantiles(estimated.deaths[,1:N2,i], probs=.975)[N:N2]
estimated_deathsh <- colMeans(estimated.deathsh[,1:N,i])
estimated_deathsh_li <- colQuantiles(estimated.deathsh[,1:N,i], probs=.025)
estimated_deathsh_ui <- colQuantiles(estimated.deathsh[,1:N,i], probs=.975)
estimated_deathsh_li2 <- colQuantiles(estimated.deathsh[,1:N,i], probs=.25)
estimated_deathsh_ui2 <- colQuantiles(estimated.deathsh[,1:N,i], probs=.75)
estimated_deathsh_forecast <- colMeans(estimated.deathsh[,1:N2,i])[N:N2]
estimated_deathsh_li_forecast <- colQuantiles(estimated.deathsh[,1:N2,i], probs=.025)[N:N2]
estimated_deathsh_ui_forecast <- colQuantiles(estimated.deathsh[,1:N2,i], probs=.975)[N:N2]
estimated_deathsc <- colMeans(estimated.deathsc[,1:N,i])
estimated_deathsc_li <- colQuantiles(estimated.deathsc[,1:N,i], probs=.025)
estimated_deathsc_ui <- colQuantiles(estimated.deathsc[,1:N,i], probs=.975)
estimated_deathsc_li2 <- colQuantiles(estimated.deathsc[,1:N,i], probs=.25)
estimated_deathsc_ui2 <- colQuantiles(estimated.deathsc[,1:N,i], probs=.75)
estimated_deathsc_forecast <- colMeans(estimated.deathsc[,1:N2,i])[N:N2]
estimated_deathsc_li_forecast <- colQuantiles(estimated.deathsc[,1:N2,i], probs=.025)[N:N2]
estimated_deathsc_ui_forecast <- colQuantiles(estimated.deathsc[,1:N2,i], probs=.975)[N:N2]
estimated_cases <- colMeans(estimated.cases[,1:N,i])
estimated_cases_li <- colQuantiles(estimated.cases[,1:N,i], probs=.025)
estimated_cases_ui <- colQuantiles(estimated.cases[,1:N,i], probs=.975)
estimated_cases_li2 <- colQuantiles(estimated.cases[,1:N,i], probs=.25)
estimated_cases_ui2 <- colQuantiles(estimated.cases[,1:N,i], probs=.75)
estimated_cases_forecast <- colMeans(estimated.cases[,1:N2,i])[N:N2]
estimated_cases_li_forecast <- colQuantiles(estimated.cases[,1:N2,i], probs=.025)[N:N2]
estimated_cases_ui_forecast <- colQuantiles(estimated.cases[,1:N2,i], probs=.975)[N:N2]
estimated_hosp <- colMeans(estimated.hospitals[,1:N,i])
estimated_hosp_li <- colQuantiles(estimated.hospitals[,1:N,i], probs=.025)
estimated_hosp_ui <- colQuantiles(estimated.hospitals[,1:N,i], probs=.975)
estimated_hosp_li2 <- colQuantiles(estimated.hospitals[,1:N,i], probs=.25)
estimated_hosp_ui2 <- colQuantiles(estimated.hospitals[,1:N,i], probs=.75)
estimated_hosp_forecast <- colMeans(estimated.hospitals[,1:N2,i])[N:N2]
estimated_hosp_li_forecast <- colQuantiles(estimated.hospitals[,1:N2,i], probs=.025)[N:N2]
estimated_hosp_ui_forecast <- colQuantiles(estimated.hospitals[,1:N2,i], probs=.975)[N:N2]
estimated_hospi <- colMeans(estimated.hospitalsi[,1:N,i])
estimated_hospi_li <- colQuantiles(estimated.hospitalsi[,1:N,i], probs=.025)
estimated_hospi_ui <- colQuantiles(estimated.hospitalsi[,1:N,i], probs=.975)
estimated_hospi_li2 <- colQuantiles(estimated.hospitalsi[,1:N,i], probs=.25)
estimated_hospi_ui2 <- colQuantiles(estimated.hospitalsi[,1:N,i], probs=.75)
estimated_hospi_forecast <- colMeans(estimated.hospitalsi[,1:N2,i])[N:N2]
estimated_hospi_li_forecast <- colQuantiles(estimated.hospitalsi[,1:N2,i], probs=.025)[N:N2]
estimated_hospi_ui_forecast <- colQuantiles(estimated.hospitalsi[,1:N2,i], probs=.975)[N:N2]
estimated_hospo <- colMeans(estimated.hospitalso[,1:N,i])
estimated_hospo_li <- colQuantiles(estimated.hospitalso[,1:N,i], probs=.025)
estimated_hospo_ui <- colQuantiles(estimated.hospitalso[,1:N,i], probs=.975)
estimated_hospo_li2 <- colQuantiles(estimated.hospitalso[,1:N,i], probs=.25)
estimated_hospo_ui2 <- colQuantiles(estimated.hospitalso[,1:N,i], probs=.75)
estimated_hospo_forecast <- colMeans(estimated.hospitalso[,1:N2,i])[N:N2]
estimated_hospo_li_forecast <- colQuantiles(estimated.hospitalso[,1:N2,i], probs=.025)[N:N2]
estimated_hospo_ui_forecast <- colQuantiles(estimated.hospitalso[,1:N2,i], probs=.975)[N:N2]
estimated_icu <- colMeans(estimated.icus[,1:N,i])
estimated_icu_li <- colQuantiles(estimated.icus[,1:N,i], probs=.025)
estimated_icu_ui <- colQuantiles(estimated.icus[,1:N,i], probs=.975)
estimated_icu_li2 <- colQuantiles(estimated.icus[,1:N,i], probs=.25)
estimated_icu_ui2 <- colQuantiles(estimated.icus[,1:N,i], probs=.75)
estimated_icu_forecast <- colMeans(estimated.icus[,1:N2,i])[N:N2]
estimated_icu_li_forecast <- colQuantiles(estimated.icus[,1:N2,i], probs=.025)[N:N2]
estimated_icu_ui_forecast <- colQuantiles(estimated.icus[,1:N2,i], probs=.975)[N:N2]
estimated_icui <- colMeans(estimated.icusi[,1:N,i])
estimated_icui_li <- colQuantiles(estimated.icusi[,1:N,i], probs=.025)
estimated_icui_ui <- colQuantiles(estimated.icusi[,1:N,i], probs=.975)
estimated_icui_li2 <- colQuantiles(estimated.icusi[,1:N,i], probs=.25)
estimated_icui_ui2 <- colQuantiles(estimated.icusi[,1:N,i], probs=.75)
estimated_icui_forecast <- colMeans(estimated.icusi[,1:N2,i])[N:N2]
estimated_icui_li_forecast <- colQuantiles(estimated.icusi[,1:N2,i], probs=.025)[N:N2]
estimated_icui_ui_forecast <- colQuantiles(estimated.icusi[,1:N2,i], probs=.975)[N:N2]
estimated_icuo <- colMeans(estimated.icuso[,1:N,i])
estimated_icuo_li <- colQuantiles(estimated.icuso[,1:N,i], probs=.025)
estimated_icuo_ui <- colQuantiles(estimated.icuso[,1:N,i], probs=.975)
estimated_icuo_li2 <- colQuantiles(estimated.icuso[,1:N,i], probs=.25)
estimated_icuo_ui2 <- colQuantiles(estimated.icuso[,1:N,i], probs=.75)
estimated_icuo_forecast <- colMeans(estimated.icuso[,1:N2,i])[N:N2]
estimated_icuo_li_forecast <- colQuantiles(estimated.icuso[,1:N2,i], probs=.025)[N:N2]
estimated_icuo_ui_forecast <- colQuantiles(estimated.icuso[,1:N2,i], probs=.975)[N:N2]
rt <- colMeans(out$Rt_adj[,1:N,i])
rt_li <- colQuantiles(out$Rt_adj[,1:N,i],probs=.025)
rt_ui <- colQuantiles(out$Rt_adj[,1:N,i],probs=.975)
data_country <- data.frame("time" = as_date(as.character(dates[[i]])),
"country" = rep(country, length(dates[[i]])),
#"country_population" = rep(country_population, length(dates[[i]])),
"reported_cases" = reported_cases[[i]],
"reported_cases_c" = cumsum(reported_cases[[i]]),
"predicted_cases_c" = cumsum(predicted_cases),
"predicted_min_c" = cumsum(predicted_cases_li),
"predicted_max_c" = cumsum(predicted_cases_ui),
"predicted_cases" = predicted_cases,
"predicted_min" = predicted_cases_li,
"predicted_max" = predicted_cases_ui,
"deaths" = deaths_by_country[[i]],
"deaths_c" = cumsum(deaths_by_country[[i]]),
"estimated_deaths_c" = cumsum(estimated_deaths),
"death_min_c" = cumsum(estimated_deaths_li),
"death_max_c"= cumsum(estimated_deaths_ui),
"estimated_deaths" = estimated_deaths,
"death_min" = estimated_deaths_li,
"death_max"= estimated_deaths_ui,
"rt" = rt,
"rt_min" = rt_li,
"rt_max" = rt_ui)
times <- as_date(as.character(dates[[i]]))
if(is.null(forecast)) forecast <- 0
times_forecast <- times[length(times)] + 0:forecast
data_country_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_deaths_forecast,
"death_min_forecast" = estimated_deaths_li_forecast,
"death_max_forecast"= estimated_deaths_ui_forecast)
data_country2 <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_cases_c" = cumsum(estimated_cases),
"cases_min_c" = cumsum(estimated_cases_li),
"cases_max_c"= cumsum(estimated_cases_ui),
"estimated_cases" = estimated_cases,
"cases_min" = estimated_cases_li,
"cases_max"= estimated_cases_ui,
"cases_min2" = estimated_cases_li2,
"cases_max2"= estimated_cases_ui2)
data_cases_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_cases_forecast,
"death_min_forecast" = estimated_cases_li_forecast,
"death_max_forecast"= estimated_cases_ui_forecast)
data_deathsh <- data.frame("time" = as_date(as.character(dates[[i]])),
"deaths" = deathsh_by_country[[i]],
"deaths_c" = cumsum(deathsh_by_country[[i]]),
"estimated_deaths_c" = cumsum(estimated_deathsh),
"death_min_c" = cumsum(estimated_deathsh_li),
"death_max_c"= cumsum(estimated_deathsh_ui),
"estimated_deaths" = estimated_deathsh,
"death_min" = estimated_deathsh_li,
"death_max"= estimated_deathsh_ui,
"death_min2" = estimated_deathsh_li2,
"death_max2"= estimated_deathsh_ui2)
data_deathsh_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_deathsh_forecast,
"death_min_forecast" = estimated_deathsh_li_forecast,
"death_max_forecast"= estimated_deathsh_ui_forecast)
data_deathsc <- data.frame("time" = as_date(as.character(dates[[i]])),
"deaths" = deathsc_by_country[[i]],
"deaths_c" = cumsum(deathsc_by_country[[i]]),
"estimated_deaths_c" = cumsum(estimated_deathsc),
"death_min_c" = cumsum(estimated_deathsc_li),
"death_max_c"= cumsum(estimated_deathsc_ui),
"estimated_deaths" = estimated_deathsc,
"death_min" = estimated_deathsc_li,
"death_max"= estimated_deathsc_ui,
"death_min2" = estimated_deathsc_li2,
"death_max2"= estimated_deathsc_ui2)
data_deathsc_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_deathsc_forecast,
"death_min_forecast" = estimated_deathsc_li_forecast,
"death_max_forecast"= estimated_deathsc_ui_forecast)
data_hosp <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_hosp_c" = cumsum(estimated_hosp),
"hosp_min_c" = cumsum(estimated_hosp_li),
"hosp_max_c"= cumsum(estimated_hosp_ui),
"estimated_hosp" = estimated_hosp,
"hosp_min" = estimated_hosp_li,
"hosp_max"= estimated_hosp_ui,
"hosp_min2" = estimated_hosp_li2,
"hosp_max2"= estimated_hosp_ui2,
"reported_hosp" = hosps_by_country[[i]])
data_hosp_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_hosp_forecast,
"death_min_forecast" = estimated_hosp_li_forecast,
"death_max_forecast"= estimated_hosp_ui_forecast)
data_hospi <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_hosp_c" = cumsum(estimated_hospi),
"hosp_min_c" = cumsum(estimated_hospi_li),
"hosp_max_c"= cumsum(estimated_hospi_ui),
"estimated_hosp" = estimated_hospi,
"hosp_min" = estimated_hospi_li,
"hosp_max"= estimated_hospi_ui,
"hosp_min2" = estimated_hospi_li2,
"hosp_max2"= estimated_hospi_ui2,
"reported_hosp" = hospsin_by_country[[i]])
data_hospi_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_hospi_forecast,
"death_min_forecast" = estimated_hospi_li_forecast,
"death_max_forecast"= estimated_hospi_ui_forecast)
data_hospo <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_hosp_c" = cumsum(estimated_hospo),
"hosp_min_c" = cumsum(estimated_hospo_li),
"hosp_max_c"= cumsum(estimated_hospo_ui),
"estimated_hosp" = estimated_hospo,
"hosp_min" = estimated_hospo_li,
"hosp_max"= estimated_hospo_ui,
"hosp_min2" = estimated_hospo_li2,
"hosp_max2"= estimated_hospo_ui2,
"reported_hosp" = hospsout_by_country[[i]])
data_hospo_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_hospo_forecast,
"death_min_forecast" = estimated_hospo_li_forecast,
"death_max_forecast"= estimated_hospo_ui_forecast)
data_icu <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_icu_c" = cumsum(estimated_icu),
"icu_min_c" = cumsum(estimated_icu_li),
"icu_max_c"= cumsum(estimated_icu_ui),
"estimated_icu" = estimated_icu,
"icu_min" = estimated_icu_li,
"icu_max"= estimated_icu_ui,
"icu_min2" = estimated_icu_li2,
"icu_max2"= estimated_icu_ui2,
"reported_icu" = icus_by_country[[i]])
data_icu_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_icu_forecast,
"death_min_forecast" = estimated_icu_li_forecast,
"death_max_forecast"= estimated_icu_ui_forecast)
data_icui <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_icu_c" = cumsum(estimated_icui),
"icu_min_c" = cumsum(estimated_icui_li),
"icu_max_c"= cumsum(estimated_icui_ui),
"estimated_icu" = estimated_icui,
"icu_min" = estimated_icui_li,
"icu_max"= estimated_icui_ui,
"icu_min2" = estimated_icui_li2,
"icu_max2"= estimated_icui_ui2,
"reported_icu" = icusin_by_country[[i]])
data_icui_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_icui_forecast,
"death_min_forecast" = estimated_icui_li_forecast,
"death_max_forecast"= estimated_icui_ui_forecast)
data_icuo <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_icu_c" = cumsum(estimated_icuo),
"icu_min_c" = cumsum(estimated_icuo_li),
"icu_max_c"= cumsum(estimated_icuo_ui),
"estimated_icu" = estimated_icuo,
"icu_min" = estimated_icuo_li,
"icu_max"= estimated_icuo_ui,
"icu_min2" = estimated_icuo_li2,
"icu_max2"= estimated_icuo_ui2,
"reported_icu" = icusout_by_country[[i]])
data_icuo_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_icuo_forecast,
"death_min_forecast" = estimated_icuo_li_forecast,
"death_max_forecast"= estimated_icuo_ui_forecast)
###
# Add later data:
load("data.Rdata")
day<-strsplit(dd[-1,2],split="\\.")
ff<-function(i,x){
decembr <- which(unlist(lapply(x, function(iter) iter[2]=='12')))
if(identical(integer(0), decembr)){
leto <- '.2020'
} else{
if(max(december)<i){
leto <- '.2021'
} else{
leto <- '.2020'
}
}
paste(x[[i]][1],".",strsplit(day[[i]][2]," ")[[1]][2],leto,sep="")
}
day<-unlist(lapply(1:length(day),ff,day))
day<-as.Date(day,format="%d.%m.%Y")
day<-c((day[1]-1:50)[order(day[1]-1:50)],day)
if(max(dates[[i]]) < max(day)){
# Find all later dates:
wh_dates <- which(day > max(dates[[i]]))
if(forecast<length(wh_dates)){
wh_dates <- wh_dates[1:forecast]
}
# Prepare data:
# Deaths:
deaths<- as.numeric(dd$umrli..vsi.[-1]) #cumulative!
deaths[is.na(deaths)]<-0
deaths<-c(rep(0,50),deaths)
deathsi<-deaths
for (ite in 2:length(deaths)){
deathsi[ite]<-deaths[ite]-deaths[ite-1]
}
deaths<-deathsi
deathsc<-c(rep(0,50), as.numeric(dd$D.u[-1])) #cumulative! prispevek je lahko negativen!
deathsc[is.na(deathsc)] <- 0
deathsh<-c(rep(0,50), as.numeric(dd$H.D[-1])) #cumulative! prispevek je lahko negativen!
deathsh[is.na(deathsh)] <- 0
# Cases:
cases<- c(rep(0,50),as.numeric(dd$potrjeni.danes[-1]))
cases[is.na(cases)]<-0
# Hosp:
hospitalizirani<-as.numeric(dd$hospitalizirani..trenutno.[-1]) #cumulative! prispevek je lahko negativen!
hospitalizirani[is.na(hospitalizirani)]<-0
hospitalizirani<-c(rep(0,50),hospitalizirani)
hospitaliziraniin<-dd$novi[-1]
hospitaliziraniin[is.na(hospitaliziraniin)]<-0
hospitaliziraniin<-c(rep(0,50),hospitaliziraniin)
hospitaliziraniout<-as.numeric(dd$iz.bol..oskrbe..vsi.[-1])
hospitaliziraniout[is.na(hospitaliziraniout)]<-0
hospitaliziraniout<-c(rep(0,50),hospitaliziraniout)
hospitaliziraniouti<-hospitaliziraniout
for (ite in 2:length(hospitaliziraniout)){
hospitaliziraniouti[ite]<-hospitaliziraniout[ite]-hospitaliziraniout[ite-1]
}
hospitaliziraniout<-hospitaliziraniouti
# ICU:
icu<-c(rep(0,50), as.numeric(dd$intenzivna.enota..trenutno.[-1])) #cumulative! prispevek je lahko negativen!
icu[is.na(icu)] <- 0
icuin<-c(rep(0,50), as.numeric(dd$I.i[-1])) #cumulative! prispevek je lahko negativen!
icuin[is.na(icuin)] <- 0
icuout<-c(rep(0,50), as.numeric(dd$I.o[-1])) #cumulative! prispevek je lahko negativen!
icuout[is.na(icuout)] <- 0
later_data <- data.frame("time" = day[wh_dates],
"deaths" = deaths[wh_dates],
"deathsh" = deathsh[wh_dates],
"deathsc" = deathsc[wh_dates],
"cases" = cases[wh_dates],
"hosp" = hospitalizirani[wh_dates],
"hospi" = hospitaliziraniin[wh_dates],
"hospo" = hospitaliziraniout[wh_dates],
"icu" = icu[wh_dates],
"icui" = icuin[wh_dates],
"icuo" = icuout[wh_dates])
} else{
later_data <- data.frame()
}
###
posteriors_out_of_time <- list(
estimated.deaths=estimated.deaths[,(N+1):N2,i],
estimated.deathsh=estimated.deathsh[,(N+1):N2,i],
estimated.deathsc=estimated.deathsc[,(N+1):N2,i],
estimated.cases=estimated.cases[,(N+1):N2,i],
estimated.hospitals=estimated.hospitals[,(N+1):N2,i],
estimated.hospitalsi=estimated.hospitalsi[,(N+1):N2,i],
estimated.hospitalso=estimated.hospitalso[,(N+1):N2,i],
estimated.icus=estimated.icus[,(N+1):N2,i],
estimated.icusi=estimated.icusi[,(N+1):N2,i],
estimated.icuso=estimated.icuso[,(N+1):N2,i])
save(posteriors_out_of_time, file=paste0("combinedTDI/results/", country, "-posteriors-", filename, ".RData"))
###
make_single_plot(data_country = data_country, data_country2 = data_country2,
data_deathsh = data_deathsh, data_deathsc = data_deathsc,
data_hosp = data_hosp, data_hospi = data_hospi, data_hospo = data_hospo,
data_icu = data_icu, data_icui = data_icui, data_icuo = data_icuo,
data_country_forecast = data_country_forecast,
data_deathsh_forecast = data_deathsh_forecast,
data_deathsc_forecast = data_deathsc_forecast,
data_cases_forecast = data_cases_forecast,
data_hosp_forecast = data_hosp_forecast,
data_hospi_forecast = data_hospi_forecast,
data_hospo_forecast = data_hospo_forecast,
data_icu_forecast = data_icu_forecast,
data_icui_forecast = data_icui_forecast,
data_icuo_forecast = data_icuo_forecast,
filename = filename,
country = country,
model_input = model_input,
later_data = later_data)
}
}
# Build the per-country forecast figure panels (deaths, cases, hospital, ICU),
# save them as PDF/SVG grids, and save a forecast-error summary to .RData.
#
# Arguments (interface unchanged from the original):
#   data_country            observed + fitted deaths (cols: time, deaths, reported_cases,
#                           estimated_deaths, death_min, death_max, ...)
#   data_country2           fitted cases (cols: time, estimated_cases, cases_min, cases_max, ...)
#   data_deathsh/_deathsc   fitted hospital/community deaths (same column layout as deaths)
#   data_hosp/_hospi/_hospo fitted hospital occupancy / admissions / discharges
#                           (cols: time, reported_hosp, estimated_hosp, hosp_min, hosp_max)
#   data_icu/_icui/_icuo    fitted ICU occupancy / admissions / discharges
#                           (cols: time, reported_icu, estimated_icu, icu_min, icu_max)
#   *_forecast              forecast frames; all share the column names
#                           estimated_deaths_forecast / death_min_forecast / death_max_forecast,
#                           and row 1 is the last observed day
#   filename, country       used to build output file paths
#   model_input             unused here; kept for call-site compatibility
#   later_data              data observed after the fit date (possibly empty data.frame());
#                           columns deaths, deathsh, deathsc, cases, hosp, hospi, hospo,
#                           icu, icui, icuo
make_single_plot <- function(data_country, data_country2,
                             data_deathsh, data_deathsc,
                             data_hosp, data_hospi, data_hospo,
                             data_icu, data_icui, data_icuo,
                             data_country_forecast,
                             data_deathsh_forecast, data_deathsc_forecast,
                             data_cases_forecast,
                             data_hosp_forecast, data_hospi_forecast, data_hospo_forecast,
                             data_icu_forecast, data_icui_forecast, data_icuo_forecast,
                             filename, country, model_input, later_data){
  # Label language switch (fixed to English; Slovenian strings kept so the
  # switch can be re-enabled without hunting for the old texts).
  language_english <- TRUE
  pick_label <- function(en, sl) if (language_english) en else sl
  xlab_text <- pick_label('Date', 'Datum')
  # Last observed day: dashed cut-off line in every panel; the annotation
  # anchor sits 8 days past it. (Equals the last time of the gathered
  # deaths frame used in the original implementation.)
  last_obs_time <- data_country$time[length(data_country$time)]
  vline_x <- last_obs_time
  annotate_x <- last_obs_time + 8
  has_later <- !identical(later_data, data.frame())
  # One forecast panel: observed bars, fitted line + 95% ribbon, forecast
  # line + ribbon, shared axis styling. All layers pass their own `data`,
  # so the base data of ggplot() is irrelevant (the original sometimes used
  # data_country there; behaviour is identical).
  #   obs_data/obs_col     observed values (bars) and the y-axis ceiling
  #   est_data/est_col     fitted mean line
  #   min_col/max_col      fitted 95% interval ribbon columns in est_data
  #   forecast_data        forecast frame (shared *_forecast column names)
  #   later_col            column of later_data to overlay as extra bars
  #   ylab_text            y-axis label
  make_panel <- function(obs_data, obs_col, est_data, est_col, min_col, max_col,
                         forecast_data, later_col, ylab_text) {
    # Common y ceiling over observed, fitted-upper and forecast-upper values.
    y_top <- ceiling(max(c(est_data[[max_col]], obs_data[[obs_col]],
                           forecast_data$death_max_forecast)))
    p <- ggplot(obs_data) +
      geom_bar(data = obs_data, aes(x = time, y = .data[[obs_col]]),
               fill = "coral4", stat = 'identity', alpha = 0.5)
    if (has_later) {
      p <- p +
        geom_bar(data = later_data, aes(x = time, y = .data[[later_col]]),
                 fill = "coral4", stat = 'identity', alpha = 0.5)
    }
    p +
      geom_line(data = est_data, aes(x = time, y = .data[[est_col]]),
                col = "deepskyblue4") +
      geom_line(data = forecast_data,
                aes(x = time, y = estimated_deaths_forecast),
                col = "black", alpha = 0.5) +
      geom_ribbon(data = est_data,
                  aes(x = time, ymin = .data[[min_col]], ymax = .data[[max_col]]),
                  fill = "deepskyblue4", alpha = 0.3) +
      geom_ribbon(data = forecast_data,
                  aes(x = time,
                      ymin = death_min_forecast,
                      ymax = death_max_forecast),
                  fill = "black", alpha = 0.35) +
      geom_vline(xintercept = vline_x,
                 col = "black", linetype = "dashed", alpha = 0.5) +
      xlab(xlab_text) +
      ylab(ylab_text) +
      scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
      scale_y_continuous(breaks = seq(0, y_top, length.out = 6)) +
      coord_cartesian(ylim = c(0, y_top), expand = FALSE) +
      theme_pubr(base_family = "sans") +
      theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
      guides(fill = guide_legend(ncol = 1, reverse = TRUE)) +
      annotate(geom = "text", x = annotate_x, y = 10000, label = "",
               color = "black")
  }
  ## Deaths (total, hospital, community):
  p1 <- make_panel(data_country, "deaths",
                   data_country, "estimated_deaths", "death_min", "death_max",
                   data_country_forecast, "deaths",
                   pick_label('Daily number of deaths\n', "Dnevno stevilo umrlih\n"))
  p1_h <- make_panel(data_deathsh, "deaths",
                     data_deathsh, "estimated_deaths", "death_min", "death_max",
                     data_deathsh_forecast, "deathsh",
                     pick_label('Deaths H\n', "Dnevno stevilo umrlih\n"))
  p1_c <- make_panel(data_deathsc, "deaths",
                     data_deathsc, "estimated_deaths", "death_min", "death_max",
                     data_deathsc_forecast, "deathsc",
                     pick_label('Deaths C\n', "Dnevno stevilo umrlih\n"))
  ## Cases (observed bars come from data_country, fit from data_country2):
  p2 <- make_panel(data_country, "reported_cases",
                   data_country2, "estimated_cases", "cases_min", "cases_max",
                   data_cases_forecast, "cases",
                   pick_label("Daily number of confirmed infections\n",
                              "Dnevno stevilo potrjeno okuzenih\n"))
  ## Hospitalizations (occupancy, admissions, discharges):
  p3 <- make_panel(data_hosp, "reported_hosp",
                   data_hosp, "estimated_hosp", "hosp_min", "hosp_max",
                   data_hosp_forecast, "hosp",
                   pick_label("Number of hospitalized patients\n",
                              "Stevilo hospitaliziranih\n"))
  p3_IN <- make_panel(data_hospi, "reported_hosp",
                      data_hospi, "estimated_hosp", "hosp_min", "hosp_max",
                      data_hospi_forecast, "hospi",
                      pick_label("Hospitalized IN\n", "Stevilo hospitaliziranih\n"))
  p3_OUT <- make_panel(data_hospo, "reported_hosp",
                       data_hospo, "estimated_hosp", "hosp_min", "hosp_max",
                       data_hospo_forecast, "hospo",
                       pick_label("Hospitalized OUT\n", "Stevilo hospitaliziranih\n"))
  ## ICU (occupancy, admissions, discharges):
  p4 <- make_panel(data_icu, "reported_icu",
                   data_icu, "estimated_icu", "icu_min", "icu_max",
                   data_icu_forecast, "icu",
                   pick_label("Number of patients in ICU\n",
                              "Stevilo bolnikov na intenzivni negi\n"))
  p4_IN <- make_panel(data_icui, "reported_icu",
                      data_icui, "estimated_icu", "icu_min", "icu_max",
                      data_icui_forecast, "icui",
                      pick_label("ICU IN\n", "Stevilo bolnikov na intenzivni negi\n"))
  p4_OUT <- make_panel(data_icuo, "reported_icu",
                       data_icuo, "estimated_icu", "icu_min", "icu_max",
                       data_icuo_forecast, "icuo",
                       pick_label("ICU OUT\n", "Stevilo bolnikov na intenzivni negi\n"))
  # Full 10-panel grid (print PDF + desktop/mobile SVGs for the website).
  p <- plot_grid(p1, p2, p3, p4,
                 p1_h, p1_c, p3_IN, p3_OUT, p4_IN, p4_OUT,
                 ncol = 4)
  save_plot(filename = paste0("combinedTDI/figures/", country, "_forecast_", filename, ".pdf"),
            p, base_width = 10*4/3, base_height = 7*3/2)
  dir.create("combinedTDI/web/figures/desktop/", showWarnings = FALSE, recursive = TRUE)
  save_plot(filename = paste0("combinedTDI/web/figures/desktop/", country, "_forecast_", filename, ".svg"),
            p, base_width = 14, base_height = 7)
  dir.create("combinedTDI/web/figures/mobile/", showWarnings = FALSE, recursive = TRUE)
  save_plot(filename = paste0("combinedTDI/web/figures/mobile/", country, "_forecast_", filename, ".svg"),
            p, base_width = 10, base_height = 7)
  # Reduced 4-panel grid:
  p <- plot_grid(p1, p2, p3, p4,
                 ncol = 4)
  save_plot(filename = paste0("combinedTDI/figures/", country, "_forecast4_", filename, ".pdf"),
            p, base_width = 14)
  dir.create("combinedTDI/web/figures/desktop/", showWarnings = FALSE, recursive = TRUE)
  save_plot(filename = paste0("combinedTDI/web/figures/desktop/", country, "_forecast4_", filename, ".svg"),
            p, base_width = 14)
  dir.create("combinedTDI/web/figures/mobile/", showWarnings = FALSE, recursive = TRUE)
  save_plot(filename = paste0("combinedTDI/web/figures/mobile/", country, "_forecast4_", filename, ".svg"),
            p, base_width = 14)
  # Additional combined plot for the ibmi html page, if the saved graph
  # objects exist for this run.
  if(file.exists(paste0("combinedTDI/results/", filename, '_', "graphs.RData"))){
    # Keep copies first: load() below overwrites p1..p3 (and brings in
    # p3_mobile) from the RData file.
    p6 <- p4
    p5 <- p3
    p4 <- p2
    p2 <- p1
    load(file = paste0("combinedTDI/results/", filename, '_', "graphs.RData"))
    p3_mobile <- p3_mobile +
      theme(legend.text = element_text(size = 7))
    p_grouped <- plot_grid(p1, p4, p3_mobile,
                           ncol = 3, rel_widths = c(1, 1, 1))
    p_grouped2 <- plot_grid(p2, p1_h, p1_c,
                            ncol = 3, rel_widths = c(1, 1, 1))
    p_grouped3 <- plot_grid(p5, p3_IN, p3_OUT,
                            ncol = 3, rel_widths = c(1, 1, 1))
    p_grouped4 <- plot_grid(p6, p4_IN, p4_OUT,
                            ncol = 3, rel_widths = c(1, 1, 1))
    p <- plot_grid(p_grouped, p_grouped2,
                   p_grouped3, p_grouped4, nrow = 4)
    save_plot(filename = paste0("combinedTDI/web/figures/mobile/", country, "_together_", filename, ".svg"),
              p, base_width = 14, base_height = 7.42*2)
  }
  # Forecast-error summary. Forecast frame keyed by the matching
  # later_data column name; all forecast frames reuse the column name
  # estimated_deaths_forecast regardless of the quantity.
  forecast_by_metric <- list(
    deaths  = data_country_forecast,
    deathsh = data_deathsh_forecast,
    deathsc = data_deathsc_forecast,
    cases   = data_cases_forecast,
    hosp    = data_hosp_forecast,
    hospi   = data_hospi_forecast,
    hospo   = data_hospo_forecast,
    icu     = data_icu_forecast,
    icui    = data_icui_forecast,
    icuo    = data_icuo_forecast
  )
  mse_names <- paste0(names(forecast_by_metric), "_MSE")
  # Mean relative squared error over the first `horizon` forecast days:
  # mean( (Y_i - Yhat_i)^2 / Yhat_i ). Forecast row 1 is the last observed
  # day, hence the +1 offset into the prediction vector.
  rel_sq_err <- function(obs, pred, horizon) {
    mean(vapply(seq_len(horizon), function(iti)
      (obs[iti] - pred[iti + 1])^2 / pred[iti + 1], numeric(1)))
  }
  # NOTE(review): as in the original, when later_data is empty the saved
  # data frame keeps this all-zero placeholder row.
  mse_df <- data.frame(forecast = 0,
                       as.list(setNames(rep(0, length(mse_names)), mse_names)))
  if(nrow(later_data) > 0){
    horizons <- c(1, 3, 5, 7, 10, 14)
    horizons <- horizons[horizons <= nrow(later_data)]
    for(for_i in horizons){
      vals <- vapply(names(forecast_by_metric), function(m)
        rel_sq_err(later_data[[m]],
                   forecast_by_metric[[m]]$estimated_deaths_forecast,
                   for_i), numeric(1))
      mse_df <- rbind(mse_df,
                      data.frame(forecast = for_i,
                                 as.list(setNames(vals, mse_names))))
    }
    mse_df <- mse_df[-1, ]
  }
  save(mse_df, file = paste0("combinedTDI/results/", country, "-MSE-", filename, ".RData"))
}
#-----------------------------------------------------------------------------------------------
# Script entry point: load the stan fit named on the command line and write
# all forecast figures and the forecast-error summary.
make_forecast_plot()
| /combinedTDI/plot-forecast.r | permissive | marijapurgar/COVID-19-Slovenia | R | false | false | 59,777 | r | library(ggplot2)
library(tidyr)
library(dplyr)
library(rstan)
library(data.table)
library(lubridate)
library(gdata)
library(EnvStats)
library(matrixStats)
library(scales)
library(gridExtra)
library(ggpubr)
library(bayesplot)
library(cowplot)
library(gsheet)
source("geom-stepribbon.r")
#---------------------------------------------------------------------------
make_forecast_plot <- function(){
args <- commandArgs(trailingOnly = TRUE)
filename <- args[1]
load(paste0("combinedTDI/results/", filename))
filename <- strsplit(filename, "-stanfit.Rdata")[[1]][1]
for(i in 1:length(countries)){
N <- length(dates[[i]])
N2 <- N + forecast
country <- countries[[i]]
predicted_cases <- colMeans(prediction[,1:N,i])
predicted_cases_li <- colQuantiles(prediction[,1:N,i], probs=.025)
predicted_cases_ui <- colQuantiles(prediction[,1:N,i], probs=.975)
estimated_deaths <- colMeans(estimated.deaths[,1:N,i])
estimated_deaths_li <- colQuantiles(estimated.deaths[,1:N,i], probs=.025)
estimated_deaths_ui <- colQuantiles(estimated.deaths[,1:N,i], probs=.975)
estimated_deaths_forecast <- colMeans(estimated.deaths[,1:N2,i])[N:N2]
estimated_deaths_li_forecast <- colQuantiles(estimated.deaths[,1:N2,i], probs=.025)[N:N2]
estimated_deaths_ui_forecast <- colQuantiles(estimated.deaths[,1:N2,i], probs=.975)[N:N2]
estimated_deathsh <- colMeans(estimated.deathsh[,1:N,i])
estimated_deathsh_li <- colQuantiles(estimated.deathsh[,1:N,i], probs=.025)
estimated_deathsh_ui <- colQuantiles(estimated.deathsh[,1:N,i], probs=.975)
estimated_deathsh_li2 <- colQuantiles(estimated.deathsh[,1:N,i], probs=.25)
estimated_deathsh_ui2 <- colQuantiles(estimated.deathsh[,1:N,i], probs=.75)
estimated_deathsh_forecast <- colMeans(estimated.deathsh[,1:N2,i])[N:N2]
estimated_deathsh_li_forecast <- colQuantiles(estimated.deathsh[,1:N2,i], probs=.025)[N:N2]
estimated_deathsh_ui_forecast <- colQuantiles(estimated.deathsh[,1:N2,i], probs=.975)[N:N2]
estimated_deathsc <- colMeans(estimated.deathsc[,1:N,i])
estimated_deathsc_li <- colQuantiles(estimated.deathsc[,1:N,i], probs=.025)
estimated_deathsc_ui <- colQuantiles(estimated.deathsc[,1:N,i], probs=.975)
estimated_deathsc_li2 <- colQuantiles(estimated.deathsc[,1:N,i], probs=.25)
estimated_deathsc_ui2 <- colQuantiles(estimated.deathsc[,1:N,i], probs=.75)
estimated_deathsc_forecast <- colMeans(estimated.deathsc[,1:N2,i])[N:N2]
estimated_deathsc_li_forecast <- colQuantiles(estimated.deathsc[,1:N2,i], probs=.025)[N:N2]
estimated_deathsc_ui_forecast <- colQuantiles(estimated.deathsc[,1:N2,i], probs=.975)[N:N2]
estimated_cases <- colMeans(estimated.cases[,1:N,i])
estimated_cases_li <- colQuantiles(estimated.cases[,1:N,i], probs=.025)
estimated_cases_ui <- colQuantiles(estimated.cases[,1:N,i], probs=.975)
estimated_cases_li2 <- colQuantiles(estimated.cases[,1:N,i], probs=.25)
estimated_cases_ui2 <- colQuantiles(estimated.cases[,1:N,i], probs=.75)
estimated_cases_forecast <- colMeans(estimated.cases[,1:N2,i])[N:N2]
estimated_cases_li_forecast <- colQuantiles(estimated.cases[,1:N2,i], probs=.025)[N:N2]
estimated_cases_ui_forecast <- colQuantiles(estimated.cases[,1:N2,i], probs=.975)[N:N2]
estimated_hosp <- colMeans(estimated.hospitals[,1:N,i])
estimated_hosp_li <- colQuantiles(estimated.hospitals[,1:N,i], probs=.025)
estimated_hosp_ui <- colQuantiles(estimated.hospitals[,1:N,i], probs=.975)
estimated_hosp_li2 <- colQuantiles(estimated.hospitals[,1:N,i], probs=.25)
estimated_hosp_ui2 <- colQuantiles(estimated.hospitals[,1:N,i], probs=.75)
estimated_hosp_forecast <- colMeans(estimated.hospitals[,1:N2,i])[N:N2]
estimated_hosp_li_forecast <- colQuantiles(estimated.hospitals[,1:N2,i], probs=.025)[N:N2]
estimated_hosp_ui_forecast <- colQuantiles(estimated.hospitals[,1:N2,i], probs=.975)[N:N2]
estimated_hospi <- colMeans(estimated.hospitalsi[,1:N,i])
estimated_hospi_li <- colQuantiles(estimated.hospitalsi[,1:N,i], probs=.025)
estimated_hospi_ui <- colQuantiles(estimated.hospitalsi[,1:N,i], probs=.975)
estimated_hospi_li2 <- colQuantiles(estimated.hospitalsi[,1:N,i], probs=.25)
estimated_hospi_ui2 <- colQuantiles(estimated.hospitalsi[,1:N,i], probs=.75)
estimated_hospi_forecast <- colMeans(estimated.hospitalsi[,1:N2,i])[N:N2]
estimated_hospi_li_forecast <- colQuantiles(estimated.hospitalsi[,1:N2,i], probs=.025)[N:N2]
estimated_hospi_ui_forecast <- colQuantiles(estimated.hospitalsi[,1:N2,i], probs=.975)[N:N2]
estimated_hospo <- colMeans(estimated.hospitalso[,1:N,i])
estimated_hospo_li <- colQuantiles(estimated.hospitalso[,1:N,i], probs=.025)
estimated_hospo_ui <- colQuantiles(estimated.hospitalso[,1:N,i], probs=.975)
estimated_hospo_li2 <- colQuantiles(estimated.hospitalso[,1:N,i], probs=.25)
estimated_hospo_ui2 <- colQuantiles(estimated.hospitalso[,1:N,i], probs=.75)
estimated_hospo_forecast <- colMeans(estimated.hospitalso[,1:N2,i])[N:N2]
estimated_hospo_li_forecast <- colQuantiles(estimated.hospitalso[,1:N2,i], probs=.025)[N:N2]
estimated_hospo_ui_forecast <- colQuantiles(estimated.hospitalso[,1:N2,i], probs=.975)[N:N2]
estimated_icu <- colMeans(estimated.icus[,1:N,i])
estimated_icu_li <- colQuantiles(estimated.icus[,1:N,i], probs=.025)
estimated_icu_ui <- colQuantiles(estimated.icus[,1:N,i], probs=.975)
estimated_icu_li2 <- colQuantiles(estimated.icus[,1:N,i], probs=.25)
estimated_icu_ui2 <- colQuantiles(estimated.icus[,1:N,i], probs=.75)
estimated_icu_forecast <- colMeans(estimated.icus[,1:N2,i])[N:N2]
estimated_icu_li_forecast <- colQuantiles(estimated.icus[,1:N2,i], probs=.025)[N:N2]
estimated_icu_ui_forecast <- colQuantiles(estimated.icus[,1:N2,i], probs=.975)[N:N2]
estimated_icui <- colMeans(estimated.icusi[,1:N,i])
estimated_icui_li <- colQuantiles(estimated.icusi[,1:N,i], probs=.025)
estimated_icui_ui <- colQuantiles(estimated.icusi[,1:N,i], probs=.975)
estimated_icui_li2 <- colQuantiles(estimated.icusi[,1:N,i], probs=.25)
estimated_icui_ui2 <- colQuantiles(estimated.icusi[,1:N,i], probs=.75)
estimated_icui_forecast <- colMeans(estimated.icusi[,1:N2,i])[N:N2]
estimated_icui_li_forecast <- colQuantiles(estimated.icusi[,1:N2,i], probs=.025)[N:N2]
estimated_icui_ui_forecast <- colQuantiles(estimated.icusi[,1:N2,i], probs=.975)[N:N2]
estimated_icuo <- colMeans(estimated.icuso[,1:N,i])
estimated_icuo_li <- colQuantiles(estimated.icuso[,1:N,i], probs=.025)
estimated_icuo_ui <- colQuantiles(estimated.icuso[,1:N,i], probs=.975)
estimated_icuo_li2 <- colQuantiles(estimated.icuso[,1:N,i], probs=.25)
estimated_icuo_ui2 <- colQuantiles(estimated.icuso[,1:N,i], probs=.75)
estimated_icuo_forecast <- colMeans(estimated.icuso[,1:N2,i])[N:N2]
estimated_icuo_li_forecast <- colQuantiles(estimated.icuso[,1:N2,i], probs=.025)[N:N2]
estimated_icuo_ui_forecast <- colQuantiles(estimated.icuso[,1:N2,i], probs=.975)[N:N2]
rt <- colMeans(out$Rt_adj[,1:N,i])
rt_li <- colQuantiles(out$Rt_adj[,1:N,i],probs=.025)
rt_ui <- colQuantiles(out$Rt_adj[,1:N,i],probs=.975)
data_country <- data.frame("time" = as_date(as.character(dates[[i]])),
"country" = rep(country, length(dates[[i]])),
#"country_population" = rep(country_population, length(dates[[i]])),
"reported_cases" = reported_cases[[i]],
"reported_cases_c" = cumsum(reported_cases[[i]]),
"predicted_cases_c" = cumsum(predicted_cases),
"predicted_min_c" = cumsum(predicted_cases_li),
"predicted_max_c" = cumsum(predicted_cases_ui),
"predicted_cases" = predicted_cases,
"predicted_min" = predicted_cases_li,
"predicted_max" = predicted_cases_ui,
"deaths" = deaths_by_country[[i]],
"deaths_c" = cumsum(deaths_by_country[[i]]),
"estimated_deaths_c" = cumsum(estimated_deaths),
"death_min_c" = cumsum(estimated_deaths_li),
"death_max_c"= cumsum(estimated_deaths_ui),
"estimated_deaths" = estimated_deaths,
"death_min" = estimated_deaths_li,
"death_max"= estimated_deaths_ui,
"rt" = rt,
"rt_min" = rt_li,
"rt_max" = rt_ui)
times <- as_date(as.character(dates[[i]]))
if(is.null(forecast)) forecast <- 0
times_forecast <- times[length(times)] + 0:forecast
data_country_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_deaths_forecast,
"death_min_forecast" = estimated_deaths_li_forecast,
"death_max_forecast"= estimated_deaths_ui_forecast)
data_country2 <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_cases_c" = cumsum(estimated_cases),
"cases_min_c" = cumsum(estimated_cases_li),
"cases_max_c"= cumsum(estimated_cases_ui),
"estimated_cases" = estimated_cases,
"cases_min" = estimated_cases_li,
"cases_max"= estimated_cases_ui,
"cases_min2" = estimated_cases_li2,
"cases_max2"= estimated_cases_ui2)
data_cases_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_cases_forecast,
"death_min_forecast" = estimated_cases_li_forecast,
"death_max_forecast"= estimated_cases_ui_forecast)
data_deathsh <- data.frame("time" = as_date(as.character(dates[[i]])),
"deaths" = deathsh_by_country[[i]],
"deaths_c" = cumsum(deathsh_by_country[[i]]),
"estimated_deaths_c" = cumsum(estimated_deathsh),
"death_min_c" = cumsum(estimated_deathsh_li),
"death_max_c"= cumsum(estimated_deathsh_ui),
"estimated_deaths" = estimated_deathsh,
"death_min" = estimated_deathsh_li,
"death_max"= estimated_deathsh_ui,
"death_min2" = estimated_deathsh_li2,
"death_max2"= estimated_deathsh_ui2)
data_deathsh_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_deathsh_forecast,
"death_min_forecast" = estimated_deathsh_li_forecast,
"death_max_forecast"= estimated_deathsh_ui_forecast)
data_deathsc <- data.frame("time" = as_date(as.character(dates[[i]])),
"deaths" = deathsc_by_country[[i]],
"deaths_c" = cumsum(deathsc_by_country[[i]]),
"estimated_deaths_c" = cumsum(estimated_deathsc),
"death_min_c" = cumsum(estimated_deathsc_li),
"death_max_c"= cumsum(estimated_deathsc_ui),
"estimated_deaths" = estimated_deathsc,
"death_min" = estimated_deathsc_li,
"death_max"= estimated_deathsc_ui,
"death_min2" = estimated_deathsc_li2,
"death_max2"= estimated_deathsc_ui2)
data_deathsc_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_deathsc_forecast,
"death_min_forecast" = estimated_deathsc_li_forecast,
"death_max_forecast"= estimated_deathsc_ui_forecast)
data_hosp <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_hosp_c" = cumsum(estimated_hosp),
"hosp_min_c" = cumsum(estimated_hosp_li),
"hosp_max_c"= cumsum(estimated_hosp_ui),
"estimated_hosp" = estimated_hosp,
"hosp_min" = estimated_hosp_li,
"hosp_max"= estimated_hosp_ui,
"hosp_min2" = estimated_hosp_li2,
"hosp_max2"= estimated_hosp_ui2,
"reported_hosp" = hosps_by_country[[i]])
data_hosp_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_hosp_forecast,
"death_min_forecast" = estimated_hosp_li_forecast,
"death_max_forecast"= estimated_hosp_ui_forecast)
data_hospi <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_hosp_c" = cumsum(estimated_hospi),
"hosp_min_c" = cumsum(estimated_hospi_li),
"hosp_max_c"= cumsum(estimated_hospi_ui),
"estimated_hosp" = estimated_hospi,
"hosp_min" = estimated_hospi_li,
"hosp_max"= estimated_hospi_ui,
"hosp_min2" = estimated_hospi_li2,
"hosp_max2"= estimated_hospi_ui2,
"reported_hosp" = hospsin_by_country[[i]])
data_hospi_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_hospi_forecast,
"death_min_forecast" = estimated_hospi_li_forecast,
"death_max_forecast"= estimated_hospi_ui_forecast)
data_hospo <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_hosp_c" = cumsum(estimated_hospo),
"hosp_min_c" = cumsum(estimated_hospo_li),
"hosp_max_c"= cumsum(estimated_hospo_ui),
"estimated_hosp" = estimated_hospo,
"hosp_min" = estimated_hospo_li,
"hosp_max"= estimated_hospo_ui,
"hosp_min2" = estimated_hospo_li2,
"hosp_max2"= estimated_hospo_ui2,
"reported_hosp" = hospsout_by_country[[i]])
data_hospo_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_hospo_forecast,
"death_min_forecast" = estimated_hospo_li_forecast,
"death_max_forecast"= estimated_hospo_ui_forecast)
data_icu <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_icu_c" = cumsum(estimated_icu),
"icu_min_c" = cumsum(estimated_icu_li),
"icu_max_c"= cumsum(estimated_icu_ui),
"estimated_icu" = estimated_icu,
"icu_min" = estimated_icu_li,
"icu_max"= estimated_icu_ui,
"icu_min2" = estimated_icu_li2,
"icu_max2"= estimated_icu_ui2,
"reported_icu" = icus_by_country[[i]])
data_icu_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_icu_forecast,
"death_min_forecast" = estimated_icu_li_forecast,
"death_max_forecast"= estimated_icu_ui_forecast)
data_icui <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_icu_c" = cumsum(estimated_icui),
"icu_min_c" = cumsum(estimated_icui_li),
"icu_max_c"= cumsum(estimated_icui_ui),
"estimated_icu" = estimated_icui,
"icu_min" = estimated_icui_li,
"icu_max"= estimated_icui_ui,
"icu_min2" = estimated_icui_li2,
"icu_max2"= estimated_icui_ui2,
"reported_icu" = icusin_by_country[[i]])
data_icui_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_icui_forecast,
"death_min_forecast" = estimated_icui_li_forecast,
"death_max_forecast"= estimated_icui_ui_forecast)
data_icuo <- data.frame("time" = as_date(as.character(dates[[i]])),
"estimated_icu_c" = cumsum(estimated_icuo),
"icu_min_c" = cumsum(estimated_icuo_li),
"icu_max_c"= cumsum(estimated_icuo_ui),
"estimated_icu" = estimated_icuo,
"icu_min" = estimated_icuo_li,
"icu_max"= estimated_icuo_ui,
"icu_min2" = estimated_icuo_li2,
"icu_max2"= estimated_icuo_ui2,
"reported_icu" = icusout_by_country[[i]])
data_icuo_forecast <- data.frame("time" = times_forecast,
"country" = rep(country, forecast+1),
"estimated_deaths_forecast" = estimated_icuo_forecast,
"death_min_forecast" = estimated_icuo_li_forecast,
"death_max_forecast"= estimated_icuo_ui_forecast)
###
# Add later data:
load("data.Rdata")
day<-strsplit(dd[-1,2],split="\\.")
# Attach a year to the day/month pair at index i of the split date list x.
#
# Each x[[i]] comes from strsplit(dd[-1, 2], split = "\\.") and holds the
# day-of-month in x[[i]][1] and a month component in x[[i]][2].
# NOTE(review): the exact raw format of dd[, 2] is not visible here; the
# space-split on the month component below suggests it may carry extra
# whitespace (e.g. " 12") — confirm against data.Rdata.
# The data spans late 2020 into 2021, so entries positioned after the last
# December entry are assigned to 2021; everything else to 2020.
#
# i: integer index into x.
# x: list of character vectors as produced by strsplit on ".".
# Returns a single string "d.m.yyyy", parsed downstream with
# as.Date(format = "%d.%m.%Y").
ff <- function(i, x){
  # Indices whose month component is exactly "12" (December).
  decembr <- which(unlist(lapply(x, function(iter) iter[2] == '12')))
  if(identical(integer(0), decembr)){
    # No December present: every entry belongs to 2020.
    leto <- '.2020'
  } else{
    # Fix: the original compared against the undefined name `december`
    # (typo for `decembr`), which raised an error whenever a December
    # entry was present in the data.
    if(max(decembr) < i){
      leto <- '.2021'
    } else{
      leto <- '.2020'
    }
  }
  # Fix: the original indexed the enclosing `day` list here instead of the
  # argument x. Equivalent at the single call site (which passes `day`),
  # but it made ff silently depend on a global; use the argument instead.
  paste(x[[i]][1], ".", strsplit(x[[i]][2], " ")[[1]][2], leto, sep = "")
}
day<-unlist(lapply(1:length(day),ff,day))
day<-as.Date(day,format="%d.%m.%Y")
day<-c((day[1]-1:50)[order(day[1]-1:50)],day)
# Build `later_data`: observed values that became available AFTER the model's
# fitted window, used downstream to overlay real bars on the forecast plots.
# Only constructed when the loaded `day` vector extends past the modelled
# dates; otherwise an empty data.frame is produced (the plotting code checks
# for this via identical(later_data, data.frame())).
# NOTE(review): all series come from the `dd` data frame loaded from
# data.Rdata, whose schema is not visible here — column meanings below are
# inferred from the (Slovenian) column names; confirm against the data file.
if(max(dates[[i]]) < max(day)){
# Find all later dates:
wh_dates <- which(day > max(dates[[i]]))
# Cap at the forecast horizon so observed bars align with forecast days.
if(forecast<length(wh_dates)){
wh_dates <- wh_dates[1:forecast]
}
# Prepare data:
# Deaths:
deaths<- as.numeric(dd$umrli..vsi.[-1]) #cumulative!
deaths[is.na(deaths)]<-0
# Pad with 50 leading zeros, matching the 50 extra days prepended to `day`.
deaths<-c(rep(0,50),deaths)
# Difference the cumulative series to obtain daily deaths.
deathsi<-deaths
for (ite in 2:length(deaths)){
deathsi[ite]<-deaths[ite]-deaths[ite-1]
}
deaths<-deathsi
deathsc<-c(rep(0,50), as.numeric(dd$D.u[-1])) #cumulative! increments can be negative!
deathsc[is.na(deathsc)] <- 0
deathsh<-c(rep(0,50), as.numeric(dd$H.D[-1])) #cumulative! increments can be negative!
deathsh[is.na(deathsh)] <- 0
# Cases:
cases<- c(rep(0,50),as.numeric(dd$potrjeni.danes[-1]))
cases[is.na(cases)]<-0
# Hosp:
hospitalizirani<-as.numeric(dd$hospitalizirani..trenutno.[-1]) #cumulative! increments can be negative!
hospitalizirani[is.na(hospitalizirani)]<-0
hospitalizirani<-c(rep(0,50),hospitalizirani)
# Hospital admissions (dd$novi = "new"); used as-is, not differenced.
hospitaliziraniin<-dd$novi[-1]
hospitaliziraniin[is.na(hospitaliziraniin)]<-0
hospitaliziraniin<-c(rep(0,50),hospitaliziraniin)
# Hospital discharges: reported cumulatively, differenced below.
hospitaliziraniout<-as.numeric(dd$iz.bol..oskrbe..vsi.[-1])
hospitaliziraniout[is.na(hospitaliziraniout)]<-0
hospitaliziraniout<-c(rep(0,50),hospitaliziraniout)
hospitaliziraniouti<-hospitaliziraniout
for (ite in 2:length(hospitaliziraniout)){
hospitaliziraniouti[ite]<-hospitaliziraniout[ite]-hospitaliziraniout[ite-1]
}
hospitaliziraniout<-hospitaliziraniouti
# ICU:
icu<-c(rep(0,50), as.numeric(dd$intenzivna.enota..trenutno.[-1])) #cumulative! increments can be negative!
icu[is.na(icu)] <- 0
icuin<-c(rep(0,50), as.numeric(dd$I.i[-1])) #cumulative! increments can be negative!
icuin[is.na(icuin)] <- 0
icuout<-c(rep(0,50), as.numeric(dd$I.o[-1])) #cumulative! increments can be negative!
icuout[is.na(icuout)] <- 0
# One row per post-window date, restricted to the selected indices.
later_data <- data.frame("time" = day[wh_dates],
"deaths" = deaths[wh_dates],
"deathsh" = deathsh[wh_dates],
"deathsc" = deathsc[wh_dates],
"cases" = cases[wh_dates],
"hosp" = hospitalizirani[wh_dates],
"hospi" = hospitaliziraniin[wh_dates],
"hospo" = hospitaliziraniout[wh_dates],
"icu" = icu[wh_dates],
"icui" = icuin[wh_dates],
"icuo" = icuout[wh_dates])
} else{
# Nothing newer than the fitted window: leave the sentinel empty frame.
later_data <- data.frame()
}
###
# Collect posterior draws for the out-of-sample period only — columns
# (N+1):N2, i.e. days beyond the fitted window N up to the forecast horizon
# N2 — for country i, across every modelled series, and persist them for
# later out-of-time evaluation.
posteriors_out_of_time <- list(
estimated.deaths=estimated.deaths[,(N+1):N2,i],
estimated.deathsh=estimated.deathsh[,(N+1):N2,i],
estimated.deathsc=estimated.deathsc[,(N+1):N2,i],
estimated.cases=estimated.cases[,(N+1):N2,i],
estimated.hospitals=estimated.hospitals[,(N+1):N2,i],
estimated.hospitalsi=estimated.hospitalsi[,(N+1):N2,i],
estimated.hospitalso=estimated.hospitalso[,(N+1):N2,i],
estimated.icus=estimated.icus[,(N+1):N2,i],
estimated.icusi=estimated.icusi[,(N+1):N2,i],
estimated.icuso=estimated.icuso[,(N+1):N2,i])
# File name encodes country and run identifier; directory must already exist.
save(posteriors_out_of_time, file=paste0("combinedTDI/results/", country, "-posteriors-", filename, ".RData"))
###
make_single_plot(data_country = data_country, data_country2 = data_country2,
data_deathsh = data_deathsh, data_deathsc = data_deathsc,
data_hosp = data_hosp, data_hospi = data_hospi, data_hospo = data_hospo,
data_icu = data_icu, data_icui = data_icui, data_icuo = data_icuo,
data_country_forecast = data_country_forecast,
data_deathsh_forecast = data_deathsh_forecast,
data_deathsc_forecast = data_deathsc_forecast,
data_cases_forecast = data_cases_forecast,
data_hosp_forecast = data_hosp_forecast,
data_hospi_forecast = data_hospi_forecast,
data_hospo_forecast = data_hospo_forecast,
data_icu_forecast = data_icu_forecast,
data_icui_forecast = data_icui_forecast,
data_icuo_forecast = data_icuo_forecast,
filename = filename,
country = country,
model_input = model_input,
later_data = later_data)
}
}
make_single_plot <- function(data_country, data_country2,
data_deathsh, data_deathsc,
data_hosp, data_hospi, data_hospo,
data_icu, data_icui, data_icuo,
data_country_forecast,
data_deathsh_forecast, data_deathsc_forecast,
data_cases_forecast,
data_hosp_forecast, data_hospi_forecast, data_hospo_forecast,
data_icu_forecast, data_icui_forecast, data_icuo_forecast,
filename, country, model_input, later_data){
language_english <- TRUE
data_deaths <- data_country %>%
select(time, deaths, estimated_deaths) %>%
gather("key" = key, "value" = value, -time)
data_deaths_forecast <- data_country_forecast %>%
select(time, estimated_deaths_forecast) %>%
gather("key" = key, "value" = value, -time)
# Force less than 1 case to zero
data_deaths$value[data_deaths$value < 1] <- NA
data_deaths_forecast$value[data_deaths_forecast$value < 1] <- NA
data_deaths_all <- rbind(data_deaths, data_deaths_forecast)
# Lab texts:
if(language_english) xlab_text <- 'Date'
else xlab_text <- 'Datum'
if(language_english) ylab_text_deaths <- 'Daily number of deaths\n'
else ylab_text_deaths <- "Dnevno stevilo umrlih\n"
## Deaths:
p1 <- ggplot(data_country) +
geom_bar(data = data_country, aes(x = time, y = deaths),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p1 <- p1 +
geom_bar(data = later_data, aes(x = time, y = deaths),
fill = "coral4", stat='identity', alpha=0.5)
}
p1 <- p1 +
geom_line(data = data_country, aes(x = time, y = estimated_deaths),
col = "deepskyblue4") +
geom_line(data = data_country_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_country, aes(x = time,
ymin = death_min,
ymax = death_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_country_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_deaths) +
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_country$death_max, data_country$deaths, data_country_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_country$death_max, data_country$deaths, data_country_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
if(language_english) ylab_text_cases <- "Daily number of infections\n"
else ylab_text_cases <- "Dnevno stevilo potrjeno okuzenih\n"
## Deaths H:
# Lab texts:
if(language_english) xlab_text <- 'Date'
else xlab_text <- 'Datum'
if(language_english) ylab_text_deaths <- 'Deaths H\n'
else ylab_text_deaths <- "Dnevno stevilo umrlih\n"
p1_h <- ggplot(data_deathsh) +
geom_bar(data = data_deathsh, aes(x = time, y = deaths),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p1_h <- p1_h +
geom_bar(data = later_data, aes(x = time, y = deathsh),
fill = "coral4", stat='identity', alpha=0.5)
}
p1_h <- p1_h +
geom_line(data = data_deathsh, aes(x = time, y = estimated_deaths),
col = "deepskyblue4") +
geom_line(data = data_deathsh_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_deathsh, aes(x = time,
ymin = death_min,
ymax = death_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_deathsh_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_deaths) +
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_deathsh$death_max, data_deathsh$deaths, data_deathsh_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_deathsh$death_max, data_deathsh$deaths, data_deathsh_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
## Deaths C:
# Lab texts:
if(language_english) xlab_text <- 'Date'
else xlab_text <- 'Datum'
if(language_english) ylab_text_deaths <- 'Deaths C\n'
else ylab_text_deaths <- "Dnevno stevilo umrlih\n"
p1_c <- ggplot(data_deathsc) +
geom_bar(data = data_deathsc, aes(x = time, y = deaths),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p1_c <- p1_c +
geom_bar(data = later_data, aes(x = time, y = deathsc),
fill = "coral4", stat='identity', alpha=0.5)
}
p1_c <- p1_c +
geom_line(data = data_deathsc, aes(x = time, y = estimated_deaths),
col = "deepskyblue4") +
geom_line(data = data_deathsc_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_deathsc, aes(x = time,
ymin = death_min,
ymax = death_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_deathsc_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_deaths) +
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_deathsc$death_max, data_deathsc$deaths, data_deathsc_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_deathsc$death_max, data_deathsc$deaths, data_deathsc_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
if(language_english) ylab_text_cases <- "Daily number of confirmed infections\n"
else ylab_text_cases <- "Dnevno stevilo potrjeno okuzenih\n"
## Cases:
p2 <- ggplot(data_country) +
geom_bar(data = data_country, aes(x = time, y = reported_cases),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p2 <- p2 +
geom_bar(data = later_data, aes(x = time, y = cases),
fill = "coral4", stat='identity', alpha=0.5)
}
# if(data_country_forecast$country[1]=='Slovenia'){
# p <- p +
# geom_bar(data = data_country_forecast[2:nrow(data_country_forecast),], aes(x = time, y = deaths),
# fill = "coral4", stat='identity', alpha=0.5)
# }
p2 <- p2 +
geom_line(data = data_country2, aes(x = time, y = estimated_cases),
col = "deepskyblue4") +
geom_line(data = data_cases_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_country2, aes(x = time,
ymin = cases_min,
ymax = cases_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_cases_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_cases) +
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_country2$cases_max, data_country$reported_cases, data_cases_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_country2$cases_max, data_country$reported_cases, data_cases_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
if(language_english) ylab_text_hosp <- "Number of hospitalized patients\n"
else ylab_text_hosp <- "Stevilo hospitaliziranih\n"
## Hospitalizations:
p3 <- ggplot(data_country) +
geom_bar(data = data_hosp, aes(x = time, y = reported_hosp),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p3 <- p3 +
geom_bar(data = later_data, aes(x = time, y = hosp),
fill = "coral4", stat='identity', alpha=0.5)
}
# if(data_country_forecast$country[1]=='Slovenia'){
# p <- p +
# geom_bar(data = data_country_forecast[2:nrow(data_country_forecast),], aes(x = time, y = deaths),
# fill = "coral4", stat='identity', alpha=0.5)
# }
p3 <- p3 +
geom_line(data = data_hosp, aes(x = time, y = estimated_hosp),
col = "deepskyblue4") +
geom_line(data = data_hosp_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_hosp, aes(x = time,
ymin = hosp_min,
ymax = hosp_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_hosp_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_hosp)+
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_hosp$hosp_max, data_hosp$reported_hosp, data_hosp_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_hosp$hosp_max, data_hosp$reported_hosp, data_hosp_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
if(language_english) ylab_text_hosp <- "Hospitalized IN\n"
else ylab_text_hosp <- "Stevilo hospitaliziranih\n"
## Hospitalizations IN:
p3_IN <- ggplot(data_country) +
geom_bar(data = data_hospi, aes(x = time, y = reported_hosp),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p3_IN <- p3_IN +
geom_bar(data = later_data, aes(x = time, y = hospi),
fill = "coral4", stat='identity', alpha=0.5)
}
# if(data_country_forecast$country[1]=='Slovenia'){
# p <- p +
# geom_bar(data = data_country_forecast[2:nrow(data_country_forecast),], aes(x = time, y = deaths),
# fill = "coral4", stat='identity', alpha=0.5)
# }
p3_IN <- p3_IN +
geom_line(data = data_hospi, aes(x = time, y = estimated_hosp),
col = "deepskyblue4") +
geom_line(data = data_hospi_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_hospi, aes(x = time,
ymin = hosp_min,
ymax = hosp_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_hospi_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_hosp)+
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_hospi$hosp_max, data_hospi$reported_hosp, data_hospi_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_hospi$hosp_max, data_hospi$reported_hosp, data_hospi_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
if(language_english) ylab_text_hosp <- "Hospitalized OUT\n"
else ylab_text_hosp <- "Stevilo hospitaliziranih\n"
## Hospitalizations OUT:
p3_OUT <- ggplot(data_country) +
geom_bar(data = data_hospo, aes(x = time, y = reported_hosp),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p3_OUT <- p3_OUT +
geom_bar(data = later_data, aes(x = time, y = hospo),
fill = "coral4", stat='identity', alpha=0.5)
}
# if(data_country_forecast$country[1]=='Slovenia'){
# p <- p +
# geom_bar(data = data_country_forecast[2:nrow(data_country_forecast),], aes(x = time, y = deaths),
# fill = "coral4", stat='identity', alpha=0.5)
# }
p3_OUT <- p3_OUT +
geom_line(data = data_hospo, aes(x = time, y = estimated_hosp),
col = "deepskyblue4") +
geom_line(data = data_hospo_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_hospo, aes(x = time,
ymin = hosp_min,
ymax = hosp_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_hospo_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_hosp)+
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_hospo$hosp_max, data_hospo$reported_hosp, data_hospo_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_hospo$hosp_max, data_hospo$reported_hosp, data_hospo_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
if(language_english) ylab_text_icu <- "Number of patients in ICU\n"
else ylab_text_icu <- "Stevilo bolnikov na intenzivni negi\n"
## ICUs:
p4 <- ggplot(data_country) +
geom_bar(data = data_icu, aes(x = time, y = reported_icu),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p4 <- p4 +
geom_bar(data = later_data, aes(x = time, y = icu),
fill = "coral4", stat='identity', alpha=0.5)
}
# if(data_country_forecast$country[1]=='Slovenia'){
# p <- p +
# geom_bar(data = data_country_forecast[2:nrow(data_country_forecast),], aes(x = time, y = deaths),
# fill = "coral4", stat='identity', alpha=0.5)
# }
p4 <- p4 +
geom_line(data = data_icu, aes(x = time, y = estimated_icu),
col = "deepskyblue4") +
geom_line(data = data_icu_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_icu, aes(x = time,
ymin = icu_min,
ymax = icu_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_icu_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_icu)+
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_icu$icu_max, data_icu$reported_icu, data_icu_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_icu$icu_max, data_icu$reported_icu, data_icu_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
if(language_english) ylab_text_icu <- "ICU IN\n"
else ylab_text_icu <- "Stevilo bolnikov na intenzivni negi\n"
## ICUs IN:
p4_IN <- ggplot(data_country) +
geom_bar(data = data_icui, aes(x = time, y = reported_icu),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p4_IN <- p4_IN +
geom_bar(data = later_data, aes(x = time, y = icui),
fill = "coral4", stat='identity', alpha=0.5)
}
# if(data_country_forecast$country[1]=='Slovenia'){
# p <- p +
# geom_bar(data = data_country_forecast[2:nrow(data_country_forecast),], aes(x = time, y = deaths),
# fill = "coral4", stat='identity', alpha=0.5)
# }
p4_IN <- p4_IN +
geom_line(data = data_icui, aes(x = time, y = estimated_icu),
col = "deepskyblue4") +
geom_line(data = data_icui_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_icui, aes(x = time,
ymin = icu_min,
ymax = icu_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_icui_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_icu)+
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_icui$icu_max, data_icui$reported_icu, data_icui_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_icui$icu_max, data_icui$reported_icu, data_icui_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
if(language_english) ylab_text_icu <- "ICU OUT\n"
else ylab_text_icu <- "Stevilo bolnikov na intenzivni negi\n"
## ICUs OUT:
p4_OUT <- ggplot(data_country) +
geom_bar(data = data_icuo, aes(x = time, y = reported_icu),
fill = "coral4", stat='identity', alpha=0.5)
if(!identical(later_data, data.frame())){
p4_OUT <- p4_OUT +
geom_bar(data = later_data, aes(x = time, y = icuo),
fill = "coral4", stat='identity', alpha=0.5)
}
# if(data_country_forecast$country[1]=='Slovenia'){
# p <- p +
# geom_bar(data = data_country_forecast[2:nrow(data_country_forecast),], aes(x = time, y = deaths),
# fill = "coral4", stat='identity', alpha=0.5)
# }
p4_OUT <- p4_OUT +
geom_line(data = data_icuo, aes(x = time, y = estimated_icu),
col = "deepskyblue4") +
geom_line(data = data_icuo_forecast,
aes(x = time, y = estimated_deaths_forecast),
col = "black", alpha = 0.5) +
geom_ribbon(data = data_icuo, aes(x = time,
ymin = icu_min,
ymax = icu_max),
fill="deepskyblue4", alpha=0.3) +
geom_ribbon(data = data_icuo_forecast,
aes(x = time,
ymin = death_min_forecast,
ymax = death_max_forecast),
fill = "black", alpha=0.35) +
geom_vline(xintercept = data_deaths$time[length(data_deaths$time)],
col = "black", linetype = "dashed", alpha = 0.5) +
#scale_fill_manual(name = "",
# labels = c("Confirmed deaths", "Predicted deaths"),
# values = c("coral4", "deepskyblue4")) +
xlab(xlab_text) +
ylab(ylab_text_icu)+
scale_x_date(date_breaks = "weeks", labels = date_format("%e %b")) +
# scale_y_continuous(trans='log10', labels=comma) +
scale_y_continuous(breaks = seq(0, ceiling(max(c(data_icuo$icu_max, data_icuo$reported_icu, data_icuo_forecast$death_max_forecast))), length.out = 6))+
coord_cartesian(ylim = c(0, ceiling(max(c(data_icuo$icu_max, data_icuo$reported_icu, data_icuo_forecast$death_max_forecast)))), expand = FALSE) +
# scale_y_continuous(breaks = c(0,2,4,6,8,10))+
# coord_cartesian(ylim = c(0, 10), expand = FALSE) +
theme_pubr(base_family="sans") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
guides(fill=guide_legend(ncol=1, reverse = TRUE)) +
annotate(geom="text", x=data_country$time[length(data_country$time)]+8,
y=10000, label="",
color="black")
p <- plot_grid(p1,p2, p3, p4,
p1_h, p1_c, p3_IN, p3_OUT, p4_IN, p4_OUT,
ncol = 4)
save_plot(filename = paste0("combinedTDI/figures/", country, "_forecast_", filename, ".pdf"),
p, base_width = 10*4/3, base_height=7*3/2)
# Produce plots for Website
dir.create("combinedTDI/web/figures/desktop/", showWarnings = FALSE, recursive = TRUE)
save_plot(filename = paste0("combinedTDI/web/figures/desktop/", country, "_forecast_", filename, ".svg"),
p, base_width = 14, base_height=7)
dir.create("combinedTDI/web/figures/mobile/", showWarnings = FALSE, recursive = TRUE)
save_plot(filename = paste0("combinedTDI/web/figures/mobile/", country, "_forecast_", filename, ".svg"),
p, base_width = 10, base_height=7
# base_height = 4, base_asp = 1.1
)
###
# Plot only 4 graphs:
p <- plot_grid(p1,p2, p3, p4,
ncol = 4)
save_plot(filename = paste0("combinedTDI/figures/", country, "_forecast4_", filename, ".pdf"),
p, base_width = 14)
# Produce plots for Website
dir.create("combinedTDI/web/figures/desktop/", showWarnings = FALSE, recursive = TRUE)
save_plot(filename = paste0("combinedTDI/web/figures/desktop/", country, "_forecast4_", filename, ".svg"),
p, base_width = 14#, base_height=7
)
dir.create("combinedTDI/web/figures/mobile/", showWarnings = FALSE, recursive = TRUE)
save_plot(filename = paste0("combinedTDI/web/figures/mobile/", country, "_forecast4_", filename, ".svg"),
p, base_width = 14#, base_height=7
# base_height = 4, base_asp = 1.1
)
# Additional plot for ibmi html:
if(file.exists(paste0("combinedTDI/results/", filename, '_', "graphs.RData"))){
p6 <- p4
p5 <- p3
p4 <- p2
p2 <- p1
load(file=paste0("combinedTDI/results/", filename, '_', "graphs.RData"))
p3_mobile <- p3_mobile +
theme(legend.text=element_text(size=7))
p_grouped <- plot_grid(p1, p4, p3_mobile,
ncol = 3, rel_widths = c(1, 1, 1))
p_grouped2 <- plot_grid(p2, p1_h, p1_c,
ncol = 3, rel_widths = c(1,1,1))
p_grouped3 <- plot_grid(p5, p3_IN, p3_OUT,
ncol = 3, rel_widths = c(1,1,1))
p_grouped4 <- plot_grid(p6, p4_IN, p4_OUT,
ncol = 3, rel_widths = c(1,1,1))
p <- plot_grid(p_grouped, p_grouped2,
p_grouped3, p_grouped4, nrow=4)
# cowplot::save_plot(filename = paste0("figures/", country, "_together_", filename, ".pdf"),
# p, base_width = 14, base_height=7.42)
save_plot(filename = paste0("combinedTDI/web/figures/mobile/", country, "_together_", filename, ".svg"),
p, base_width = 14, base_height=7.42*2
)
}
# Calculate MSE:
mse_df <- data.frame(forecast=0,
deaths_MSE=0,
deathsh_MSE=0,
deathsc_MSE=0,
cases_MSE=0,
hosp_MSE=0,
hospi_MSE=0,
hospo_MSE=0,
icu_MSE=0,
icui_MSE=0,
icuo_MSE=0)
if(nrow(later_data)>0){
tajms <- c(1, 3, 5, 7, 10, 14)
tajms <- tajms[tajms<=nrow(later_data)]
# This is: (\hat{Y_i} - Y_i)^2 / \hat{Y_i}^2
for(for_i in tajms){
deaths_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$deaths[iti] - data_country_forecast$estimated_deaths_forecast[iti+1])^2/data_country_forecast$estimated_deaths_forecast[iti+1]))
deathsh_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$deathsh[iti] - data_deathsh_forecast$estimated_deaths_forecast[iti+1])^2/data_deathsh_forecast$estimated_deaths_forecast[iti+1]))
deathsc_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$deathsc[iti] - data_deathsc_forecast$estimated_deaths_forecast[iti+1])^2/data_deathsc_forecast$estimated_deaths_forecast[iti+1]))
cases_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$cases[iti] - data_cases_forecast$estimated_deaths_forecast[iti+1])^2/data_cases_forecast$estimated_deaths_forecast[iti+1]))
hosp_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$hosp[iti] - data_hosp_forecast$estimated_deaths_forecast[iti+1])^2/data_hosp_forecast$estimated_deaths_forecast[iti+1]))
hospi_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$hospi[iti] - data_hospi_forecast$estimated_deaths_forecast[iti+1])^2/data_hospi_forecast$estimated_deaths_forecast[iti+1]))
hospo_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$hospo[iti] - data_hospo_forecast$estimated_deaths_forecast[iti+1])^2/data_hospo_forecast$estimated_deaths_forecast[iti+1]))
icu_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$icu[iti] - data_icu_forecast$estimated_deaths_forecast[iti+1])^2/data_icu_forecast$estimated_deaths_forecast[iti+1]))
icui_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$icui[iti] - data_icui_forecast$estimated_deaths_forecast[iti+1])^2/data_icui_forecast$estimated_deaths_forecast[iti+1]))
icuo_MSE <- mean(sapply(1:for_i, function(iti)
(later_data$icuo[iti] - data_icuo_forecast$estimated_deaths_forecast[iti+1])^2/data_icuo_forecast$estimated_deaths_forecast[iti+1]))
mse_df_tmp <- data.frame(forecast=for_i,
deaths_MSE=deaths_MSE,
deathsh_MSE=deathsh_MSE,
deathsc_MSE=deathsc_MSE,
cases_MSE=cases_MSE,
hosp_MSE=hosp_MSE,
hospi_MSE=hospi_MSE,
hospo_MSE=hospo_MSE,
icu_MSE=icu_MSE,
icui_MSE=icui_MSE,
icuo_MSE=icuo_MSE)
mse_df <- rbind(mse_df,
mse_df_tmp)
}
mse_df <- mse_df[-1,]
}
save(mse_df, file=paste0("combinedTDI/results/", country, "-MSE-", filename, ".RData"))
}
#-----------------------------------------------------------------------------------------------
make_forecast_plot()
|
#' Getting voting's results for each club
#'
#' Function \code{votes_get_results} gets voting's results for each club.
#'
#' @details
#' Function \code{votes_get_results} gets voting's results for each club.
#' Example of page with voting's results of PO club:
#' http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?symbol=klubglos&
#' IdGlosowania=37494&KodKlubu=PO
#'
#' @usage votes_get_results(page)
#'
#' @param page club's voting's results page
#'
#' @return data frame with two columns: deputy, vote
#'
#' @examples
#' \dontrun{
#' page <- paste0('http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?',
#' 'symbol=klubglos&IdGlosowania=37494&KodKlubu=PO')
#' votes_get_results(page)}
#'
#' @note
#' All information is stored in PostgreSQL database.
#'
#' @author Piotr Smuda
#'
#' @export
#'
votes_get_results <- function(page) {
    stopifnot(is.character(page))

    # pull the club's voting table from the page
    raw_table <- safe_readHTMLTable(page, encoding = "UTF-8", stringsAsFactors = FALSE)[[1]]

    if (ncol(raw_table) == 6) {
        # two side-by-side (deputy, vote) column groups: stack them and
        # drop the NA padding entries
        deputies <- c(raw_table[, 2], raw_table[, 5])
        deputies <- deputies[!is.na(deputies)]
        deputies_votes <- c(raw_table[, 3], raw_table[, 6])
        deputies_votes <- deputies_votes[!is.na(deputies_votes)]
    } else {
        # single column group: deputy names in column 2, votes in column 3
        deputies <- raw_table[, 2]
        deputies_votes <- raw_table[, 3]
    }

    # assemble the two-column result: deputy, vote
    data.frame(deputy = deputies, vote = deputies_votes, stringsAsFactors = FALSE)
}
| /R/votes_get_results.R | no_license | cran/sejmRP | R | false | false | 1,651 | r | #' Getting voting's results for each club
#'
#' Function \code{votes_get_results} gets voting's results for each club.
#'
#' @details
#' Function \code{votes_get_results} gets voting's results for each club.
#' Example of page with voting's results of PO club:
#' http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?symbol=klubglos&
#' IdGlosowania=37494&KodKlubu=PO
#'
#' @usage votes_get_results(page)
#'
#' @param page club's voting's results page
#'
#' @return data frame with two columns: deputy, vote
#'
#' @examples
#' \dontrun{
#' page <- paste0('http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?',
#' 'symbol=klubglos&IdGlosowania=37494&KodKlubu=PO')
#' votes_get_results(page)}
#'
#' @note
#' All information is stored in PostgreSQL database.
#'
#' @author Piotr Smuda
#'
#' @export
#'
votes_get_results <- function(page) {
    # `page` must be the URL (a character string) of a club's voting-results page.
    stopifnot(is.character(page))
    # getting deputies and their votes
    votes_clubs_results <- safe_readHTMLTable(page, encoding = "UTF-8", stringsAsFactors = FALSE)[[1]]
    # Some pages lay the results out as two side-by-side (deputy, vote) column
    # groups (6 columns total); stack them and drop the NA filler entries.
    if(ncol(votes_clubs_results) == 6) {
        deputies <- c(votes_clubs_results[, 2], votes_clubs_results[, 5])
        deputies <- deputies[!is.na(deputies)]
        deputies_votes <- c(votes_clubs_results[, 3], votes_clubs_results[, 6])
        deputies_votes <- deputies_votes[!is.na(deputies_votes)]
    } else {
        # single layout: deputy names in column 2, votes in column 3
        deputies <- votes_clubs_results[, 2]
        deputies_votes <- votes_clubs_results[, 3]
    }
    # creating data frame with data
    votes_results <- data.frame(deputy = deputies, vote = deputies_votes, stringsAsFactors = FALSE)
    return(votes_results)
}
|
library(harmony)
library(cowplot)
library(Seurat)
library(magrittr)
library(ggplot2)
rm(list=ls())
##########################################################
# read data
# Load the two PBMC expression matrices and their per-cell type annotations.
b1_exprs <- readRDS("data set/PBMC_dataset/data1.rds")
b2_exprs <- readRDS("data set/PBMC_dataset/data2.rds")
b1_celltype <- readRDS("data set/PBMC_dataset/data1_celltype.rds")
b2_celltype <- readRDS("data set/PBMC_dataset/data2_celltype.rds")
# Cell barcodes use "-" in the annotation files but "." in the matrices;
# normalise, then keep only the cell-type column.
rownames(b1_celltype) <- gsub("-", ".", x = b1_celltype[, 1])
b1_celltype <- b1_celltype[, 2, drop = FALSE]
rownames(b2_celltype) <- gsub("-", ".", x = b2_celltype[, 1])
b2_celltype <- b2_celltype[, 2, drop = FALSE]
# Align annotation rows with the matrix columns.
b1_celltype <- b1_celltype[colnames(b1_exprs), , drop = FALSE]
b2_celltype <- b2_celltype[colnames(b2_exprs), , drop = FALSE]
# Build per-cell metadata (cell id, numeric batch index, batch label).
b1_metadata <- as.data.frame(b1_celltype)
b2_metadata <- as.data.frame(b2_celltype)
b1_metadata$cell <- rownames(b1_celltype)
b2_metadata$cell <- rownames(b2_celltype)
b1_metadata$batch <- 1
b2_metadata$batch <- 2
b1_metadata$batchlb <- 'Batch_1'
b2_metadata$batchlb <- 'Batch_2'
# Combine both batches; reorder columns so they match the metadata rows.
expr_mat = cbind(b1_exprs,b2_exprs)
metadata = rbind(b1_metadata, b2_metadata)
expr_mat <- expr_mat[, rownames(metadata)]
#####################################################
## simulation 1 remove "NK cell" and "Monocyte_FCGR3A" in two dataset respectively
# Simulation 1: make the cell-type composition differ between batches by
# dropping one population from each batch.
b1_exprs_sim1 = b1_exprs[, b1_celltype != "NK cell"]
b2_exprs_sim1 = b2_exprs[, b2_celltype != "Monocyte_FCGR3A"]
expr_mat_sim1 = cbind(b1_exprs_sim1,b2_exprs_sim1)
metadata_sim1 = rbind(b1_metadata[b1_celltype != "NK cell",], b2_metadata[b2_celltype != "Monocyte_FCGR3A",])
## simulation 2
#B cell (1199,1172), CD4 T cell (2267,2183), CD8 T cell(2076, 1066), Monocyte_CD14 (1914, 2176)
# Simulation 2: subsample four shared cell types with deliberately unbalanced
# per-batch counts (1000 vs 300 cells, mirrored between the two batches).
cell_names = c("B cell", "CD4 T cell", "CD8 T cell", "Monocyte_CD14")
n1_sub = c(1000,1000,300,300)
n2_sub = c(300,300,1000,1000)
# Accumulators start with a placeholder NA that is stripped afterwards.
index1 = NA
index2 = NA
group_label1 = NA
group_label2 = NA
n1 = dim(b1_celltype)[1]
n2 = dim(b2_celltype)[1]
set.seed(123)  # fixed seed so the subsampling is reproducible
for(i in 1:4){
# sample cell indices of the i-th type from each batch
index1 = c(index1, sample(c(1:n1)[c(b1_celltype$cell_type %in% cell_names[i])],n1_sub[i]))
index2 = c(index2, sample(c(1:n2)[c(b2_celltype$cell_type %in% cell_names[i])],n2_sub[i]))
group_label1 = c(group_label1, rep(i, n1_sub[i]))
group_label2 = c(group_label2, rep(i, n2_sub[i]))
}
# Drop the leading NA placeholders.
index1 = index1[-1]
index2 = index2[-1]
group_label1 = group_label1[-1]
group_label2 = group_label2[-1]
b1_exprs_sim2 = b1_exprs[, index1]
b2_exprs_sim2 = b2_exprs[, index2]
expr_mat_sim2 = cbind(b1_exprs_sim2,b2_exprs_sim2)
metadata_sim2 = rbind(b1_metadata[index1,], b2_metadata[index2,])
metadata_sim2$cell_type_index = c(group_label1, group_label2)
##########################################################
# run pipeline
source("R_Code/harmony/harmony_utility.R")
# Run the full analysis on simulation 1.
expr_mat = expr_mat_sim1
metadata = metadata_sim1
# data preprocess
npcs = 100
batch_label = "batchlb"
celltype_label = "cell_type"
# Standard Seurat preprocessing (filtering, log-normalisation, HVG selection,
# scaling, PCA); helper defined in harmony_utility.R.
b_seurat = data_preprocess(obj = expr_mat, min.cells = 10, min.features = 300, percent.mt = 10, oversd = NULL,
normalization.method = "LogNormalize", scale.factor = 10000, selection.method = "vst",
nfeatures = 2000, npcs = npcs, metadata = metadata)
# plot before removing batch effect
b_seurat <- RunTSNE(b_seurat, reduction = "pca", seed.use = 10, dim.embed = 2, dims = 1:100)
b_seurat <- RunUMAP(b_seurat, reduction = "pca", n.components = 2, seed.use = 10 , dims = 1:100)
DimPlot(object = b_seurat, dims = c(1,2), reduction = "tsne", pt.size = 0.5, group.by = batch_label)
DimPlot(object = b_seurat, dims = c(1,2), reduction = "umap", pt.size = 0.5, group.by = batch_label)
##########################################################
# harmony in Seurat
# Remove the batch effect with Harmony, then re-embed on the corrected space.
b_seurat = RunHarmony(b_seurat, group.by.vars = batch_label,
theta_harmony = 2, numclust = 50, max_iter_cluster = 100)
b_seurat <- RunTSNE(b_seurat, reduction = "harmony", seed.use = 10, dim.embed = 2, dims = 1:100)
b_seurat <- RunUMAP(b_seurat, reduction = "harmony", n.components = 2, seed.use = 10 , dims = 1:100)
# Side-by-side t-SNE / UMAP panels coloured by batch and by cell type.
p11 <- DimPlot(object = b_seurat, dims = c(1,2), reduction = "tsne", pt.size = 0.5, group.by = batch_label)
p12 <- DimPlot(object = b_seurat, dims = c(1,2), reduction = "tsne", pt.size = 0.5, group.by = celltype_label)
print(p11 + p12)
p21 <- DimPlot(object = b_seurat, dims = c(1,2), reduction = "umap", pt.size = 0.5, group.by = batch_label)
p22 <- DimPlot(object = b_seurat, dims = c(1,2), reduction = "umap", pt.size = 0.5, group.by = celltype_label)
print(p21 + p22)
# save results
png("R_Code/results/harmony_results/pbmc/pbmc_tsne_harmony_sim1.png",width = 2*1000, height = 800, res = 2*72)
print(p11 + p12)
dev.off()
png("R_Code/results/harmony_results/pbmc/pbmc_umap_harmony_sim1.png",width = 2*1000, height = 800, res = 2*72)
print(p21 + p22)
dev.off()
# Persist the corrected embedding plus the matching batch/cell-type vectors.
batch_harmony = b_seurat$batch
cell_type_harmony = b_seurat$cell_type
saveRDS(b_seurat@reductions$harmony@cell.embeddings, "R_Code/results/harmony_results/pbmc/pbmc_harmony_sim1.rds")
save(batch_harmony, cell_type_harmony, file = "R_Code/results/harmony_results/pbmc/harmony_metadata_sim1.Rdata")
| /R_Code/Yan_code/generate_simulation_data.R | no_license | guanxunli/MA_rm_batch_effect | R | false | false | 5,115 | r | library(harmony)
library(cowplot)
library(Seurat)
library(magrittr)
library(ggplot2)
rm(list=ls())
##########################################################
# read data
# Load the two PBMC expression matrices and their per-cell type annotations.
b1_exprs <- readRDS("data set/PBMC_dataset/data1.rds")
b2_exprs <- readRDS("data set/PBMC_dataset/data2.rds")
b1_celltype <- readRDS("data set/PBMC_dataset/data1_celltype.rds")
b2_celltype <- readRDS("data set/PBMC_dataset/data2_celltype.rds")
# Cell barcodes use "-" in the annotation files but "." in the matrices;
# normalise, then keep only the cell-type column.
rownames(b1_celltype) <- gsub("-", ".", x = b1_celltype[, 1])
b1_celltype <- b1_celltype[, 2, drop = FALSE]
rownames(b2_celltype) <- gsub("-", ".", x = b2_celltype[, 1])
b2_celltype <- b2_celltype[, 2, drop = FALSE]
# Align annotation rows with the matrix columns.
b1_celltype <- b1_celltype[colnames(b1_exprs), , drop = FALSE]
b2_celltype <- b2_celltype[colnames(b2_exprs), , drop = FALSE]
# Build per-cell metadata (cell id, numeric batch index, batch label).
b1_metadata <- as.data.frame(b1_celltype)
b2_metadata <- as.data.frame(b2_celltype)
b1_metadata$cell <- rownames(b1_celltype)
b2_metadata$cell <- rownames(b2_celltype)
b1_metadata$batch <- 1
b2_metadata$batch <- 2
b1_metadata$batchlb <- 'Batch_1'
b2_metadata$batchlb <- 'Batch_2'
# Combine both batches; reorder columns so they match the metadata rows.
expr_mat = cbind(b1_exprs,b2_exprs)
metadata = rbind(b1_metadata, b2_metadata)
expr_mat <- expr_mat[, rownames(metadata)]
#####################################################
## simulation 1 remove "NK cell" and "Monocyte_FCGR3A" in two dataset respectively
# Simulation 1: make the cell-type composition differ between batches by
# dropping one population from each batch.
b1_exprs_sim1 = b1_exprs[, b1_celltype != "NK cell"]
b2_exprs_sim1 = b2_exprs[, b2_celltype != "Monocyte_FCGR3A"]
expr_mat_sim1 = cbind(b1_exprs_sim1,b2_exprs_sim1)
metadata_sim1 = rbind(b1_metadata[b1_celltype != "NK cell",], b2_metadata[b2_celltype != "Monocyte_FCGR3A",])
## simulation 2
#B cell (1199,1172), CD4 T cell (2267,2183), CD8 T cell(2076, 1066), Monocyte_CD14 (1914, 2176)
# Simulation 2: subsample four shared cell types with deliberately unbalanced
# per-batch counts (1000 vs 300 cells, mirrored between the two batches).
cell_names = c("B cell", "CD4 T cell", "CD8 T cell", "Monocyte_CD14")
n1_sub = c(1000,1000,300,300)
n2_sub = c(300,300,1000,1000)
# Accumulators start with a placeholder NA that is stripped afterwards.
index1 = NA
index2 = NA
group_label1 = NA
group_label2 = NA
n1 = dim(b1_celltype)[1]
n2 = dim(b2_celltype)[1]
set.seed(123)  # fixed seed so the subsampling is reproducible
for(i in 1:4){
# sample cell indices of the i-th type from each batch
index1 = c(index1, sample(c(1:n1)[c(b1_celltype$cell_type %in% cell_names[i])],n1_sub[i]))
index2 = c(index2, sample(c(1:n2)[c(b2_celltype$cell_type %in% cell_names[i])],n2_sub[i]))
group_label1 = c(group_label1, rep(i, n1_sub[i]))
group_label2 = c(group_label2, rep(i, n2_sub[i]))
}
# Drop the leading NA placeholders.
index1 = index1[-1]
index2 = index2[-1]
group_label1 = group_label1[-1]
group_label2 = group_label2[-1]
b1_exprs_sim2 = b1_exprs[, index1]
b2_exprs_sim2 = b2_exprs[, index2]
expr_mat_sim2 = cbind(b1_exprs_sim2,b2_exprs_sim2)
metadata_sim2 = rbind(b1_metadata[index1,], b2_metadata[index2,])
metadata_sim2$cell_type_index = c(group_label1, group_label2)
##########################################################
# run pipeline
source("R_Code/harmony/harmony_utility.R")
# Run the full analysis on simulation 1.
expr_mat = expr_mat_sim1
metadata = metadata_sim1
# data preprocess
npcs = 100
batch_label = "batchlb"
celltype_label = "cell_type"
# Standard Seurat preprocessing (filtering, log-normalisation, HVG selection,
# scaling, PCA); helper defined in harmony_utility.R.
b_seurat = data_preprocess(obj = expr_mat, min.cells = 10, min.features = 300, percent.mt = 10, oversd = NULL,
normalization.method = "LogNormalize", scale.factor = 10000, selection.method = "vst",
nfeatures = 2000, npcs = npcs, metadata = metadata)
# plot before removing batch effect
b_seurat <- RunTSNE(b_seurat, reduction = "pca", seed.use = 10, dim.embed = 2, dims = 1:100)
b_seurat <- RunUMAP(b_seurat, reduction = "pca", n.components = 2, seed.use = 10 , dims = 1:100)
DimPlot(object = b_seurat, dims = c(1,2), reduction = "tsne", pt.size = 0.5, group.by = batch_label)
DimPlot(object = b_seurat, dims = c(1,2), reduction = "umap", pt.size = 0.5, group.by = batch_label)
##########################################################
# harmony in Seurat
# Remove the batch effect with Harmony, then re-embed on the corrected space.
b_seurat = RunHarmony(b_seurat, group.by.vars = batch_label,
theta_harmony = 2, numclust = 50, max_iter_cluster = 100)
b_seurat <- RunTSNE(b_seurat, reduction = "harmony", seed.use = 10, dim.embed = 2, dims = 1:100)
b_seurat <- RunUMAP(b_seurat, reduction = "harmony", n.components = 2, seed.use = 10 , dims = 1:100)
# Side-by-side t-SNE / UMAP panels coloured by batch and by cell type.
p11 <- DimPlot(object = b_seurat, dims = c(1,2), reduction = "tsne", pt.size = 0.5, group.by = batch_label)
p12 <- DimPlot(object = b_seurat, dims = c(1,2), reduction = "tsne", pt.size = 0.5, group.by = celltype_label)
print(p11 + p12)
p21 <- DimPlot(object = b_seurat, dims = c(1,2), reduction = "umap", pt.size = 0.5, group.by = batch_label)
p22 <- DimPlot(object = b_seurat, dims = c(1,2), reduction = "umap", pt.size = 0.5, group.by = celltype_label)
print(p21 + p22)
# save results
png("R_Code/results/harmony_results/pbmc/pbmc_tsne_harmony_sim1.png",width = 2*1000, height = 800, res = 2*72)
print(p11 + p12)
dev.off()
png("R_Code/results/harmony_results/pbmc/pbmc_umap_harmony_sim1.png",width = 2*1000, height = 800, res = 2*72)
print(p21 + p22)
dev.off()
# Persist the corrected embedding plus the matching batch/cell-type vectors.
batch_harmony = b_seurat$batch
cell_type_harmony = b_seurat$cell_type
saveRDS(b_seurat@reductions$harmony@cell.embeddings, "R_Code/results/harmony_results/pbmc/pbmc_harmony_sim1.rds")
save(batch_harmony, cell_type_harmony, file = "R_Code/results/harmony_results/pbmc/harmony_metadata_sim1.Rdata")
|
# This is an R script to make a PDF of all the knots in the package.
# Compare file knotplotter.R, which plots knots up to seven crossings
# and creates knot_table.pdf
# The vector of knot names below lists every object shipped in data/, in
# the order the pages should appear; regenerate it with something like:
# for i in ../data/*.R ; do sed -n "1p" $i |tr -d '`<-' ; done

## Plot one named knot object, using its name as the plot title.
f <- function(s){knotplot(get(s),main=s)}
library("knotR")
pdf(file="allknots.pdf")

## All knot objects to plot, one page each, in display order.
knot_names <- c(
    "k10_123", "k10_1", "k10_47", "k10_61", "k12a1202", "k12n838",
    "k3_1a", "k3_1", "k4_1a", "k4_1", "k5_1", "k5_2",
    "k6_1", "k6_2", "k6_3",
    "k7_1", "k7_2", "k7_3", "k7_4", "k7_5", "k7_6", "k7_7a", "k7_7",
    "k8_10", "k8_11", "k8_12", "k8_13", "k8_14", "k8_15", "k8_16", "k8_17",
    "k8_18", "k8_19a", "k8_19", "k8_1", "k8_20", "k8_21", "k8_2", "k8_3",
    "k8_4a", "k8_4", "k8_5", "k8_6", "k8_7", "k8_8", "k8_9",
    "k9_10", "k9_11", "k9_12", "k9_13", "k9_14", "k9_15", "k9_16", "k9_17",
    "k9_18", "k9_19", "k9_1", "k9_20", "k9_21", "k9_22", "k9_23a", "k9_23",
    "k9_24", "k9_25", "k9_26", "k9_27", "k9_28", "k9_29", "k9_2", "k9_30",
    "k9_31", "k9_32", "k9_33", "k9_34", "k9_35", "k9_36", "k9_37", "k9_38",
    "k9_39", "k9_3", "k9_40", "k9_41", "k9_42", "k9_43", "k9_44", "k9_45",
    "k9_46", "k9_47", "k9_48", "k9_49", "k9_4", "k9_5", "k9_6", "k9_7",
    "k9_8", "k9_9",
    "amphichiral15", "celtic3", "D16", "fiveloops", "flower", "fourloops",
    "hexknot2", "hexknot", "k_infinity", "k11a179", "k11a1", "k11a361",
    "k11n157_morenodes", "k11n157", "k11n22", "k12n_0242", "k12n_0411",
    "longthin", "ochiai", "ornamental20", "perko_A", "perko_B",
    "pretzel_2_3_7", "pretzel_7_3_7", "pretzel_p3_p5_p7_m3_m5",
    "reefknot", "satellite", "sum_31_41", "T20", "three_figure_eights",
    "trefoil_of_trefoils", "triloop", "unknot"
)

## One page per knot, in order; replaces ~130 hand-maintained f('...') calls.
for (kn in knot_names) f(kn)

dev.off()
| /inst/allknotplotter.R | no_license | RobinHankin/knotR | R | false | false | 2,350 | r | # This is an R script to make a PDF of all the knots in the package.
# Compare file knotplotter.R. which plots knots up to seven crossings
# and creates knot_table.pdf
# I can't figure out a way to do plot all knots automatically from the
# contents of the data/ directory. To create this file from scratch,
# use something like:
# for i in ../data/*.R ; do sed -n "1p" $i |tr -d '`<-' ; done
# Use this to generate a text file of all knot names. Then add the
# three setup lines below, which define function f(), load the
# package, and open a PDF file. Then use your friendly neighbourhood
# emacs to change "foo" to "f('foo')"; then add "dev.off()" at the
# end.
## setup lines start
f <- function(s){knotplot(get(s),main=s)}
library("knotR")
pdf(file="allknots.pdf")
## setup lines end
f('k10_123')
f('k10_1')
f('k10_47')
f('k10_61')
f('k12a1202')
f('k12n838')
f('k3_1a')
f('k3_1')
f('k4_1a')
f('k4_1')
f('k5_1')
f('k5_2')
f('k6_1')
f('k6_2')
f('k6_3')
f('k7_1')
f('k7_2')
f('k7_3')
f('k7_4')
f('k7_5')
f('k7_6')
f('k7_7a')
f('k7_7')
f('k8_10')
f('k8_11')
f('k8_12')
f('k8_13')
f('k8_14')
f('k8_15')
f('k8_16')
f('k8_17')
f('k8_18')
f('k8_19a')
f('k8_19')
f('k8_1')
f('k8_20')
f('k8_21')
f('k8_2')
f('k8_3')
f('k8_4a')
f('k8_4')
f('k8_5')
f('k8_6')
f('k8_7')
f('k8_8')
f('k8_9')
f('k9_10')
f('k9_11')
f('k9_12')
f('k9_13')
f('k9_14')
f('k9_15')
f('k9_16')
f('k9_17')
f('k9_18')
f('k9_19')
f('k9_1')
f('k9_20')
f('k9_21')
f('k9_22')
f('k9_23a')
f('k9_23')
f('k9_24')
f('k9_25')
f('k9_26')
f('k9_27')
f('k9_28')
f('k9_29')
f('k9_2')
f('k9_30')
f('k9_31')
f('k9_32')
f('k9_33')
f('k9_34')
f('k9_35')
f('k9_36')
f('k9_37')
f('k9_38')
f('k9_39')
f('k9_3')
f('k9_40')
f('k9_41')
f('k9_42')
f('k9_43')
f('k9_44')
f('k9_45')
f('k9_46')
f('k9_47')
f('k9_48')
f('k9_49')
f('k9_4')
f('k9_5')
f('k9_6')
f('k9_7')
f('k9_8')
f('k9_9')
f('amphichiral15')
f('celtic3')
f('D16')
f('fiveloops')
f('flower')
f('fourloops')
f('hexknot2')
f('hexknot')
f('k_infinity')
f('k11a179')
f('k11a1')
f('k11a361')
f('k11n157_morenodes')
f('k11n157')
f('k11n22')
f('k12n_0242')
f('k12n_0411')
f('longthin')
f('ochiai')
f('ornamental20')
f('perko_A')
f('perko_B')
f('pretzel_2_3_7')
f('pretzel_7_3_7')
f('pretzel_p3_p5_p7_m3_m5')
f('reefknot')
f('satellite')
f('sum_31_41')
f('T20')
f('three_figure_eights')
f('trefoil_of_trefoils')
f('triloop')
f('unknot')
dev.off()
|
# Does `x` carry a dim attribute, or non-NULL names?
has_dim <- function(x) {
  if (length(dim(x)) > 0L) {
    return(TRUE)
  }
  has_nonnull_names(x)
}
# TRUE when `x` has more than one dimension (matrix, array, data frame).
needs_dim <- function(x) {
  n_dims <- length(dim(x))
  n_dims > 1L
}
# TRUE when `x` has no names attribute at all.
has_null_names <- function(x) {
  nms <- names(x)
  is.null(nms)
}
# Complement of has_null_names(): TRUE when a names attribute is present.
has_nonnull_names <- function(x) {
  isFALSE(has_null_names(x))
}
# Functional form of `class<-`: set_class(x, value) returns `x` with its class
# attribute replaced by `value`.
set_class <- `class<-`
check_no_dim <- function(x) {
  # Atomic values that carry a dim attribute (1d arrays etc.) are not valid
  # columns; raise the dedicated classed error for them.
  # NOTE(review): has_dim() is also TRUE for merely *named* atomics without a
  # dim — confirm rejecting those here is intended.
  if (is_atomic(x) && has_dim(x)) {
    abort(error_1d_array_column())
  }
  # Return the input invisibly so the check composes in pipelines.
  invisible(x)
}
# Recursively drop dimension-related attributes so values behave as plain
# columns: matrix row names, data-frame row names (and each column), and the
# dim attribute of dimensioned atomics.
strip_dim <- function(x) {
  if (is.matrix(x)) {
    rownames(x) <- NULL
  } else if (is.data.frame(x)) {
    x <- remove_rownames(x)
    x[] <- map(x, strip_dim)  # recurse into each column
  } else if (is_atomic(x) && has_dim(x)) {
    # Careful update only if necessary, to avoid copying which is checked by
    # the "copying" test in dplyr
    dim(x) <- NULL
  }
  x
}
# A value must become a list-column when it is itself a list, or when it is
# not a length-one scalar.
needs_list_col <- function(x) {
  if (is_list(x)) {
    return(TRUE)
  }
  length(x) != 1L
}
# Work around bug in R 3.3.0
# Can be reassigned during loading (#544)
# Indirect alias for base::match() so a patched version can be swapped in.
safe_match <- match
# Concatenate the pieces with paste0() and raise the result as a warning.
warningc <- function(...) {
  msg <- paste0(...)
  warn(msg)
}
# Display width of each string (differs from character count for wide glyphs).
nchar_width <- function(x) {
  nchar(x, type = "width")
}
# Print the pieces glued together, each line terminated with a newline.
cat_line <- function(...) {
  lines <- paste0(..., "\n")
  cat(lines, sep = "")
}
# FIXME: Also exists in pillar, do we need to export?
# Backtick-quote each string; NA becomes the literal "NA".
tick <- function(x) {
  ticked <- encodeString(x, quote = "`")
  ifelse(is.na(x), "NA", ticked)
}
# A name is syntactic when make_syntactic() leaves it unchanged; NA entries
# never count as syntactic.
is_syntactic <- function(x) {
  ok <- make_syntactic(x) == x
  ok[is.na(x)] <- FALSE
  ok
}
# Backtick-quote exactly those names that are not syntactically valid.
tick_if_needed <- function(x) {
  non_syntactic <- !is_syntactic(x)
  x[non_syntactic] <- tick(x[non_syntactic])
  x
}
## from rematch2, except we don't add tbl_df or tbl classes to the return value
re_match <- function(text, pattern, perl = TRUE, ...) {
  # `pattern` must be a single, non-missing string.
  stopifnot(is.character(pattern), length(pattern) == 1, !is.na(pattern))
  text <- as.character(text)

  # Locate the first match of `pattern` in each element of `text`.
  m <- regexpr(pattern, text, perl = perl, ...)
  first <- as.vector(m)
  last <- first + attr(m, "match.length") - 1L
  matched <- substring(text, first, last)
  matched[first == -1] <- NA_character_  # -1 marks "no match"

  res <- data.frame(
    stringsAsFactors = FALSE,
    .text = text,
    .match = matched
  )

  # Prepend one column per capture group, when the pattern has any.
  cap_start <- attr(m, "capture.start")
  if (!is.null(cap_start)) {
    cap_end <- cap_start + attr(m, "capture.length") - 1L
    groups <- substring(text, cap_start, cap_end)
    groups[cap_start == -1] <- NA_character_
    dim(groups) <- dim(cap_start)
    res <- cbind(groups, res, stringsAsFactors = FALSE)
  }

  names(res) <- c(attr(m, "capture.names"), ".text", ".match")
  res
}
# RStudio sets the RSTUDIO environment variable in its sessions.
is_rstudio <- function() {
  rstudio_env <- Sys.getenv("RSTUDIO", unset = NA)
  !is.na(rstudio_env)
}
| /R/utils.r | no_license | takewiki/tibble | R | false | false | 2,508 | r |
# Does `x` carry a dim attribute, or non-NULL names?
has_dim <- function(x) {
  if (length(dim(x)) > 0L) {
    return(TRUE)
  }
  has_nonnull_names(x)
}
# TRUE when `x` has more than one dimension (matrix, array, data frame).
needs_dim <- function(x) {
  n_dims <- length(dim(x))
  n_dims > 1L
}
# TRUE when `x` has no names attribute at all.
has_null_names <- function(x) {
  nms <- names(x)
  is.null(nms)
}
# Complement of has_null_names(): TRUE when a names attribute is present.
has_nonnull_names <- function(x) {
  isFALSE(has_null_names(x))
}
# Functional form of `class<-`: set_class(x, value) returns `x` with its class
# attribute replaced by `value`.
set_class <- `class<-`
check_no_dim <- function(x) {
  # Atomic values that carry a dim attribute (1d arrays etc.) are not valid
  # columns; raise the dedicated classed error for them.
  # NOTE(review): has_dim() is also TRUE for merely *named* atomics without a
  # dim — confirm rejecting those here is intended.
  if (is_atomic(x) && has_dim(x)) {
    abort(error_1d_array_column())
  }
  # Return the input invisibly so the check composes in pipelines.
  invisible(x)
}
# Recursively drop dimension-related attributes so values behave as plain
# columns: matrix row names, data-frame row names (and each column), and the
# dim attribute of dimensioned atomics.
strip_dim <- function(x) {
  if (is.matrix(x)) {
    rownames(x) <- NULL
  } else if (is.data.frame(x)) {
    x <- remove_rownames(x)
    x[] <- map(x, strip_dim)  # recurse into each column
  } else if (is_atomic(x) && has_dim(x)) {
    # Careful update only if necessary, to avoid copying which is checked by
    # the "copying" test in dplyr
    dim(x) <- NULL
  }
  x
}
# A value must become a list-column when it is itself a list, or when it is
# not a length-one scalar.
needs_list_col <- function(x) {
  if (is_list(x)) {
    return(TRUE)
  }
  length(x) != 1L
}
# Work around bug in R 3.3.0
# Can be reassigned during loading (#544)
# Indirect alias for base::match() so a patched version can be swapped in.
safe_match <- match
# Concatenate the pieces with paste0() and raise the result as a warning.
warningc <- function(...) {
  msg <- paste0(...)
  warn(msg)
}
# Display width of each string (differs from character count for wide glyphs).
nchar_width <- function(x) {
  nchar(x, type = "width")
}
# Print the pieces glued together, each line terminated with a newline.
cat_line <- function(...) {
  lines <- paste0(..., "\n")
  cat(lines, sep = "")
}
# FIXME: Also exists in pillar, do we need to export?
# Backtick-quote each string; NA becomes the literal "NA".
tick <- function(x) {
  ticked <- encodeString(x, quote = "`")
  ifelse(is.na(x), "NA", ticked)
}
# A name is syntactic when make_syntactic() leaves it unchanged; NA entries
# never count as syntactic.
is_syntactic <- function(x) {
  ok <- make_syntactic(x) == x
  ok[is.na(x)] <- FALSE
  ok
}
# Backtick-quote exactly those names that are not syntactically valid.
tick_if_needed <- function(x) {
  non_syntactic <- !is_syntactic(x)
  x[non_syntactic] <- tick(x[non_syntactic])
  x
}
## from rematch2, except we don't add tbl_df or tbl classes to the return value
re_match <- function(text, pattern, perl = TRUE, ...) {
  # `pattern` must be a single, non-missing string.
  stopifnot(is.character(pattern), length(pattern) == 1, !is.na(pattern))
  text <- as.character(text)

  # Locate the first match of `pattern` in each element of `text`.
  m <- regexpr(pattern, text, perl = perl, ...)
  first <- as.vector(m)
  last <- first + attr(m, "match.length") - 1L
  matched <- substring(text, first, last)
  matched[first == -1] <- NA_character_  # -1 marks "no match"

  res <- data.frame(
    stringsAsFactors = FALSE,
    .text = text,
    .match = matched
  )

  # Prepend one column per capture group, when the pattern has any.
  cap_start <- attr(m, "capture.start")
  if (!is.null(cap_start)) {
    cap_end <- cap_start + attr(m, "capture.length") - 1L
    groups <- substring(text, cap_start, cap_end)
    groups[cap_start == -1] <- NA_character_
    dim(groups) <- dim(cap_start)
    res <- cbind(groups, res, stringsAsFactors = FALSE)
  }

  names(res) <- c(attr(m, "capture.names"), ".text", ".match")
  res
}
# RStudio sets the RSTUDIO environment variable in its sessions.
is_rstudio <- function() {
  rstudio_env <- Sys.getenv("RSTUDIO", unset = NA)
  !is.na(rstudio_env)
}
|
# Infer the LLVM type of `obj` (an R literal or language object). Symbols are
# looked up in the per-function type table `env$.types`; elementType = TRUE
# asks for the element type of a pointer/array type instead.
getTypes =
function(obj, env, elementType = FALSE)
{
  if(is(obj, "integer"))
    Int32Type
  else if(is(obj, "numeric"))
    DoubleType
  else if(is.name(obj)) {
    ans = env$.types[[as.character(obj)]]
    if(elementType)
      getTypeOfElement(ans)
    else
      ans
  } else if(is.call(obj)) {
      # temporarily deal with x[ expr ]
    if(obj[[1]] == as.name("[")) # XXX not in any way general And doesn't handle vectors being returned.
      return(getTypes(obj[[2]], env, TRUE))
    # NOTE(review): any other call falls through and yields NULL invisibly —
    # confirm callers tolerate a NULL type.
  } else {
    stop("Can't determine type for ", class(obj))
  }
}
getMathOpType =
  # Convenience function for checking types in math ops: if the types are the
  # same, return the common type; if not, return DoubleType (as this
  # will be what we should coerce to).
function(types)
{
  if( identical(types[[1]], types[[2]]) )
    return(types[[1]])
  i = match(types, c(Int32Type, DoubleType))
  if(!any(is.na(i)))
    return(DoubleType)
  # NOTE(review): when either type is neither Int32 nor Double we fall off the
  # end and return NULL invisibly — confirm this is intended.
}
getTypeOfElement =
#
# Given a pointer type or an array type, return the type of the underlying element.
# Only double*, int32* and float* are handled; anything else errors.
#
function(type)
{
  if (identical(type, DoublePtrType))
    return(DoubleType)
  else if (identical(type, Int32PtrType))
    return(Int32Type)
  else if (identical(type, FloatPtrType))
    return(FloatType)
  else
    stop("This type is not yet implemented.")
}
# There is an S4 generic getType in llvm. Why not provide methods for that?
# Generic dispatcher: look up the LLVM type for a value, consulting the
# compilation environment env where needed.  The S3 methods below handle
# character names, symbols, LLVM constants, and a NULL default.
getType =
function(val, env)
UseMethod("getType")
# Resolve a variable name (given as a string) to its declared type by
# looking it up in the environment's .types table; NULL when unknown.
getType.character =
function(val, env)
{
env$.types[[val]]
}
# A symbol is resolved by delegating to the character method on its name.
getType.name =
function(val, env)
getType(as.character(val), env)
# An LLVM integer constant is always typed as a 32-bit integer.
getType.ConstantInt =
function(val, env)
{
Int32Type
}
# An LLVM floating-point constant is always typed as a double.
getType.ConstantFP =
function(val, env)
{
DoubleType
}
# Fallback method: values with no more specific method have no known
# type, signalled by NULL.
getType.default =
function(val, env) {
  NULL
}
| /R/types.R | no_license | vsbuffalo/RLLVMCompile | R | false | false | 1,783 | r | getTypes =
function(obj, env, elementType = FALSE)
{
if(is(obj, "integer"))
Int32Type
else if(is(obj, "numeric"))
DoubleType
else if(is.name(obj)) {
ans = env$.types[[as.character(obj)]]
if(elementType)
getTypeOfElement(ans)
else
ans
} else if(is.call(obj)) {
# temporarily deal with x[ expr ]
if(obj[[1]] == as.name("[")) # XXX not in any way general And doesn't handle vectors being returned.
return(getTypes(obj[[2]], env, TRUE))
} else {
stop("Can't determine type for ", class(obj))
}
}
getMathOpType =
# Convenience function for checking types in math ops: if types are
# same, return the common type; if not, return DoubleType (as this
# will be what we should coerce to).
function(types)
{
if( identical(types[[1]], types[[2]]) )
return(types[[1]])
i = match(types, c(Int32Type, DoubleType))
if(!any(is.na(i)))
return(DoubleType)
}
getTypeOfElement =
#
# Given a pointer type or an array type, return the type of the underlying element.
#
function(type)
{
if (identical(type, DoublePtrType))
return(DoubleType)
else if (identical(type, Int32PtrType))
return(Int32Type)
else if (identical(type, FloatPtrType))
return(FloatType)
else
stop("This type is not yet implemented.")
}
# There is an S4 generic getType in llvm. Why not provide methods for that?
getType =
function(val, env)
UseMethod("getType")
getType.character =
function(val, env)
{
env$.types[[val]]
}
getType.name =
function(val, env)
getType(as.character(val), env)
getType.ConstantInt =
function(val, env)
{
Int32Type
}
getType.ConstantFP =
function(val, env)
{
DoubleType
}
getType.default =
function(val, env)
return(NULL)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freq.R
\name{freq}
\alias{freq}
\title{Frequency Tables for Factors and Other Discrete Data}
\usage{
freq(x, var = NULL, round.digits = st_options("round.digits"),
order = "default", style = st_options("style"),
plain.ascii = st_options("plain.ascii"), justify = "default",
cumul = st_options("freq.cumul"), totals = st_options("freq.totals"),
report.nas = st_options("freq.report.nas"), rows = numeric(),
missing = "", display.type = TRUE,
display.labels = st_options("display.labels"),
headings = st_options("headings"), weights = NA,
rescale.weights = FALSE, ...)
}
\arguments{
\item{x}{Factor or vector, or data frame when \emph{y} is also provided,
usually in a piped call.}
\item{var}{Unquoted expression referring to a specific column in x. Provides
support for piped function calls (e.g. \code{df \%>\% freq(some_var)}.}
\item{round.digits}{Number of significant digits to display. Defaults to
\code{2} and can be set globally; see \code{\link{st_options}}.}
\item{order}{Ordering of rows in frequency table; \dQuote{names} (default for
non-factors), \dQuote{levels} (default for factors), or \dQuote{freq} (from
most frequent to less frequent). To invert the order, place a minus sign
before or after the word. \dQuote{-freq} will thus display the items
starting from the lowest in frequency to the highest, and so forth.}
\item{style}{Style to be used by \code{\link[pander]{pander}} when rendering
output table; One of \dQuote{simple} (default), \dQuote{grid}, or
\dQuote{rmarkdown}. This option can be set globally; see
\code{\link{st_options}}.}
\item{plain.ascii}{Logical. \code{\link[pander]{pander}} argument; when
\code{TRUE}, no markup characters will be used (useful when printing to
console). Defaults to \code{TRUE} unless \code{style = 'rmarkdown'}, in
which case it will be set to \code{FALSE} automatically. To change the
default value globally, see \code{\link{st_options}}.}
\item{justify}{String indicating alignment of columns. By default
(\dQuote{default}), \dQuote{right} is used for text tables and
\dQuote{center} is used for \emph{html} tables. You can force it to one of
\dQuote{left}, \dQuote{center}, or \dQuote{right}.}
\item{cumul}{Logical. Set to \code{FALSE} to hide cumulative proportions
from results. \code{TRUE} by default. To change this value globally, see
\code{\link{st_options}}.}
\item{totals}{Logical. Set to \code{FALSE} to hide totals from results.
\code{TRUE} by default. To change this value globally, see
\code{\link{st_options}}.}
\item{report.nas}{Logical. Set to \code{FALSE} to turn off reporting of
missing values. To change this default value globally, see
\code{\link{st_options}}.}
\item{rows}{Character or numeric vector allowing subsetting of the results.
The order given here will be reflected in the resulting table. If a single
string is used, it will be used as a regular expression to filter row
names.}
\item{missing}{Characters to display in NA cells. Defaults to \dQuote{}.}
\item{display.type}{Logical. Should variable type be displayed? Default is
\code{TRUE}.}
\item{display.labels}{Logical. Should variable / data frame labels be
displayed? Default is \code{TRUE}. To change this default value globally,
see \code{\link{st_options}}.}
\item{headings}{Logical. Set to \code{FALSE} to omit heading section. Can be
set globally via \code{\link{st_options}}.}
\item{weights}{Vector of weights; must be of the same length as \code{x}.}
\item{rescale.weights}{Logical parameter. When set to \code{TRUE}, the total
count will be the same as the unweighted \code{x}. \code{FALSE} by default.}
\item{\dots}{Additional arguments passed to \code{\link[pander]{pander}}.}
}
\value{
A frequency table of class \code{matrix} and \code{summarytools} with
added attributes used by \emph{print} method.
}
\description{
Displays weighted or unweighted frequencies, including <NA> counts and
proportions.
}
\details{
The default \code{plain.ascii = TRUE} option is there to make
results appear cleaner in the console. To avoid rmarkdown rendering
problems, this option is automatically set to \code{FALSE} whenever
\code{style = "rmarkdown"} (unless \code{plain.ascii = TRUE} is made
explicit in the function call).
}
\examples{
data(tobacco)
freq(tobacco$gender)
freq(tobacco$gender, totals = FALSE)
# Ignore NA's, don't show totals, omit headings
freq(tobacco$gender, report.nas = FALSE, totals = FALSE, headings = FALSE)
# In .Rmd documents, use the two following arguments, minimally
freq(tobacco$gender, style="rmarkdown", plain.ascii = FALSE)
# Grouped Frequencies
with(tobacco, stby(diseased, smoker, freq))
(fr_smoker_by_gender <- with(tobacco, stby(smoker, gender, freq)))
# Print html Source
print(fr_smoker_by_gender, method = "render", footnote = NA)
# Order by frequency (+ to -)
freq(tobacco$age.gr, order = "freq")
# Order by frequency (- to +)
freq(tobacco$age.gr, order = "-freq")
# Use the 'rows' argument to display only the 10 most common items
freq(tobacco$age.gr, order = "freq", rows = 1:10)
\dontrun{
# Display rendered html results in RStudio's Viewer
# notice 'view()' is NOT written with capital V
# If working outside RStudio, Web browser is used instead
# A temporary file is stored in temp dir
view(fr_smoker_by_gender)
# Display rendered html results in default Web browser
# A temporary file is stored in temp dir here too
print(fr_smoker_by_gender, method = "browser")
# Write results to text file (.txt, .md, .Rmd) or html file (.html)
print(fr_smoker_by_gender, method = "render", file = "fr_smoker_by_gender.md")
print(fr_smoker_by_gender, method = "render", file = "fr_smoker_by_gender.html")
}
}
\seealso{
\code{\link[base]{table}}
}
\author{
Dominic Comtois, \email{dominic.comtois@gmail.com}
}
\keyword{category}
\keyword{classes}
\keyword{univar}
| /man/freq.Rd | no_license | jrgant/summarytools | R | false | true | 5,882 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freq.R
\name{freq}
\alias{freq}
\title{Frequency Tables for Factors and Other Discrete Data}
\usage{
freq(x, var = NULL, round.digits = st_options("round.digits"),
order = "default", style = st_options("style"),
plain.ascii = st_options("plain.ascii"), justify = "default",
cumul = st_options("freq.cumul"), totals = st_options("freq.totals"),
report.nas = st_options("freq.report.nas"), rows = numeric(),
missing = "", display.type = TRUE,
display.labels = st_options("display.labels"),
headings = st_options("headings"), weights = NA,
rescale.weights = FALSE, ...)
}
\arguments{
\item{x}{Factor or vector, or data frame when \emph{y} is also provided,
usually in a piped call.}
\item{var}{Unquoted expression referring to a specific column in x. Provides
support for piped function calls (e.g. \code{df \%>\% freq(some_var)}.}
\item{round.digits}{Number of significant digits to display. Defaults to
\code{2} and can be set globally; see \code{\link{st_options}}.}
\item{order}{Ordering of rows in frequency table; \dQuote{names} (default for
non-factors), \dQuote{levels} (default for factors), or \dQuote{freq} (from
most frequent to less frequent). To invert the order, place a minus sign
before or after the word. \dQuote{-freq} will thus display the items
starting from the lowest in frequency to the highest, and so forth.}
\item{style}{Style to be used by \code{\link[pander]{pander}} when rendering
output table; One of \dQuote{simple} (default), \dQuote{grid}, or
\dQuote{rmarkdown}. This option can be set globally; see
\code{\link{st_options}}.}
\item{plain.ascii}{Logical. \code{\link[pander]{pander}} argument; when
\code{TRUE}, no markup characters will be used (useful when printing to
console). Defaults to \code{TRUE} unless \code{style = 'rmarkdown'}, in
which case it will be set to \code{FALSE} automatically. To change the
default value globally, see \code{\link{st_options}}.}
\item{justify}{String indicating alignment of columns. By default
(\dQuote{default}), \dQuote{right} is used for text tables and
\dQuote{center} is used for \emph{html} tables. You can force it to one of
\dQuote{left}, \dQuote{center}, or \dQuote{right}.}
\item{cumul}{Logical. Set to \code{FALSE} to hide cumulative proportions
from results. \code{TRUE} by default. To change this value globally, see
\code{\link{st_options}}.}
\item{totals}{Logical. Set to \code{FALSE} to hide totals from results.
\code{TRUE} by default. To change this value globally, see
\code{\link{st_options}}.}
\item{report.nas}{Logical. Set to \code{FALSE} to turn off reporting of
missing values. To change this default value globally, see
\code{\link{st_options}}.}
\item{rows}{Character or numeric vector allowing subsetting of the results.
The order given here will be reflected in the resulting table. If a single
string is used, it will be used as a regular expression to filter row
names.}
\item{missing}{Characters to display in NA cells. Defaults to \dQuote{}.}
\item{display.type}{Logical. Should variable type be displayed? Default is
\code{TRUE}.}
\item{display.labels}{Logical. Should variable / data frame labels be
displayed? Default is \code{TRUE}. To change this default value globally,
see \code{\link{st_options}}.}
\item{headings}{Logical. Set to \code{FALSE} to omit heading section. Can be
set globally via \code{\link{st_options}}.}
\item{weights}{Vector of weights; must be of the same length as \code{x}.}
\item{rescale.weights}{Logical parameter. When set to \code{TRUE}, the total
count will be the same as the unweighted \code{x}. \code{FALSE} by default.}
\item{\dots}{Additional arguments passed to \code{\link[pander]{pander}}.}
}
\value{
A frequency table of class \code{matrix} and \code{summarytools} with
added attributes used by \emph{print} method.
}
\description{
Displays weighted or unweighted frequencies, including <NA> counts and
proportions.
}
\details{
The default \code{plain.ascii = TRUE} option is there to make
results appear cleaner in the console. To avoid rmarkdown rendering
problems, this option is automatically set to \code{FALSE} whenever
\code{style = "rmarkdown"} (unless \code{plain.ascii = TRUE} is made
explicit in the function call).
}
\examples{
data(tobacco)
freq(tobacco$gender)
freq(tobacco$gender, totals = FALSE)
# Ignore NA's, don't show totals, omit headings
freq(tobacco$gender, report.nas = FALSE, totals = FALSE, headings = FALSE)
# In .Rmd documents, use the two following arguments, minimally
freq(tobacco$gender, style="rmarkdown", plain.ascii = FALSE)
# Grouped Frequencies
with(tobacco, stby(diseased, smoker, freq))
(fr_smoker_by_gender <- with(tobacco, stby(smoker, gender, freq)))
# Print html Source
print(fr_smoker_by_gender, method = "render", footnote = NA)
# Order by frequency (+ to -)
freq(tobacco$age.gr, order = "freq")
# Order by frequency (- to +)
freq(tobacco$age.gr, order = "-freq")
# Use the 'rows' argument to display only the 10 most common items
freq(tobacco$age.gr, order = "freq", rows = 1:10)
\dontrun{
# Display rendered html results in RStudio's Viewer
# notice 'view()' is NOT written with capital V
# If working outside RStudio, Web browser is used instead
# A temporary file is stored in temp dir
view(fr_smoker_by_gender)
# Display rendered html results in default Web browser
# A temporary file is stored in temp dir here too
print(fr_smoker_by_gender, method = "browser")
# Write results to text file (.txt, .md, .Rmd) or html file (.html)
print(fr_smoker_by_gender, method = "render", file = "fr_smoker_by_gender.md")
print(fr_smoker_by_gender, method = "render", file = "fr_smoker_by_gender.html")
}
}
\seealso{
\code{\link[base]{table}}
}
\author{
Dominic Comtois, \email{dominic.comtois@gmail.com}
}
\keyword{category}
\keyword{classes}
\keyword{univar}
|
# my_rscript1.R
# Univariate and multivariate Cox survival analysis driven from the
# command line: the single argument N is a model formula RHS such as
# "gene1+gene2+age".
args <- commandArgs(TRUE)
N <- args[1]
load("FourDatasets.Rdata")  # provides data_symbol_T_cli (and related datasets)
library(rms)                # cph / npsurv / survplot / validate
library(Cairo)              # CairoPNG bitmap device
# Survival object: event when Surv == 1; time axis labelled in months below.
S5 <- with(data_symbol_T_cli, Surv(Survtime,Surv==1))
# dang yingshu fengxi.R  (pinyin: univariate analysis)
# Split the formula RHS into the individual covariates.
variable<-unlist(strsplit(N, split="+",fixed=T))
P = rep(NA,length(variable))  # per-variable Cox p-values
for(i in 1:length(variable)){
# [5] extracts the p-value entry from the anova table of the
# single-covariate Cox model.
P[i] = anova(cph(as.formula(paste("S5~",variable[i])),data=data_symbol_T_cli))[5]
d1<-data_symbol_T_cli[,variable[i]]
# Dichotomise at the median: group 1 = low expression, 2 = high.
group = ifelse(d1<=median(d1),1,2)
# NOTE(review): d1 <= median(d1) is a vector, so this `if` condition has
# length > 1 (an error in R >= 4.2); `risk` is also never used below.
# This line looks like dead/broken code -- confirm the intent.
if(d1<=median(d1)){risk=c("High Level","Low Level")}else{risk=c("Low Level","High Level")}
CairoPNG(filename=paste(variable[i],".png"), width=450, height=300)
kmfit1<-npsurv(S5~group,data=data_symbol_T_cli)
survplot(kmfit1, conf='none',col=1:2,label.curves=FALSE,xlab='survival time in month',ylab='survival probabilities')
legend("topright",col=1:2,c("Low Level","High Level"),lwd=5)
# Annotate the plot with the p-value, truncated below 0.001.
if(P[i]<0.001){
legend(4,0.2,"<0.001",bty="n")}
else{
legend(4,0.2,round(P[i],3),bty="n")
}
# NOTE(review): legend() is called here with a single coordinate;
# probably meant legend(x, 0.2, "P=", ...) to label the value above.
legend(0.2,"P=",bty="n")
title(variable[i])
dev.off()
}
# NOTE(review): original comment claimed space-separated output, but
# write.csv always writes comma-separated values.
write.csv(round(P,5), file = "foo.csv", row.names = F, quote = F)
# duo yingshu fengxi.R  (pinyin: multivariate analysis)
# Full multivariate Cox model over all covariates in N.
f2<-cph(as.formula(paste("S5~",N)),surv=TRUE,x=TRUE,y=TRUE,data=data_symbol_T_cli)
f1<-anova(f2)
write.csv(round(f1,7),file = "543.csv")
CairoPNG(filename="temp.png", width=900, height=600)
plot(f1)
# Bootstrap validation (B = 200 resamples) reporting Somers' Dxy.
da<-validate(f2,data=data_symbol_T_cli,B=200,dxy=TRUE)
write.csv(round(da,4),file = "da.csv")
# NOTE(review): data_symbol_cli_T vs data_symbol_T_cli -- possibly a
# transposed-name typo; confirm both datasets actually exist.
da2<-validate(f2,data=data_symbol_cli_T,B=200,dxy=TRUE)
write.csv(round(da2,4),file = "da2.csv")
| /survivalt1 - 副本 (2).R | no_license | kbvstmd/oy | R | false | false | 1,516 | r | # my_rscript1.R
args <- commandArgs(TRUE)
N <- args[1]
load("FourDatasets.Rdata")
library(rms)
library(Cairo)
S5 <- with(data_symbol_T_cli, Surv(Survtime,Surv==1))
# dang yingshu fengxi.R
variable<-unlist(strsplit(N, split="+",fixed=T))
P = rep(NA,length(variable))
for(i in 1:length(variable)){
P[i] = anova(cph(as.formula(paste("S5~",variable[i])),data=data_symbol_T_cli))[5]
d1<-data_symbol_T_cli[,variable[i]]
group = ifelse(d1<=median(d1),1,2)
if(d1<=median(d1)){risk=c("High Level","Low Level")}else{risk=c("Low Level","High Level")}
CairoPNG(filename=paste(variable[i],".png"), width=450, height=300)
kmfit1<-npsurv(S5~group,data=data_symbol_T_cli)
survplot(kmfit1, conf='none',col=1:2,label.curves=FALSE,xlab='survival time in month',ylab='survival probabilities')
legend("topright",col=1:2,c("Low Level","High Level"),lwd=5)
if(P[i]<0.001){
legend(4,0.2,"<0.001",bty="n")}
else{
legend(4,0.2,round(P[i],3),bty="n")
}
legend(0.2,"P=",bty="n")
title(variable[i])
dev.off()
}
write.csv(round(P,5), file = "foo.csv", row.names = F, quote = F) # 空格分隔
# duo yingshu fengxi.R
f2<-cph(as.formula(paste("S5~",N)),surv=TRUE,x=TRUE,y=TRUE,data=data_symbol_T_cli)
f1<-anova(f2)
write.csv(round(f1,7),file = "543.csv")
CairoPNG(filename="temp.png", width=900, height=600)
plot(f1)
da<-validate(f2,data=data_symbol_T_cli,B=200,dxy=TRUE)
write.csv(round(da,4),file = "da.csv")
da2<-validate(f2,data=data_symbol_cli_T,B=200,dxy=TRUE)
write.csv(round(da2,4),file = "da2.csv")
|
clean.files <-
# Remove temporary files and figure directories left behind by previous
# runs (in the current working directory), then report what was deleted
# via a dialog (winDialog.m).  Nothing is reported when nothing existed.
function ()
{
    targets <- c('JUICE2.txt.Rout', 'result.txt', 'ordi-result.txt',
                 'JUICE2.txt', 'sh_expo.txt', 'specdata.txt',
                 'dca_lfa.r', 'dca_lfq.r', 'pca_lfa.r', 'pca_lfq.r',
                 'nmds_lfa.r', 'nmds_lfq.r', 'updat.R', 'updtchck.r',
                 'clean.R')
    # Only attempt to remove files that are actually present.
    present <- targets[targets %in% list.files()]
    try(file.remove(present))
    # Figure output directories are removed recursively.
    figs2d <- list.files(pattern = '2D_figures_')
    try(unlink(figs2d, recursive = T))
    snaps3d <- list.files(pattern = '3D_snapshots_')
    try(unlink(snaps3d, recursive = T))
    if (length(present) > 0 || length(figs2d) > 0 || length(snaps3d) > 0)
        winDialog.m('ok', message = paste(c('These items have been deleted: \n\n files:\n', present, '\n\ndirectories:\n', c(figs2d, snaps3d)), sep = '', collapse = ' '))
}
| /R/clean.files.R | no_license | zdealveindy/ordijuice | R | false | false | 826 | r | clean.files <-
function ()
{
files.to.delete <- c('JUICE2.txt.Rout', 'result.txt', 'ordi-result.txt', 'JUICE2.txt', 'sh_expo.txt', 'specdata.txt', 'dca_lfa.r', 'dca_lfq.r', 'pca_lfa.r', 'pca_lfq.r', 'nmds_lfa.r', 'nmds_lfq.r', 'updat.R', 'updtchck.r', 'clean.R')
deleted.files <- files.to.delete [files.to.delete %in% list.files ()]
try (file.remove (deleted.files))
del.dir1 <- list.files (pattern = '2D_figures_')
try (unlink (del.dir1, recursive = T))
del.dir2 <- list.files (pattern = '3D_snapshots_')
try (unlink (del.dir2, recursive = T))
if (length (deleted.files) > 0 || length (del.dir1) > 0 || length (del.dir2) > 0)
winDialog.m ('ok', message = paste(c('These items have been deleted: \n\n files:\n', deleted.files, '\n\ndirectories:\n', c (del.dir1, del.dir2)), sep = '', collapse = ' '))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tailDependence.R
\name{tailDependencePlot}
\alias{tailDependencePlot}
\title{Function that plots the "tail-dependence" as a function of the chosen quantile.}
\usage{
tailDependencePlot(
formula,
data,
q = seq(0.1, 0.2, 0.005),
tail = "lwr",
method = "fast"
)
}
\arguments{
\item{formula}{a formula object, with the response on the left of a ~
operator, and the terms on the right. The response must be a
survival object as returned by the \code{Surv} function. The RHS must contain a 'cluster' term.}
\item{data}{a data.frame containing the variables in the model.}
\item{q}{Quantiles to estimate "tail-dependence" for.}
\item{tail}{Tail to estimate "tail dependence" for ("lwr" or "upr").}
\item{method}{What estimator to use. Can either be "fast" or "dabrowska".}
}
\value{
Plot of "tail-dependence" as a function of the quantile; estimated and implied by frailty models.
}
\description{
Function that plots the "tail-dependence" as a function of the chosen quantile
}
\seealso{
tailDepCI tailDependence
}
\author{
Jeppe E. H. Madsen <jeppe.ekstrand.halkjaer@gmail.com>
}
| /man/tailDependencePlot.Rd | no_license | Jeepen/biSurv | R | false | true | 1,167 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tailDependence.R
\name{tailDependencePlot}
\alias{tailDependencePlot}
\title{Function that plots the "tail-dependence" as a function of the chosen quantile.}
\usage{
tailDependencePlot(
formula,
data,
q = seq(0.1, 0.2, 0.005),
tail = "lwr",
method = "fast"
)
}
\arguments{
\item{formula}{a formula object, with the response on the left of a ~
operator, and the terms on the right. The response must be a
survival object as returned by the \code{Surv} function. The RHS must contain a 'cluster' term.}
\item{data}{a data.frame containing the variables in the model.}
\item{q}{Quantiles to estimate "tail-dependence" for.}
\item{tail}{Tail to estimate "tail dependence" for ("lwr" or "upr").}
\item{method}{What estimator to use. Can either be "fast" or "dabrowska".}
}
\value{
Plot of "tail-dependence" as a function of the quantile; estimated and implied by frailty models.
}
\description{
Function that plots the "tail-dependence" as a function of the chosen quantile
}
\seealso{
tailDepCI tailDependence
}
\author{
Jeppe E. H. Madsen <jeppe.ekstrand.halkjaer@gmail.com>
}
|
#' A class handling modification characteristics
#'
#' This class defines a single modification type that can be searched for by
#' MS-GF+. Modifications are collected in a
#' \code{\link{msgfParModificationList}} before adding them to
#' \code{\linkS4class{msgfPar}} objects.
#'
#' @slot composition The molecular formula for the modification.
#' @slot mass The monoisotopic mass of the modification
#' @slot residues The amino acids the modification applies to
#' @slot type Whether the modification is optional or always present
#' @slot position The possibel position of the modification
#' @slot name The name of the modification
#'
#' @examples
#' # Using composition
#' modification1 <- msgfParModification(
#' name='Carbamidomethyl',
#' composition='C2H3N1O1',
#' residues='C',
#' type='fix',
#' position='any'
#' )
#' # Using exact mass
#' modification2 <- msgfParModification(
#' name='Oxidation',
#' mass=15.994915,
#' residues='M',
#' type='opt',
#' position='any'
#' )
#'
#' @family msgfParClasses
#'
# S4 class describing one MS-GF+ modification.  The validity function
# enforces the constraints the MS-GF+ mods file expects.
setClass(
'msgfParModification',
representation=representation(
composition='character',  # molecular formula; '' when mass is given instead
mass='numeric',           # monoisotopic mass; NA when composition is given
residues='character',     # affected amino acids ('*' = any residue)
type='character',         # 'fix' (fixed) or 'opt' (optional)
position='character',     # 'any', 'n-term', 'c-term', 'prot-n-term', 'prot-c-term'
name='character'          # display name of the modification
),
validity=function(object){
# At least one of composition / mass must be specified.
if(object@composition==''){
if(is.na(object@mass)){
return('Either mass or composition must be specified')
} else {}
} else {
# The formula may only use the element symbols MS-GF+ understands.
compSplit <- gregexpr('[A-Z][a-z]?', object@composition, perl=TRUE)
if(!all(regmatches(object@composition, compSplit)[[1]] %in% c('C', 'H', 'N', 'O', 'S', 'P', 'Br', 'Cl', 'Fe', ''))){
return('Modification must consist of only \'C\', \'H\', \'N\', \'O\', \'S\', \'P\', \'Br\', \'Cl\' and \'Fe\'')
} else {}
}
# Residues are one-letter amino-acid codes, or '*' for any residue.
if(object@residues != '*'){
res <- strsplit(object@residues, '')[[1]]
if(!all(toupper(res) %in% c('A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'))){
return('Residues must be one of the 20 common amino acids in one letter code')
} else {}
} else {}
# Type is restricted to fixed/optional (case-insensitive).
if(!(tolower(object@type) %in% c('fix', 'opt'))){
return('The type of the modification must be either \'fix\' for fixed or \'opt\' for optional')
} else {}
# Position accepts both hyphenated and squashed spellings.
if(!(tolower(object@position) %in% c('any', 'n-term', 'nterm', 'c-term', 'cterm', 'prot-n-term', 'protnterm', 'prot-c-term', 'protcterm'))){
return('Position must be either \'any\', \'n-term\', \'c-term\', \'prot-n-term\' or \'prot-c-term\'')
} else {}
}
)
#' @describeIn msgfParModification Get \code{\link[base]{system}} compliant
#' function call
#'
#' @return For getMSGFpar() A string.
#'
# Render the modification as one line of an MS-GF+ modification file:
#   <composition|mass>,<residues>,<fix|opt>,<position>,<name>
setMethod(
'getMSGFpar', 'msgfParModification',
function(object){
if(object@composition != ''){
# Reorder the element/count pairs of the formula into MS-GF+'s
# canonical element order (C, H, N, O, S, P, Br, Cl, Fe).
compSplit <- gregexpr('[- | \\d]+', object@composition, perl=TRUE)
compOrder <- order(match(regmatches(object@composition, compSplit, invert=TRUE)[[1]], c('C', 'H', 'N', 'O', 'S', 'P', 'Br', 'Cl', 'Fe', '')))
# NOTE(review): the trailing [1:length(compSplit[[1]])] subscript drops
# surplus entries produced by the invert split; confirm it is intended.
compOrdered <- paste(regmatches(object@composition, compSplit, invert=TRUE)[[1]][compOrder], regmatches(object@composition, compSplit)[[1]][compOrder], sep='')[1:length(compSplit[[1]])]
ans <- paste(compOrdered, collapse='')
} else {
# No formula given: fall back to the exact monoisotopic mass.
ans <- paste(object@mass)
}
ans <- paste(ans, toupper(object@residues), object@type, object@position, object@name, sep=',')
ans
}
)
#' @describeIn msgfParModification Short summary of msgfParModification object
#'
#' @param object An msgfParModification object
#'
# Print a one-line summary of the modification: name, composition
# (or mass when no formula is set), residues, type and position.
setMethod(
'show', 'msgfParModification',
function(object){
cat(object@name, ':\t', if(object@composition != '') object@composition else object@mass, ', ', object@residues, ', ', object@type, ', ', object@position, '\n', sep='')
}
)
#' @rdname msgfParModification-class
#'
#' @param name The name of the modification
#'
#' @param composition The molecular formular as a string for the modification.
#' Loss of atoms are denoted with negative integers (e.g. O-1 for loss of
#' oxygen)
#'
#' @param mass The monoisotopic mass change between a peptide without and with
#' the given modification. Either composition or mass must be defined.
#'
#' @param residues The amino acids that the modification applies to. Given as
#' their one-letter code in upper-case without any separation. Use '*' for all
#' residues
#'
#' @param type Either 'fix' or 'opt' for fixed or optional
#'
#' @param position Where the modification can be. Either 'any', 'c-term',
#' 'n-term', 'prot-c-term' or 'prot-n-term.
#'
#' @return For msgfParModification() An msgfParModification object.
#'
#' @export
#'
# Constructor for msgfParModification objects.  Accepts the long forms
# 'fixed'/'optional' for type and normalises them to 'fix'/'opt' before
# the class' validity method checks the slots.
msgfParModification <- function(name, composition='', mass=as.numeric(NA), residues, type, position){
    type <- switch(type, fixed = 'fix', optional = 'opt', type)
    new(Class='msgfParModification', name=name, composition=composition,
        mass=mass, residues=residues, type=type, position=position)
}
| /R/msgfParModification.R | no_license | thomasp85/MSGFplus-release | R | false | false | 5,338 | r | #' A class handling modification characteristics
#'
#' This class defines a single modification type that can be searched for by
#' MS-GF+. Modifications are collected in a
#' \code{\link{msgfParModificationList}} before adding them to
#' \code{\linkS4class{msgfPar}} objects.
#'
#' @slot composition The molecular formula for the modification.
#' @slot mass The monoisotopic mass of the modification
#' @slot residues The amino acids the modification applies to
#' @slot type Whether the modification is optional or always present
#' @slot position The possibel position of the modification
#' @slot name The name of the modification
#'
#' @examples
#' # Using composition
#' modification1 <- msgfParModification(
#' name='Carbamidomethyl',
#' composition='C2H3N1O1',
#' residues='C',
#' type='fix',
#' position='any'
#' )
#' # Using exact mass
#' modification2 <- msgfParModification(
#' name='Oxidation',
#' mass=15.994915,
#' residues='M',
#' type='opt',
#' position='any'
#' )
#'
#' @family msgfParClasses
#'
setClass(
'msgfParModification',
representation=representation(
composition='character',
mass='numeric',
residues='character',
type='character',
position='character',
name='character'
),
validity=function(object){
if(object@composition==''){
if(is.na(object@mass)){
return('Either mass or composition must be specified')
} else {}
} else {
compSplit <- gregexpr('[A-Z][a-z]?', object@composition, perl=TRUE)
if(!all(regmatches(object@composition, compSplit)[[1]] %in% c('C', 'H', 'N', 'O', 'S', 'P', 'Br', 'Cl', 'Fe', ''))){
return('Modification must consist of only \'C\', \'H\', \'N\', \'O\', \'S\', \'P\', \'Br\', \'Cl\' and \'Fe\'')
} else {}
}
if(object@residues != '*'){
res <- strsplit(object@residues, '')[[1]]
if(!all(toupper(res) %in% c('A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'))){
return('Residues must be one of the 20 common amino acids in one letter code')
} else {}
} else {}
if(!(tolower(object@type) %in% c('fix', 'opt'))){
return('The type of the modification must be either \'fix\' for fixed or \'opt\' for optional')
} else {}
if(!(tolower(object@position) %in% c('any', 'n-term', 'nterm', 'c-term', 'cterm', 'prot-n-term', 'protnterm', 'prot-c-term', 'protcterm'))){
return('Position must be either \'any\', \'n-term\', \'c-term\', \'prot-n-term\' or \'prot-c-term\'')
} else {}
}
)
#' @describeIn msgfParModification Get \code{\link[base]{system}} compliant
#' function call
#'
#' @return For getMSGFpar() A string.
#'
setMethod(
'getMSGFpar', 'msgfParModification',
function(object){
if(object@composition != ''){
compSplit <- gregexpr('[- | \\d]+', object@composition, perl=TRUE)
compOrder <- order(match(regmatches(object@composition, compSplit, invert=TRUE)[[1]], c('C', 'H', 'N', 'O', 'S', 'P', 'Br', 'Cl', 'Fe', '')))
compOrdered <- paste(regmatches(object@composition, compSplit, invert=TRUE)[[1]][compOrder], regmatches(object@composition, compSplit)[[1]][compOrder], sep='')[1:length(compSplit[[1]])]
ans <- paste(compOrdered, collapse='')
} else {
ans <- paste(object@mass)
}
ans <- paste(ans, toupper(object@residues), object@type, object@position, object@name, sep=',')
ans
}
)
#' @describeIn msgfParModification Short summary of msgfParModification object
#'
#' @param object An msgfParModification object
#'
setMethod(
'show', 'msgfParModification',
function(object){
cat(object@name, ':\t', if(object@composition != '') object@composition else object@mass, ', ', object@residues, ', ', object@type, ', ', object@position, '\n', sep='')
}
)
#' @rdname msgfParModification-class
#'
#' @param name The name of the modification
#'
#' @param composition The molecular formular as a string for the modification.
#' Loss of atoms are denoted with negative integers (e.g. O-1 for loss of
#' oxygen)
#'
#' @param mass The monoisotopic mass change between a peptide without and with
#' the given modification. Either composition or mass must be defined.
#'
#' @param residues The amino acids that the modification applies to. Given as
#' their one-letter code in upper-case without any separation. Use '*' for all
#' residues
#'
#' @param type Either 'fix' or 'opt' for fixed or optional
#'
#' @param position Where the modification can be. Either 'any', 'c-term',
#' 'n-term', 'prot-c-term' or 'prot-n-term.
#'
#' @return For msgfParModification() An msgfParModification object.
#'
#' @export
#'
msgfParModification <- function(name, composition='', mass=as.numeric(NA), residues, type, position){
if(type == 'fixed') type <- 'fix'
if(type == 'optional') type <- 'opt'
new(Class='msgfParModification', name=name, composition=composition, mass=mass, residues=residues, type=type, position=position)
}
|
# Step1. Merges the training and the test sets to create one data set.
# setwd("G:/Coursera/courseraworkspace/gettingcleaningdata")
trainData <- read.table("./data/train/X_train.txt")
dim(trainData) # 7352*561
head(trainData)
trainLabel <- read.table("./data/train/y_train.txt")
table(trainLabel)
trainSubject <- read.table("./data/train/subject_train.txt")
testData <- read.table("./data/test/X_test.txt")
dim(testData) # 2947*561
testLabel <- read.table("./data/test/y_test.txt")
table(testLabel)
testSubject <- read.table("./data/test/subject_test.txt")
joinData <- rbind(trainData, testData)
dim(joinData) # 10299*561
joinLabel <- rbind(trainLabel, testLabel)
dim(joinLabel) # 10299*1
joinSubject <- rbind(trainSubject, testSubject)
dim(joinSubject) # 10299*1
# Step2. Extracts only the measurements on the mean and standard
# deviation for each measurement.
features <- read.table("./data/features.txt")
dim(features) # 561*2
meanStdIndices <- grep("mean\\(\\)|std\\(\\)", features[, 2])
length(meanStdIndices) # 66
joinData <- joinData[, meanStdIndices]
dim(joinData) # 10299*66
names(joinData) <- gsub("\\(\\)", "", features[meanStdIndices, 2]) # remove "()"
names(joinData) <- gsub("mean", "Mean", names(joinData)) # capitalize M
names(joinData) <- gsub("std", "Std", names(joinData)) # capitalize S
names(joinData) <- gsub("-", "", names(joinData)) # remove "-" in column names
# Step3. Uses descriptive activity names to name the activities in
# the data set
activity <- read.table("./data/activity_labels.txt")
activity[, 2] <- tolower(gsub("_", "", activity[, 2]))
substr(activity[2, 2], 8, 8) <- toupper(substr(activity[2, 2], 8, 8))
substr(activity[3, 2], 8, 8) <- toupper(substr(activity[3, 2], 8, 8))
activityLabel <- activity[joinLabel[, 1], 2]
joinLabel[, 1] <- activityLabel
names(joinLabel) <- "activity"
# Step4. Appropriately labels the data set with descriptive activity
# names.
names(joinSubject) <- "subject"
cleanedData <- cbind(joinSubject, joinLabel, joinData)
dim(cleanedData) # 10299*68
write.table(cleanedData, "merged_data.txt") # write out the 1st dataset
# Step5. Creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
subjectLen <- length(table(joinSubject)) # 30
activityLen <- dim(activity)[1] # 6
columnLen <- dim(cleanedData)[2]
result <- matrix(NA, nrow=subjectLen*activityLen, ncol=columnLen)
result <- as.data.frame(result)
colnames(result) <- colnames(cleanedData)
row <- 1
for(i in 1:subjectLen) {
for(j in 1:activityLen) {
result[row, 1] <- sort(unique(joinSubject)[, 1])[i]
result[row, 2] <- activity[j, 2]
bool1 <- i == cleanedData$subject
bool2 <- activity[j, 2] == cleanedData$activity
result[row, 3:columnLen] <- colMeans(cleanedData[bool1&bool2, 3:columnLen])
row <- row + 1
}
}
head(result)
write.table(result, "data_with_means.txt") # write out the 2nd dataset
# data <- read.table("./data_with_means.txt")
# data[1:12, 1:3]
# Step1. Merges the training and the test sets to create one data set.
# setwd("G:/Coursera/courseraworkspace/gettingcleaningdata")
trainData <- read.table("./data/train/X_train.txt")
dim(trainData) # 7352*561
head(trainData)
trainLabel <- read.table("./data/train/y_train.txt")
table(trainLabel)
trainSubject <- read.table("./data/train/subject_train.txt")
testData <- read.table("./data/test/X_test.txt")
dim(testData) # 2947*561
testLabel <- read.table("./data/test/y_test.txt")
table(testLabel)
testSubject <- read.table("./data/test/subject_test.txt")
joinData <- rbind(trainData, testData)
dim(joinData) # 10299*561
joinLabel <- rbind(trainLabel, testLabel)
dim(joinLabel) # 10299*1
joinSubject <- rbind(trainSubject, testSubject)
dim(joinSubject) # 10299*1
# Step2. Extracts only the measurements on the mean and standard
# deviation for each measurement.
features <- read.table("./data/features.txt")
dim(features) # 561*2
meanStdIndices <- grep("mean\\(\\)|std\\(\\)", features[, 2])
length(meanStdIndices) # 66
joinData <- joinData[, meanStdIndices]
dim(joinData) # 10299*66
names(joinData) <- gsub("\\(\\)", "", features[meanStdIndices, 2]) # remove "()"
names(joinData) <- gsub("mean", "Mean", names(joinData)) # capitalize M
names(joinData) <- gsub("std", "Std", names(joinData)) # capitalize S
names(joinData) <- gsub("-", "", names(joinData)) # remove "-" in column names
# Step3. Uses descriptive activity names to name the activities in
# the data set
activity <- read.table("./data/activity_labels.txt")
activity[, 2] <- tolower(gsub("_", "", activity[, 2]))
substr(activity[2, 2], 8, 8) <- toupper(substr(activity[2, 2], 8, 8))
substr(activity[3, 2], 8, 8) <- toupper(substr(activity[3, 2], 8, 8))
activityLabel <- activity[joinLabel[, 1], 2]
joinLabel[, 1] <- activityLabel
names(joinLabel) <- "activity"
# Step4. Appropriately labels the data set with descriptive activity
# names.
names(joinSubject) <- "subject"
cleanedData <- cbind(joinSubject, joinLabel, joinData)
dim(cleanedData) # 10299*68
write.table(cleanedData, "merged_data.txt") # write out the 1st dataset
# Step5. Creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
subjectLen <- length(table(joinSubject)) # 30
activityLen <- dim(activity)[1] # 6
columnLen <- dim(cleanedData)[2]
result <- matrix(NA, nrow=subjectLen*activityLen, ncol=columnLen)
result <- as.data.frame(result)
colnames(result) <- colnames(cleanedData)
row <- 1
for(i in 1:subjectLen) {
for(j in 1:activityLen) {
result[row, 1] <- sort(unique(joinSubject)[, 1])[i]
result[row, 2] <- activity[j, 2]
bool1 <- i == cleanedData$subject
bool2 <- activity[j, 2] == cleanedData$activity
result[row, 3:columnLen] <- colMeans(cleanedData[bool1&bool2, 3:columnLen])
row <- row + 1
}
}
head(result)
write.table(result, "data_with_means.txt") # write out the 2nd dataset
# data <- read.table("./data_with_means.txt")
# data[1:12, 1:3]
# Tally applicant counts per department.
# UCBAdmissions is a 3-D table (Admit x Gender x Dept); summing over the
# 3rd margin collapses admit status and gender, leaving one total per dept.
count <- apply(UCBAdmissions, 3L, sum)
# Draw the pie chart in greyscale, one slice per department.
# (Title is intentionally kept in Japanese: "UC Berkeley graduate
# applicants by department".)
pie(
  count,
  main = "UC Berkeleyの学部別大学院進学希望者",
  col = gray.colors(length(count))  # greyscale palette, one shade per slice
)
count <- apply(UCBAdmissions, 3L, sum)
# 円グラフをプロットする
pie(
count,
main = "UC Berkeleyの学部別大学院進学希望者",
col = gray.colors(length(count)) # グレースケールで表示
) |
## Knits all lecture notes for printing and regular
# project directory is one level above, so need to reference files with this directory in path
# Each .Rmd is rendered twice: once with default options (notes=FALSE), and
# once with notes=TRUE and no table of contents, written to
# printable/<first-5-chars-of-filename>_wnotes.html.
rm(list = ls())
for (iknit in list.files("./corelecturenotes", "\\.Rmd$")) {
  print(iknit)
  partname<-substr(iknit, 1, 5)
  # Regular render (speaker notes hidden)
  rmarkdown::render(paste0("corelecturenotes/",iknit),
                    params = list(notes=FALSE))
  # Printable render with notes, no TOC
  rmarkdown::render(paste0("corelecturenotes/",iknit),
                    output_options=list(toc=FALSE,toc_float=FALSE),
                    output_file = paste0('printable/',partname,'_wnotes.html'),
                    params = list(notes=TRUE))
  # NOTE(review): this wipes the whole workspace (including partname) on
  # every iteration. It works only because for() holds its sequence
  # internally and reassigns iknit each pass — fragile; confirm intended.
  rm(list = ls())
}
}
| /corelecturenotes/knitall.r | no_license | nuitrcs/r_intro_june2018 | R | false | false | 641 | r | ## Knits all lecture notes for printing and regular
# project directory is one level above, so need to reference files with this directory in path
rm(list = ls())
for (iknit in list.files("./corelecturenotes", "\\.Rmd$")) {
print(iknit)
partname<-substr(iknit, 1, 5)
rmarkdown::render(paste0("corelecturenotes/",iknit),
params = list(notes=FALSE))
rmarkdown::render(paste0("corelecturenotes/",iknit),
output_options=list(toc=FALSE,toc_float=FALSE),
output_file = paste0('printable/',partname,'_wnotes.html'),
params = list(notes=TRUE))
rm(list = ls())
}
|
#### Before running this script ensure that all the excel files and R scripts are placed in your R working directory, which can be checked by getwd()
#### The script creates the folders: parameters, errors and confidence_intervals in the R working directory. These folders contain the estimated parameters, standard errors and confidence intervals
# #### The script may take a couple of hours to run.
#
# Create the output folders (dir.create() only warns if they already exist).
dir.create("parameters")
dir.create("errors")
dir.create("confidence_intervals")
# Run the pipeline stages in order. NOTE(review): all objects written below
# (first_mix_it, mu_d_it, mu_s_it, Sens_it, TPM_it, Num, W_TPM_num,
# S_TPM_num, Expz, TPM_num, t, Category) are presumably created by these
# sourced Step scripts — confirm against their contents.
source("Step-1-Data-Entry.r")
source("Step-2-Data-Extraction.r")
source("Step-3-Data-Extraction.r")
source("Step-4-Defining-Functions.r")
source("Step-5-EM.r")
# Persist the fitted quantities from the final EM iteration (column t+1).
# 1/mu_* converts rate parameters into mean durations T_d / T_s.
write.csv(first_mix_it[,t+1], "parameters/First_mix.csv")
write.csv(1/mu_d_it[,t+1], "parameters/T_d.csv")
write.csv(1/mu_s_it[,t+1], "parameters/T_s.csv")
write.csv(Sens_it[,t+1], "parameters/Sensitivity.csv")
write.csv(TPM_it[,,t+1], "parameters/TPM.csv")
write.csv(Num[,1:Category,1,t], "parameters/Num_1.csv")
write.csv(Num[,1:Category,2,t], "parameters/Num_2.csv")
write.csv(Num[,1:Category,3,t], "parameters/Num_3.csv")
write.csv(Num[,1:Category,4,t], "parameters/Num_4.csv")
write.csv(Num[,1:Category,5,t], "parameters/Num_5.csv")
write.csv(W_TPM_num[,,t+1], "parameters/W_TPM_num.csv")
write.csv(S_TPM_num[,,t+1], "parameters/S_TPM_num.csv")
write.csv(Expz[,,t],"parameters/Expz.csv")
write.csv(TPM_num[,,t+1],"parameters/TPM_num.csv")
# Post-processing: errors and confidence intervals.
source("Step6-Reading parameters.r")
source("Step7-Ic.r")
source("Step8-Im.r")
source("Step 9 - Confidence intervals.r")
# Report elapsed time. NOTE(review): ptm is not set in this file — it is
# presumably `ptm <- proc.time()` inside one of the sourced scripts; verify.
proc.time() - ptm
proc.time() - ptm
| /Run_Me.R | no_license | simrita/TB-Pathways-Modeling-Patna | R | false | false | 1,574 | r | #### Before running this script ensure that all the excel files and R scripts are placed in your R woking directory which can be checked by getwd()
#### The script creates the folders : parameters , errors and confidence_intervals in the R working directory.This folder contains the estimated parameters, standard errors and confidence intervals
# #### The script may take a couple of hours to run.
#
dir.create("parameters")
dir.create("errors")
dir.create("confidence_intervals")
source("Step-1-Data-Entry.r")
source("Step-2-Data-Extraction.r")
source("Step-3-Data-Extraction.r")
source("Step-4-Defining-Functions.r")
source("Step-5-EM.r")
write.csv(first_mix_it[,t+1], "parameters/First_mix.csv")
write.csv(1/mu_d_it[,t+1], "parameters/T_d.csv")
write.csv(1/mu_s_it[,t+1], "parameters/T_s.csv")
write.csv(Sens_it[,t+1], "parameters/Sensitivity.csv")
write.csv(TPM_it[,,t+1], "parameters/TPM.csv")
write.csv(Num[,1:Category,1,t], "parameters/Num_1.csv")
write.csv(Num[,1:Category,2,t], "parameters/Num_2.csv")
write.csv(Num[,1:Category,3,t], "parameters/Num_3.csv")
write.csv(Num[,1:Category,4,t], "parameters/Num_4.csv")
write.csv(Num[,1:Category,5,t], "parameters/Num_5.csv")
write.csv(W_TPM_num[,,t+1], "parameters/W_TPM_num.csv")
write.csv(S_TPM_num[,,t+1], "parameters/S_TPM_num.csv")
write.csv(Expz[,,t],"parameters/Expz.csv")
write.csv(TPM_num[,,t+1],"parameters/TPM_num.csv")
source("Step6-Reading parameters.r")
source("Step7-Ic.r")
source("Step8-Im.r")
source("Step 9 - Confidence intervals.r")
proc.time() - ptm
|
# Kimminbeom_1209/1910
# Lab submission: each question below produces the requested ggplot2/base
# graphic. Korean strings in labs()/titles are part of the assignment and
# are kept verbatim.
# NOTE(review): install.packages() runs on every execution; comment it out
# after the first run if repeated installs are unwanted.
install.packages('tidyverse')
library(tidyverse)
library(ggplot2)

# Q1. Bar chart of gear counts in mtcars; x label "number of gears",
# y label "frequency" (both in Korean, per the assignment).
class(mtcars)
ggplot(data=mtcars,mapping=aes(x=mtcars$gear))+
  geom_bar(width=0.7,fill='blue')+
  labs(x='기어의 수',y='빈도수')

# Q2. Green bar chart of cylinder counts.
ggplot(data=mtcars,mapping = aes(x=mtcars$cyl))+
  geom_bar(fill='green')+
  labs(x='실린더의 수')

# Q3. Histogram of mpg with bin width 5.
ggplot(data = mtcars,mapping = aes(x=mtcars$mpg))+
  geom_histogram(binwidth = 5)

# Q4. Histogram of tree girth (bin width 3, steelblue bars).
library(treemap)
trees
class(trees)
ggplot(data=trees,mapping= aes(x=Girth))+
  geom_histogram(binwidth = 3,fill='steelblue')+
  labs(x='Girth',y='레이블')

# Q5. Scatter plot of mpg vs wt, point colour mapped to gear.
mtcars
ggplot(data=mtcars,mapping = aes(x= mtcars$mpg,y= mtcars$wt,col=gear,fill=gear))+geom_point()

# Q6. Box plots of mpg grouped by cylinder count.
ggplot(data=mtcars,mapping = aes(y=mtcars$mpg,x=mtcars$cyl,group=cyl,col=cyl,fill=cyl))+geom_boxplot()

# Q7. Line graph of projected total population (thousands), 2015-2026.
yea <- c(2015,2016,2017,2018,2019,2020,2021,2022,2023,2024,2025,2026)
# BUGFIX: the 2019 value was typed as 51911; the assignment's table
# specifies 51811.
pp <- c(51014,51245,51446,51635,51811,51973,52123,52261,52388,52504,52609,52704)
total <- data.frame(yea,pp)
total
colnames(total) <- c('연도','총인구')
total
ggplot(data=total,mapping=aes(x=total$연도,y=total$총인구))+geom_line(col='red')+
  labs(x='연도',y='총인구(천명)')

# Q8. Build the `us` dataset: 50 US states' statistics (state.x77) joined
# with their regional division, plus the state name as a column.
class(state.x77)
us <- data.frame(state.x77, state.division)
state.division
us <- data.frame(us,usname=rownames(us))
us

# Q8-(1). Treemap grouped by division; tile area = Population,
# tile colour = Income.
state.division
library(treemap)
treemap(us,
        index = c('state.division','usname'),
        vSize = 'Population',
        vColor = 'Income',
        type='value',
        title='U.S.A')
# Observation: East North Central is the most populous division, and the
# more populous tiles are darker, i.e. higher income.

# Q8-(2). Treemap; tile area = HS.Grad (graduation rate),
# tile colour = Murder (murder rate).
treemap(us,
        index = c('state.division','usname'),
        vSize = 'HS.Grad',
        vColor = 'Murder',
        type='value',
        title='U.S.A')
# Observation: tiles are fairly uniform in size; high-murder states cluster
# in a few divisions — East South Central is high regardless of
# graduation rate.

# Q8-(3). Bubble chart: x = Income, y = Illiteracy, bubble area =
# Population, green fill, state names inside the bubbles.
# BUGFIX: the original referenced the undefined object `st`; the data
# frame built above is named `us`.
symbols(us$Income,us$Illiteracy,circles = us$Population,bg='green')
text(us$Income,us$Illiteracy,rownames(us))
# Observation: states with higher illiteracy tend to have lower income.

# Q8-(4). Bubble chart: x = Illiteracy, y = Murder, bubble area = Area.
symbols(us$Illiteracy,us$Murder,circles = us$Area,bg='green')
text(us$Illiteracy,us$Murder,rownames(us))
# Observation: area is unrelated to murder rate, but higher illiteracy
# goes with a higher murder rate; states near zero illiteracy have the
# lowest murder rates.
| /DAY10_quiz.R | no_license | minbeom-151/WorkR | R | false | false | 6,461 | r | #Kimminbeom_1209/1910
# * 실습 결과를 R Script file로 제출
# * R Script file 이름은 "영문본인이름_제출일날짜.R" 부여하여 제출
# * R Script file의 처음에 주석으로 본인 이름과 작성일/제출일 기록
#
# 문1)
# R에서 제공하는 mtcars 데이터셋에서 gear(기어의 수)에 대해 ggplot으로
# 막대그래프를 작성하시오. 단, 제목과 x축 레이블은 ‘기어의 수’, y축 레이블
# 은 ‘빈도수’로 나타내시오.
#
install.packages('tidyverse')
library(tidyverse)
library(ggplot2)
class(mtcars)
ggplot(data=mtcars,mapping=aes(x=mtcars$gear))+
geom_bar(width=0.7,fill='blue')+
labs(x='기어의 수',y='빈도수')
# 문2)
# R에서 제공하는 mtcars 데이터셋에서 cyl(실린더의 수)에 대해 막대 색이
# 초록색인 막대그래프를 ggplot으로 작성하시오.
#
ggplot(data=mtcars,mapping = aes(x=mtcars$cyl))+
geom_bar(fill='green')+
labs(x='실린더의 수')
# 문3)
# R에서 제공하는 mtcars 데이터셋에서 mpg(연비)에 대해 구간 간격이 5.0
# 인 히스토그램을 ggplot으로 작성하시오.
#
ggplot(data = mtcars,mapping = aes(x=mtcars$mpg))+
geom_histogram(binwidth = 5)
# 문4)
# R에서 제공하는 trees 데이터셋의 Girth(나무 둘레)에 대해 ggplot으로
# 히스토그램을 작성하시오. 여기에서는 히스토그램의 제목, x축 레이블, y축
# 레이블을 한글로 표시하시오. (구간 간격은 3.0, 막대의 색은 steelblue로 한다.)
#
library(treemap)
trees
class(trees)
ggplot(data=trees,mapping= aes(x=Girth))+
geom_histogram(binwidth = 3,fill='steelblue')+
labs(x='Girth',y='레이블')
# 문5)
# R에서 제공하는 mtcars 데이터셋에서 mpg(연비)를 x축으로 하고, wt(중
# 량)를 y축으로 하는 산점도를 ggplot으로 작성하시오. (단, 점의 색은 gear의
# 수에 따라 다르게 표시한다.
mtcars
ggplot(data=mtcars,mapping = aes(x= mtcars$mpg,y= mtcars$wt,col=gear,fill=gear))+geom_point()
#
# 문6)
# R에서 제공하는 mtcars 데이터셋에서 mpg(연비)에 대해 ggplot으로 상
# 자그림을 작성하되, cyl(실린더 수)에 따라 그룹을 나누어 작성하시오.
#
ggplot(data=mtcars,mapping = aes(y=mtcars$mpg,x=mtcars$cyl,group=cyl,col=cyl,fill=cyl))+geom_boxplot()
# 문7)
# 다음은 2015년부터 2026년도까지의 예상 인구수 추계 자료이다. 연도를
# x축으로 하여 ggplot으로 선그래프를 작성하시오.
#
# 연도 총인구 (천명) 연도 총인구 (천명)
# 2015 51014 2021 52123
# 2016 51245 2022 52261
# 2017 51446 2023 52388
# 2018 51635 2024 52504
# 2019 51811 2025 52609
# 2020 51973 2026 52704
#
yea <- c(2015,2016,2017,2018,2019,2020,2021,2022,2023,2024,2025,2026)
pp <- c(51014,51245,51446,51635,51911,51973,52123,52261,52388,52504,52609,52704)
total <- data.frame(yea,pp)
total
colnames(total) <- c('연도','총인구')
total
ggplot(data=total,mapping=aes(x=total$연도,y=total$총인구))+geom_line(col='red')+
labs(x='연도',y='총인구(천명)')
# 문8)
# 다음과 같이 데이터셋 us를 생성한 후 물음에 답하시오. 여기서 state.x77
# 은 미국 50개 주의 통계정보가, state.division은 미국 50개 주의 지역 구분
# (예: 북부, 중부, 남부……) 정보가 저장된 데이터셋이다.
#
# us <- data.frame(state.x77, state.division)
class(state.x77)
us <- data.frame(state.x77, state.division)
state.division
us <- data.frame(us,usname=rownames(us))
us
#
# (1) 미국 50개 주에 대해 각각의 주들이 지역구분별로 묶인 트리맵을 작성하시오.
# 또한, 타일의 면적은 Population(인구수), 타일의 색은 Income(소득)으로 나타내고,
# 각각의 타일에는 주의 이름을 표시하시오. 마지막으로 이 트리맵에서 관찰할 수 있
# 는 것이 무엇인지 설명하시오
#
state.division
library(treemap)
treemap(us,
index = c('state.division','usname'),
vSize = 'Population',
vColor = 'Income',
type='value',
title='U.S.A')
#일단 East North Central 이 미국에서 가장 인구가 많은 지역이다. 인구가 많은 지역일수록 타일의 색이 진한것을 보아 소득도 많다는것을 알 수 있다.
# (2) 미국 50개 주에 대해 각각의 주들이 지역구분별로 묶인 트리맵을 작성하시오.
# 또한, 타일의 면적은 HS.Grad(고등학교 졸업률), 타일의 색은 Murder(범죄률)로 나타
# 내고, 각각의 타일에는 주의 이름을 표시하시오. 마지막으로 이 트리맵에서 관찰할
# 수 있는 것이 무엇인지 설명하시오.
#
treemap(us,
index = c('state.division','usname'),
vSize = 'HS.Grad',
vColor = 'Murder',
type='value',
title='U.S.A')
#모든 타일이 거의 균일하게 보이는데 그 중에서 범죄률이 높은 지역들은 타일이 가장 작거나 특정한 주의 소속되어있는것으로 보인다. 특히 East South Central은 고등학교 졸업률과는 상관없이 전부 범죄율이 높은것을 알 수 있다.
# (3) us 데이터셋에 대해 x축은 Income(소득), y축은 Illiteracy(문맹률), 원의 크기는
# Population(인구수), 원의 색은 green(초록색), 원 내부에는 주의 이름을 표시한 버
# 블차트를 작성하시오. 또한 이 버블차트에서 관찰할 수 있는 것이 무엇인지 설명하
# 시오.
#
symbols(us$Income,us$Illiteracy,circles = st$Population,bg='green')
text(us$Income,us$Illiteracy,rownames(us))
#문맹률이 높을수록 다른지역보다 소득이 적은것을 알수 있다.
# (4) us 데이터셋에 대해 x축은 Illiteracy(문맹률), y축은 Murder(범죄률), 원의 크기
# 는 Area(면적), 원의 색은 green(초록색), 원 내부에는 주의 이름을 표시한 버블차트
# 를 작성하시오. 또한 이 버블차트에서 관찰할 수 있는 것이 무엇인지 설명하시오.
symbols(us$Illiteracy,us$Murder,circles = st$Area,bg='green')
text(us$Illiteracy,us$Murder,rownames(us))
#면적과 범죄률과는 관계가 없으나 문맹률이 높을 수록범죄률이 높게 나오는것을 볼 수 있다. 또한 문맹률이 0에 가까운 지역일 수록 범죄율이 적다.
|
# Vector arithmetic demo: R's binary operators work element-wise, and a
# scalar operand is recycled across the whole vector. Each top-level
# expression auto-prints its result when the script is run.
earnings <- c(50,100,30)
earnings * 3        # scalar recycled: each element multiplied by 3
earnings / 2
earnings + 0.5
earnings            # the vector itself is unchanged by the lines above
earnings - 1
earnings ^ 2
expenses <- c(30, 40, 80)
earnings - expenses # equal-length vectors combine element by element
earnings + c(10, 20, 30)
earnings * c (1, 2, 3)
earnings / c(1, 2, 3)
bank <- earnings - expenses
sum(bank)           # net balance across all three entries
# Logical operations
earnings > expenses # element-wise comparison yields a logical vector
| /intro/VectorArithmetic.R | no_license | sunilkumarbm/MyR | R | false | false | 338 | r | earnings <- c(50,100,30)
earnings * 3
earnings / 2
earnings + 0.5
earnings
earnings - 1
earnings ^ 2
expenses <- c(30, 40, 80)
earnings - expenses
earnings + c(10, 20, 30)
earnings * c (1, 2, 3)
earnings / c(1, 2, 3)
bank <- earnings - expenses
sum(bank)
# Logical operations
earnings > expenses
|
# Create Shiny app object
# NOTE(review): `ui` and `server` are not defined in this snippet — they
# must exist in the calling environment (defined earlier in the full app
# file) before this line runs; confirm upstream.
shinyApp(ui = ui, server = server)
| /ShinyApp/shiny.R | no_license | lynkeib/WebProjects | R | false | false | 61 | r | # Create Shiny app object
shinyApp(ui = ui, server = server)
|
# mops_ppse analysis Script Basantpur 2017-09-19
# Calibration driver for the ECHSE hypsoRR rainfall-runoff model at the
# Basantpur gauge: defines all file paths/settings, loads parameter
# ranges, and (in the disabled if(FALSE) section) runs DDS optimisation
# via the ppso package and plots simulated vs observed discharge.
# NOTE(review): rm(list=ls()) + setwd() make this runnable only as a
# stand-alone script on the original machine's paths.
rm(list=ls())
# Working directory and functions ----
setwd("D:/Masterarbeit/Modellierung/Calibration_Ann")
# Defines objfunc / objfuncQ / process_catfiles / readcat used below
# (presumably — confirm against the funs file).
source("calibration/Kalib_Basantpur_imd4_7_funs.R")
# SETTINGS ----
# Calibration period and a warm-up period excluded from the objective.
periods <- data.frame(begin=ISOdatetime(2001,3,2,8,0,0),end=ISOdatetime(2010,12,31,23,0,0))
periods.ignore <- data.frame(begin=ISOdatetime(1988,1,1,0,0,0),end=ISOdatetime(1989,12,31,0,0,0))
outdir <- "calibration/out"
mymodelpath <- "D:/Masterarbeit/Modellierung/Echse/echse_engines/bin/hypsoRR" # change to ../Echse/ if necessary
myrangetable <- "calibration/tbl_ranges_bigger.txt"
# Simulated discharge output file and its column names.
sim_file <- c(qx_avg="calibration/out/gage_Basantpur.txt")
sim_colTime <- c("end_of_interval")
sim_colValue <- c("qx_avg")
# Observed discharge (tab-separated, time + value columns).
obs_file <- read.table("calibration/flow_Basantpur_0.txt", sep="\t", stringsAsFactors=FALSE, colClasses=c("POSIXct","numeric"), header=TRUE)
obs_colTime <- c("end_of_interval")
mops_log <- "calibration/mcs_and_pso.log"
ppso_log <- "calibration/dds.log"
ppso_proj <- "calibration/dds.pro"
parameter_fmt <- "" # set to "" for ECHSE and "%13.4f" for HBV Nordic
if_fails_clear <- c("out", "phiMod.err.html", "phiMod.log") #for ECHSE: c("out", "phiMod.err.html", "phiMod.log")
process_catfiles_args <- c( # Argument vector for process_catfiles
pattern = "calibration/out/cat_*.txt",
timecol = "end_of_interval",
varcol = "etr",
outfile = "calibration/outMonthly/etr_monthly.txt",
outfileq= "calibration/outMonthly/q_monthly.txt"
)
# Command-line style arguments handed to the ECHSE model executable.
model_args <- c(
file_control ="calibration/cnf_imd4_Basantpur_Ann.txt",
file_err = paste(outdir,"cnf_raingages.err.html",sep="/"),
file_log = paste(outdir,"cnf_raingages.log",sep="/"),
format_err = "html",
silent = "true",
table_objectDeclaration = "out_csa100_Arc_Gis_splitting/Basantpur/objDecl.gage_Basantpur.txt",
cat_hypsoRR_numParamsIndividual= "calibration/paramNum_cat_MCS_template_updated_mult_str_surf.txt",
cat_hypsoRR_numParamsShared = "../echse_projekt_Ann/data/params/cat_num_shared.txt",
outputDirectory = outdir,
file_template = "calibration/paramNum_cat_MCS_template_maik.txt", # previously paramNum_cat_MCS_template2_new.txt
file_result = "calibration/paramNum_cat_MCS_template_updated_mult_str_surf.txt",
char_open = "{",
char_close = "}"
)
# read parameter ranges
# NOTE(review): the ranges file is read twice — once for the min/max
# columns, once for the parameter-name column used as rownames.
param_bounds <- read.table(file=myrangetable,
header=TRUE,
sep="\t",
colClasses= c("character","numeric","numeric"))[,2:3]
rownames(param_bounds) <- read.table(file=myrangetable,
header=TRUE,
sep="\t",
colClasses= c("character","numeric","numeric"))[,1]
# Try things:
# readcat("calibration/out/cat_460.txt")
# process_catfiles(NULL, NULL, process_catfiles_args)
# plot(as.POSIXct(out$month), out$etr, type="l")
if(FALSE){ #don't run this part when sourcing, because it takes ages to run
# Calibration ----
# Call the optimisation routine from ppso package, using our objective function
# ############################
# Berry says: optim_dds tries to *minimize* mNSE / total_error.
# Maybe objfunc should be changed to return - mNSE ?
# ############################
result = ppso::optim_dds(
objective_function = objfuncQ, # objfuncQ to calibrate only on discharge, objfunc to calibrate on both discharge and ETR
number_of_parameters = length(param_bounds[,1]),
number_of_particles = 1,
max_number_function_calls = 500,
r = 0.2,
abstol = -Inf,
reltol = -Inf,
max_wait_iterations = 50,
parameter_bounds = param_bounds,
lhc_init = TRUE,
do_plot = NULL,
wait_for_keystroke = FALSE,
logfile = ppso_log,
projectfile = ppso_proj,
load_projectfile = "no",
break_file = NULL,
plot_progress = FALSE,
tryCall = FALSE)
# plot progress ----
ppso::plot_optimization_progress(
logfile = ppso_log,
projectfile = ppso_proj,
progress_plot_filename = NULL,
goodness_plot_filename = NULL,
cutoff_quantile = 0.95,
verbose = FALSE)
# produce data for plotting, run objfunc with best parameters ----
# First 11 columns of the project file hold the best_* parameter values;
# strip the "best_" prefix to recover the parameter names.
best_params <- t(read.table(ppso_proj, header=TRUE, sep="\t")[,1:11])[,1]
names(best_params) <- gsub("best_", "", names(best_params))
#run_with_best_params <- objfunc(best_params, keepmonthly=TRUE)
run_with_best_paramsQ <- objfuncQ(best_params, keepmonthly=TRUE)
obs_file$end_of_interval <- as.Date(obs_file$end_of_interval, format="%Y-%m-%d %H:%M:%S")
obs_file_subset <- subset(obs_file)
# Newest monthly discharge file produced by process_catfiles.
mfiles <- dir(dirname(process_catfiles_args["outfileq"]), full.names=TRUE); mfiles
monthly <- read.table(tail(mfiles,1), header=TRUE, stringsAsFactors=FALSE, sep="\t")
monthly$end_of_interval <- as.Date(monthly$end_of_interval, format="%Y-%m-%d %H:%M:%S")
# insert here aggregate - Fun = mean
# NOTE(review): all.x=T uses the reassignable T instead of TRUE — style risk.
merged_data <- merge(monthly, obs_file, by="end_of_interval", all.x=T)
merged_data$end_of_interval <- as.Date(merged_data$end_of_interval, format="%Y-%m-%d")
plot(merged_data$end_of_interval, merged_data$qx_avg.y, xaxt="n", type="l", las=1)
lines(merged_data$end_of_interval, merged_data$qx_avg.x, col="red")
berryFunctions::monthAxis(ym=TRUE)
max(obs_file$qx_avg) # BLACK (observed streamflow)
max(monthly$qx_avg) # RED -> (calibrated streamflow)
}
| /Calib_Basantpur_imd4_7_code.R | no_license | ann-kra/mahanadi | R | false | false | 6,014 | r |
# mops_ppse analysis Script Basantpur 2017-09-19
rm(list=ls())
# Working directory and functions ----
setwd("D:/Masterarbeit/Modellierung/Calibration_Ann")
source("calibration/Kalib_Basantpur_imd4_7_funs.R")
# SETTINGS ----
periods <- data.frame(begin=ISOdatetime(2001,3,2,8,0,0),end=ISOdatetime(2010,12,31,23,0,0))
periods.ignore <- data.frame(begin=ISOdatetime(1988,1,1,0,0,0),end=ISOdatetime(1989,12,31,0,0,0))
outdir <- "calibration/out"
mymodelpath <- "D:/Masterarbeit/Modellierung/Echse/echse_engines/bin/hypsoRR" # ggf. ändern in ../Echse/
myrangetable <- "calibration/tbl_ranges_bigger.txt"
sim_file <- c(qx_avg="calibration/out/gage_Basantpur.txt")
sim_colTime <- c("end_of_interval")
sim_colValue <- c("qx_avg")
obs_file <- read.table("calibration/flow_Basantpur_0.txt", sep="\t", stringsAsFactors=FALSE, colClasses=c("POSIXct","numeric"), header=TRUE)
obs_colTime <- c("end_of_interval")
mops_log <- "calibration/mcs_and_pso.log"
ppso_log <- "calibration/dds.log"
ppso_proj <- "calibration/dds.pro"
parameter_fmt <- "" # set to "" for ECHSE and "%13.4f" for HBV Nordic
if_fails_clear <- c("out", "phiMod.err.html", "phiMod.log") #for ECHSE: c("out", "phiMod.err.html", "phiMod.log")
process_catfiles_args <- c( # Argument vector for process_catfiles
pattern = "calibration/out/cat_*.txt",
timecol = "end_of_interval",
varcol = "etr",
outfile = "calibration/outMonthly/etr_monthly.txt",
outfileq= "calibration/outMonthly/q_monthly.txt"
)
model_args <- c(
file_control ="calibration/cnf_imd4_Basantpur_Ann.txt",
file_err = paste(outdir,"cnf_raingages.err.html",sep="/"),
file_log = paste(outdir,"cnf_raingages.log",sep="/"),
format_err = "html",
silent = "true",
table_objectDeclaration = "out_csa100_Arc_Gis_splitting/Basantpur/objDecl.gage_Basantpur.txt",
cat_hypsoRR_numParamsIndividual= "calibration/paramNum_cat_MCS_template_updated_mult_str_surf.txt",
cat_hypsoRR_numParamsShared = "../echse_projekt_Ann/data/params/cat_num_shared.txt",
outputDirectory = outdir,
file_template = "calibration/paramNum_cat_MCS_template_maik.txt", # vorher paramNum_cat_MCS_template2_new.txt
file_result = "calibration/paramNum_cat_MCS_template_updated_mult_str_surf.txt",
char_open = "{",
char_close = "}"
)
# read parameter ranges
param_bounds <- read.table(file=myrangetable,
header=TRUE,
sep="\t",
colClasses= c("character","numeric","numeric"))[,2:3]
rownames(param_bounds) <- read.table(file=myrangetable,
header=TRUE,
sep="\t",
colClasses= c("character","numeric","numeric"))[,1]
# Try things:
# readcat("calibration/out/cat_460.txt")
# process_catfiles(NULL, NULL, process_catfiles_args)
# plot(as.POSIXct(out$month), out$etr, type="l")
if(FALSE){ #don't run this part when sourcing, because it takes ages to run
# Calibration ----
# Call the optimisation routine from ppso package, using our objective function
# ############################
# Berry says: optim_dds tries to *minimize* mNSE / total_error.
# Maybe objfunc should be changed to return - mNSE ?
# ############################
result = ppso::optim_dds(
objective_function = objfuncQ, # objfuncQ to calibrate only on discharge, objfunc to calibrate on both discharge and ETR
number_of_parameters = length(param_bounds[,1]),
number_of_particles = 1,
max_number_function_calls = 500,
r = 0.2,
abstol = -Inf,
reltol = -Inf,
max_wait_iterations = 50,
parameter_bounds = param_bounds,
lhc_init = TRUE,
do_plot = NULL,
wait_for_keystroke = FALSE,
logfile = ppso_log,
projectfile = ppso_proj,
load_projectfile = "no",
break_file = NULL,
plot_progress = FALSE,
tryCall = FALSE)
# plot progress ----
ppso::plot_optimization_progress(
logfile = ppso_log,
projectfile = ppso_proj,
progress_plot_filename = NULL,
goodness_plot_filename = NULL,
cutoff_quantile = 0.95,
verbose = FALSE)
# produce data for plotting, run objfunc with best parameters ----
best_params <- t(read.table(ppso_proj, header=TRUE, sep="\t")[,1:11])[,1]
names(best_params) <- gsub("best_", "", names(best_params))
#run_with_best_params <- objfunc(best_params, keepmonthly=TRUE)
run_with_best_paramsQ <- objfuncQ(best_params, keepmonthly=TRUE)
obs_file$end_of_interval <- as.Date(obs_file$end_of_interval, format="%Y-%m-%d %H:%M:%S")
obs_file_subset <- subset(obs_file)
mfiles <- dir(dirname(process_catfiles_args["outfileq"]), full.names=TRUE); mfiles
monthly <- read.table(tail(mfiles,1), header=TRUE, stringsAsFactors=FALSE, sep="\t")
monthly$end_of_interval <- as.Date(monthly$end_of_interval, format="%Y-%m-%d %H:%M:%S")
# insert here aggregate - Fun = mean
merged_data <- merge(monthly, obs_file, by="end_of_interval", all.x=T)
merged_data$end_of_interval <- as.Date(merged_data$end_of_interval, format="%Y-%m-%d")
plot(merged_data$end_of_interval, merged_data$qx_avg.y, xaxt="n", type="l", las=1)
lines(merged_data$end_of_interval, merged_data$qx_avg.x, col="red")
berryFunctions::monthAxis(ym=TRUE)
max(obs_file$qx_avg) # BLACK (observed streamflow)
max(monthly$qx_avg) # RED -> (calibrated streamflow)
}
|
## Example 10.6.2 Pg.515
## Partial correlation coefficient
# Observed data: w (response) with covariates p and s, n = 29 cases each.
w <- c(193.6,137.5,145.4,117,105.4,99.9,74,74.4,112.8,125.4,126.5,115.9,98.8,94.3,99.9,83.3,72.8,83.5,59,87.2,84.4,78.1,51.9,57.1,54.7,78.6,53.7,96,89)
p <- c(6.24,8.03,11.62,7.68,10.72,9.28,6.23,8.67,6.91,7.51,10.01,8.70,5.87,7.96,12.27,7.33,11.17,6.03,7.90,8.27,11.05,7.61,6.21,7.24,8.11,10.05,8.79,10.40,11.72)
s <- c(30.1,22.2,25.7,28.9,27.3,33.4,26.4,17.2,15.9,12.2,30,24,22.6,18.2,11.5,23.9,11.2,15.6,10.6,24.7,25.6,18.4,13.5,12.2,14.8,8.9,14.9,10.3,15.4)
# Multiple regression of w on both covariates
reg <- lm(w ~ p + s)
reg
summary(reg)
# Residual series of every single-predictor regression, in the same
# order as res1..res6 of the textbook solution.
res_list <- list(
  residuals(lm(w ~ p)), # res1
  residuals(lm(s ~ p)), # res2
  residuals(lm(w ~ s)), # res3
  residuals(lm(p ~ s)), # res4
  residuals(lm(p ~ w)), # res5
  residuals(lm(s ~ w))  # res6
)
# Spearman correlation of every residual pair. combn() enumerates the
# pairs (1,2),(1,3),...,(5,6), which is exactly the p1..p15 order of the
# original hand-written calls.
p <- apply(combn(6L, 2L), 2L,
           function(k) cor(res_list[[k[1]]], res_list[[k[2]]], method = "spearman"))
p
# Answers might slightly differ due to approximation
| /Biostatistics:_Basic_Concepts_And_Methodology_For_The_Health_Sciences_by_Daniel_W._Wayne,_Chad_L._Cross/CH10/EX10.6.2/Ex10_6_2.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 1,566 | r | ##Example 10.6.2 Pg.515
## Partial correlation coefficient -- Example 10.6.2, p. 515
# Observed data for the three study variables.
w <- c(193.6,137.5,145.4,117,105.4,99.9,74,74.4,112.8,125.4,126.5,115.9,98.8,94.3,99.9,83.3,72.8,83.5,59,87.2,84.4,78.1,51.9,57.1,54.7,78.6,53.7,96,89)
p <- c(6.24,8.03,11.62,7.68,10.72,9.28,6.23,8.67,6.91,7.51,10.01,8.70,5.87,7.96,12.27,7.33,11.17,6.03,7.90,8.27,11.05,7.61,6.21,7.24,8.11,10.05,8.79,10.40,11.72)
s <- c(30.1,22.2,25.7,28.9,27.3,33.4,26.4,17.2,15.9,12.2,30,24,22.6,18.2,11.5,23.9,11.2,15.6,10.6,24.7,25.6,18.4,13.5,12.2,14.8,8.9,14.9,10.3,15.4)
# Full multiple regression of w on p and s.
reg <- lm(w ~ p + s)
reg
summary(reg)
# Residuals of every simple regression; pairs of these residual vectors
# define the partial correlations of interest.
res1 <- residuals(lm(w ~ p))
res2 <- residuals(lm(s ~ p))
res3 <- residuals(lm(w ~ s))
res4 <- residuals(lm(p ~ s))
res5 <- residuals(lm(p ~ w))
res6 <- residuals(lm(s ~ w))
# Spearman correlation for every unordered pair of residual vectors.
# combn(6, 2) enumerates the pairs (1,2), (1,3), ..., (5,6), which is
# exactly the order of the original p1..p15.
res_list <- list(res1, res2, res3, res4, res5, res6)
p <- combn(6, 2, function(idx) {
  cor(res_list[[idx[1]]], res_list[[idx[2]]], method = "spearman")
})
p
# Answers might slightly differ due to approximation
|
# Import function
source("./src/Deep_Learning_with_R-Springer/4.DNN_Optimization_using_regularization.R")
# DNN function
# Trains a fully connected deep neural network with mini-batch optimisation.
#
# Args:
#   X, Y                  : training predictors / labels.
#   X_test, Y_test        : held-out predictors / labels for the final report.
#   layers_dims           : layer sizes, forwarded to initialize_params().
#   hidden_layer_act      : hidden-layer activation name.
#   output_layer_act      : output activation ('softmax' or a binary one).
#   optimizer             : 'gd', 'momentum' or 'adam'.
#   learning_rate         : step size for parameter updates.
#   mini_batch_size       : rows per mini-batch (see random_mini_batches()).
#   num_epochs            : epochs; the loop runs num_epochs + 1 times (0:num_epochs).
#   initialization        : weight-initialisation scheme.
#   beta                  : momentum coefficient.
#   beta1, beta2, epsilon : Adam hyper-parameters.
#   keep_prob             : dropout keep probability (1 = no dropout).
#   lambd                 : L2 regularisation strength (0 = none).
#   print_cost            : if TRUE, prints the cost every 1000 epochs.
#
# Returns: list("parameters" = fitted weights, "costs" = per-epoch cost list).
DNN_model <- function(X,
                      Y,
                      X_test,
                      Y_test,
                      layers_dims,
                      hidden_layer_act,
                      output_layer_act,
                      optimizer,
                      learning_rate,
                      mini_batch_size,
                      num_epochs,
                      initialization,
                      beta,
                      beta1,
                      beta2,
                      epsilon,
                      keep_prob,
                      lambd,
                      print_cost = F){
  start_time <- Sys.time()
  costs <- NULL
  converged = FALSE  # NOTE(review): assigned but never used
  param <- NULL      # NOTE(review): assigned but never used
  t = 0              # Adam time step counter
  iter = 0           # NOTE(review): incremented but never read
  # NOTE(review): this creates a *variable* named `set.seed`; it does NOT
  # seed the RNG. Almost certainly intended set.seed(1).
  set.seed = 1
  seed = 10
  num_classes = length(unique(Y))
  parameters = initialize_params(layers_dims, initialization)
  # NOTE(review): initialize_adam() is invoked twice for one result pair.
  v = initialize_adam(parameters)[["V"]]
  s = initialize_adam(parameters)[["S"]]
  velocity = initialize_velocity(parameters)
  for(i in 0:num_epochs){
    seed = seed + 1  # fresh shuffling seed each epoch
    iter = iter + 1
    minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
    for(batch in 1:length(minibatches)){
      mini_batch_X = (minibatches[[batch]][['mini_batch_X']])
      mini_batch_Y = minibatches[[batch]][['mini_batch_Y']]
      # Forward pass. NOTE(review): each forward_prop*() call below repeats
      # the same computation just to extract a different list element.
      if(keep_prob == 1){
        AL = forward_prop(mini_batch_X, parameters, hidden_layer_act,
                          output_layer_act)[['AL']]
        caches = forward_prop(mini_batch_X, parameters, hidden_layer_act,
                              output_layer_act)[['caches']]
      }
      else if(keep_prob < 1){
        AL = forward_prop_Reg(mini_batch_X, parameters, hidden_layer_act,
                              output_layer_act, keep_prob)[['AL']]
        caches = forward_prop_Reg(mini_batch_X, parameters,
                                  hidden_layer_act,
                                  output_layer_act,
                                  keep_prob)[['caches']]
        dropout_matrix = forward_prop_Reg(mini_batch_X, parameters,
                                          hidden_layer_act,
                                          output_layer_act,
                                          keep_prob)[['dropout_matrix']]
      }
      cost <- compute_cost_with_Reg(AL, mini_batch_X, mini_batch_Y,
                                    num_classes,
                                    parameters,
                                    lambd,
                                    output_layer_act)
      # Backward propagation
      # NOTE(review): no branch covers lambd != 0 & keep_prob < 1, so
      # `gradients` is stale (or missing) in that configuration.
      if(lambd == 0 & keep_prob == 1){
        gradients = back_prop(AL, mini_batch_Y, caches,
                              hidden_layer_act, output_layer_act)
      }
      else if(lambd != 0 & keep_prob == 1){
        gradients = back_prop_Reg(AL, mini_batch_X, mini_batch_Y,
                                  num_classes,
                                  caches, hidden_layer_act,
                                  output_layer_act, keep_prob = 1,
                                  dropout_matrix, lambd)
      }
      else if(lambd == 0 & keep_prob < 1){
        gradients = back_prop_Reg(AL, mini_batch_X, mini_batch_Y,
                                  num_classes, caches,
                                  hidden_layer_act,
                                  output_layer_act, keep_prob,
                                  dropout_matrix, lambd = 0)
      }
      # Parameter update. NOTE(review): the momentum/adam branches call the
      # update function repeatedly, and the later calls receive the
      # *already updated* parameters rather than the pre-update ones.
      if(optimizer == 'gd'){
        parameters = update_params(parameters, gradients, learning_rate)
      }
      else if(optimizer == 'momentum'){
        parameters = update_params_with_momentum(parameters, gradients, velocity,
                                                 beta,
                                                 learning_rate)[["parameters"]]
        velocity = update_params_with_momentum(parameters, gradients, velocity,
                                               beta,
                                               learning_rate)[["Velocity"]]
      }
      else if(optimizer == 'adam'){
        t = t + 1
        parameters = update_params_with_adam(parameters, gradients, v, s, t,
                                             beta1, beta2,
                                             learning_rate,
                                             epsilon)[["parameters"]]
        v = update_params_with_adam(parameters, gradients, v, s, t,
                                    beta1, beta2,
                                    learning_rate,
                                    epsilon)[["Velocity"]]
        s = update_params_with_adam(parameters, gradients, v, s, t,
                                    beta1, beta2,
                                    learning_rate,
                                    epsilon)[["S"]]
      }
    }
    # Keep the cost of the last mini-batch as this epoch's cost.
    costs <- append(costs, list(cost))
    if(print_cost == T & i %% 1000 == 0){
      cat(sprintf("Cost after epoch %d = %05f\n", i, cost))
    }
  }
  # Final train/test accuracy report. The softmax branch subtracts 1 from the
  # predictions before comparing -- presumably predict_model() returns
  # 1-based class indices there; TODO confirm against predict_model().
  if(output_layer_act != 'softmax'){
    pred_train <- predict_model(parameters, X,
                                hidden_layer_act,
                                output_layer_act)
    Tr_acc <- mean(pred_train == Y) * 100
    pred_test <- predict_model(parameters, X_test,
                               hidden_layer_act,
                               output_layer_act)
    Ts_acc <- mean(pred_test == Y_test) * 100
    cat(sprintf("Cost after epoch %d, = %05f;
Train Acc: %#.3f, Test Acc: %#.3f, \n",
                i, cost, Tr_acc, Ts_acc))
  }
  else if(output_layer_act == 'softmax'){
    pred_train <- predict_model(parameters, X,
                                hidden_layer_act, output_layer_act)
    Tr_acc <- mean((pred_train - 1) == Y)
    pred_test <- predict_model(parameters, X_test,
                               hidden_layer_act, output_layer_act)
    Ts_acc <- mean((pred_test - 1) == Y_test)
    cat(sprintf("Cost after epoch , %d, = %05f;
Train Acc: %#.3f, Test Acc: %#.3f, \n",
                i, cost, Tr_acc, Ts_acc))
  }
  end_time <- Sys.time()
  cat(sprintf("Application running time: %#.3f seconds\n",
              end_time - start_time ))
  return(list("parameters" = parameters, "costs" = costs))
} | /src/Deep_Learning_with_R-Springer/5.DNN_Model.R | no_license | mohrosidi/deep_learning_r | R | false | false | 6,303 | r | # Import function
source("./src/Deep_Learning_with_R-Springer/4.DNN_Optimization_using_regularization.R")
# DNN function
# Trains a fully connected deep neural network with mini-batch optimisation.
#
# Args:
#   X, Y                  : training predictors / labels.
#   X_test, Y_test        : held-out predictors / labels for the final report.
#   layers_dims           : layer sizes, forwarded to initialize_params().
#   hidden_layer_act      : hidden-layer activation name.
#   output_layer_act      : output activation ('softmax' or a binary one).
#   optimizer             : 'gd', 'momentum' or 'adam'.
#   learning_rate         : step size for parameter updates.
#   mini_batch_size       : rows per mini-batch (see random_mini_batches()).
#   num_epochs            : epochs; the loop runs num_epochs + 1 times (0:num_epochs).
#   initialization        : weight-initialisation scheme.
#   beta                  : momentum coefficient.
#   beta1, beta2, epsilon : Adam hyper-parameters.
#   keep_prob             : dropout keep probability (1 = no dropout).
#   lambd                 : L2 regularisation strength (0 = none).
#   print_cost            : if TRUE, prints the cost every 1000 epochs.
#
# Returns: list("parameters" = fitted weights, "costs" = per-epoch cost list).
#
# Fixes relative to the previous version:
#   * set.seed(1) actually seeds the RNG (old code assigned 1 to a variable
#     literally named `set.seed`).
#   * forward/backward/update helpers are called once per step and their
#     results cached; the momentum/Adam states are now derived from the same
#     call that produced the new parameters (the old second/third calls fed
#     the *already updated* parameters back in).
#   * the lambd != 0 & keep_prob < 1 configuration is now handled instead of
#     silently reusing stale gradients.
DNN_model <- function(X,
                      Y,
                      X_test,
                      Y_test,
                      layers_dims,
                      hidden_layer_act,
                      output_layer_act,
                      optimizer,
                      learning_rate,
                      mini_batch_size,
                      num_epochs,
                      initialization,
                      beta,
                      beta1,
                      beta2,
                      epsilon,
                      keep_prob,
                      lambd,
                      print_cost = FALSE){
  start_time <- Sys.time()
  costs <- NULL
  t <- 0  # Adam time step counter
  set.seed(1)  # BUG FIX: previously `set.seed = 1` created a variable, not a seed
  seed <- 10
  num_classes <- length(unique(Y))
  parameters <- initialize_params(layers_dims, initialization)
  adam_state <- initialize_adam(parameters)  # single call instead of two
  v <- adam_state[["V"]]
  s <- adam_state[["S"]]
  velocity <- initialize_velocity(parameters)
  for (i in 0:num_epochs) {
    seed <- seed + 1  # fresh shuffling seed each epoch
    minibatches <- random_mini_batches(X, Y, mini_batch_size, seed)
    for (batch in seq_along(minibatches)) {
      mini_batch_X <- minibatches[[batch]][["mini_batch_X"]]
      mini_batch_Y <- minibatches[[batch]][["mini_batch_Y"]]
      # Forward pass -- computed once, elements extracted from the cached result.
      dropout_matrix <- NULL  # only populated when dropout is active
      if (keep_prob == 1) {
        fwd <- forward_prop(mini_batch_X, parameters, hidden_layer_act,
                            output_layer_act)
      } else {
        fwd <- forward_prop_Reg(mini_batch_X, parameters, hidden_layer_act,
                                output_layer_act, keep_prob)
        dropout_matrix <- fwd[["dropout_matrix"]]
      }
      AL <- fwd[["AL"]]
      caches <- fwd[["caches"]]
      cost <- compute_cost_with_Reg(AL, mini_batch_X, mini_batch_Y,
                                    num_classes, parameters, lambd,
                                    output_layer_act)
      # Backward pass. BUG FIX: the regularised path now also covers
      # lambd != 0 together with keep_prob < 1.
      if (lambd == 0 && keep_prob == 1) {
        gradients <- back_prop(AL, mini_batch_Y, caches,
                               hidden_layer_act, output_layer_act)
      } else {
        gradients <- back_prop_Reg(AL, mini_batch_X, mini_batch_Y,
                                   num_classes, caches,
                                   hidden_layer_act, output_layer_act,
                                   keep_prob, dropout_matrix, lambd)
      }
      # Parameter update -- one helper call, both outputs taken from it.
      if (optimizer == 'gd') {
        parameters <- update_params(parameters, gradients, learning_rate)
      } else if (optimizer == 'momentum') {
        upd <- update_params_with_momentum(parameters, gradients, velocity,
                                           beta, learning_rate)
        parameters <- upd[["parameters"]]
        velocity <- upd[["Velocity"]]
      } else if (optimizer == 'adam') {
        t <- t + 1
        upd <- update_params_with_adam(parameters, gradients, v, s, t,
                                       beta1, beta2, learning_rate, epsilon)
        parameters <- upd[["parameters"]]
        v <- upd[["Velocity"]]
        s <- upd[["S"]]
      }
    }
    # Keep the cost of the last mini-batch as this epoch's cost.
    costs <- append(costs, list(cost))
    if (isTRUE(print_cost) && i %% 1000 == 0) {
      cat(sprintf("Cost after epoch %d = %05f\n", i, cost))
    }
  }
  # Final train/test accuracy report. For softmax outputs the predicted class
  # indices are shifted by -1 before comparison, matching the original code;
  # presumably predict_model() returns 1-based indices there.
  pred_train <- predict_model(parameters, X, hidden_layer_act, output_layer_act)
  pred_test <- predict_model(parameters, X_test, hidden_layer_act, output_layer_act)
  if (output_layer_act == 'softmax') {
    Tr_acc <- mean((pred_train - 1) == Y)
    Ts_acc <- mean((pred_test - 1) == Y_test)
  } else {
    Tr_acc <- mean(pred_train == Y) * 100
    Ts_acc <- mean(pred_test == Y_test) * 100
  }
  cat(sprintf("Cost after epoch %d = %05f; Train Acc: %#.3f, Test Acc: %#.3f\n",
              i, cost, Tr_acc, Ts_acc))
  end_time <- Sys.time()
  cat(sprintf("Application running time: %#.3f seconds\n",
              end_time - start_time))
  return(list("parameters" = parameters, "costs" = costs))
} |
# Draws a random n-qubit product-state ket.
# Each qubit is sampled at polar angles theta ~ U(0, pi) and phi ~ U(0, 2*pi)
# and built as cos(theta/2)|0> + e^{i*phi} sin(theta/2)|1>; the qubits are
# combined left to right with tensor().
ranket <- function(n){
  im <- complex(1, 0, 1)  # the imaginary unit i
  draw_qubit <- function(){
    theta <- runif(n = 1, min = 0, max = pi)
    phi <- runif(n = 1, min = 0, max = 2 * pi)
    ket(cos(theta / 2), exp(im * phi) * sin(theta / 2))
  }
  # max(n, 1): like the original, at least one qubit is always drawn.
  # The left fold reproduces tensor(...(tensor(q1, q2), q3)...).
  Reduce(tensor, lapply(seq_len(max(n, 1)), function(k) draw_qubit()))
}
| /R/ranket.R | no_license | cran/QuantumOps | R | false | false | 430 | r |
# Generates a random n-qubit product-state ket.
# Each qubit is drawn by sampling polar coordinates theta ~ U(0, pi) and
# phi ~ U(0, 2*pi) and forming cos(theta/2)|0> + e^{i*phi} sin(theta/2)|1>.
# NOTE(review): uniform theta is not the Haar-uniform (sin-weighted)
# distribution on the Bloch sphere -- confirm whether that is intended.
ranket <- function(n){
  i <- complex(1,0,1)  # the imaginary unit
  theta <- runif(n=1,min=0,max=pi) #Pick random polar coordinates
  phi <- runif(n=1,min=0,max=2*pi)
  v <- ket( cos(theta/2) , exp(i*phi)*sin(theta/2)) #Create ket
  if(n > 1){
    for(j in 2:n){ #Repeat for as many qubits specified
      theta <- runif(n=1,min=0,max=pi)
      phi <- runif(n=1,min=0,max=2*pi)
      v <- tensor( v , ket( cos(theta/2) , exp(i*phi)*sin(theta/2)) )
    }
  }
  v
}
|
# Plot 1: total PM2.5 emissions in the US by year.
source("load_data_set.R")  # loads the NEI data frame
library(dplyr)
# Total emissions per year (tons), summed over every source.
plot1data <- NEI %>%
  group_by(year) %>%
  summarise(Emissions = sum(Emissions))
# Render the bar chart to PNG; heights rescaled to thousands of tons.
png(filename = "plot1.png")
with(plot1data, barplot(
  Emissions / 1000,
  names.arg = year,
  xlab = "Year",
  ylab = "Emissions of PM2.5 (in thousand tons)",
  main = "Change in PM2.5 emissions"
))
dev.off()
| /plot1.R | no_license | amadeann/DS-EDA-project-2 | R | false | false | 399 | r | source("load_data_set.R")
library(dplyr)
plot1data <- NEI %>% group_by(year) %>% summarise(Emissions = sum(Emissions))
png(filename = "plot1.png")
# Plots the emissions in thousands of 1000 tones
barplot(
plot1data$Emissions/1000,
names.arg = plot1data$year,
xlab = "Year",
ylab = "Emissions of PM2.5 (in thousand tons)",
main = "Change in PM2.5 emissions"
)
dev.off()
|
#' Cooks' D chart
#'
#' @description
#' Chart of cook's distance to detect observations that strongly influence
#' fitted values of the model.
#'
#' @param model An object of class \code{lm}.
#' @param print_plot logical; if \code{TRUE}, prints the plot else returns a plot object.
#'
#' @details
#' Cook's distance was introduced by American statistician R Dennis Cook in
#' 1977. It is used to identify influential data points. It depends on both the
#' residual and leverage, i.e. it takes into account both the \emph{x} value and
#' \emph{y} value of the observation.
#'
#' Steps to compute Cook's distance:
#'
#' \itemize{
#' \item Delete observations one at a time.
#' \item Refit the regression model on remaining \eqn{n - 1} observations
#' \item examine how much all of the fitted values change when the ith observation is deleted.
#' }
#'
#' A data point having a large cook's d indicates that the data point strongly influences the fitted values.
#'
#' @return \code{ols_plot_cooksd_chart} returns a list containing the
#' following components:
#'
#' \item{outliers}{a \code{data.frame} with observation number and \code{cooks distance} that exceed \code{threshold}}
#' \item{threshold}{\code{threshold} for classifying an observation as an outlier}
#'
#' @section Deprecated Function:
#' \code{ols_cooksd_chart()} has been deprecated. Instead use \code{ols_plot_cooksd_chart()}.
#'
#' @examples
#' model <- lm(mpg ~ disp + hp + wt, data = mtcars)
#' ols_plot_cooksd_chart(model)
#'
#' @importFrom ggplot2 geom_linerange
#'
#' @seealso [ols_plot_cooksd_bar()]
#'
#' @export
#'
# Builds the Cook's D chart for a fitted model. With print_plot = TRUE the
# chart is printed; otherwise a list(plot, outliers, threshold) is returned.
ols_plot_cooksd_chart <- function(model, print_plot = TRUE) {
  check_model(model)
  # Dummy bindings so R CMD check does not flag the NSE column names in aes().
  obs <- NULL
  ckd <- NULL
  txt <- NULL
  cd <- NULL
  # Precomputed chart data (incl. threshold k$ts), labelled observations,
  # and the table of threshold-exceeding points.
  k <- ols_prep_cdplot_data(model)
  d <- ols_prep_outlier_obs(k)
  f <- ols_prep_cdplot_outliers(k)
  # Assemble the chart layer by layer instead of one long `+` chain.
  p <- ggplot(d, aes(x = obs, y = cd, label = txt, ymin = min(cd), ymax = cd))
  p <- p + geom_linerange(colour = "blue")
  p <- p + geom_point(shape = 1, colour = "blue")
  p <- p + geom_hline(yintercept = k$ts, colour = "red")
  p <- p + xlab("Observation")
  p <- p + ylab("Cook's D")
  p <- p + ggtitle("Cook's D Chart")
  p <- p + geom_text(vjust = -1, size = 3, family = "serif",
                     fontface = "italic", colour = "darkred", na.rm = TRUE)
  p <- p + annotate("text", x = Inf, y = Inf, hjust = 1.2, vjust = 2,
                    family = "serif", fontface = "italic", colour = "darkred",
                    label = paste("Threshold:", round(k$ts, 3)))
  # Guard clause: return the pieces when no printing was requested.
  if (!print_plot) {
    return(list(plot = p, outliers = f, threshold = k$ts))
  }
  suppressWarnings(print(p))
}
#' @export
#' @rdname ols_plot_cooksd_chart
#' @usage NULL
#'
# Deprecated alias kept for backward compatibility: it only signals the
# deprecation (pointing callers at ols_plot_cooksd_chart()) and draws nothing.
ols_cooksd_chart <- function(model) {
  .Deprecated("ols_plot_cooksd_chart()")
}
| /R/ols-cooks-d-chart.R | no_license | comatrion/olsrr | R | false | false | 2,709 | r | #' Cooks' D chart
#'
#' @description
#' Chart of cook's distance to detect observations that strongly influence
#' fitted values of the model.
#'
#' @param model An object of class \code{lm}.
#' @param print_plot logical; if \code{TRUE}, prints the plot else returns a plot object.
#'
#' @details
#' Cook's distance was introduced by American statistician R Dennis Cook in
#' 1977. It is used to identify influential data points. It depends on both the
#' residual and leverage, i.e. it takes into account both the \emph{x} value and
#' \emph{y} value of the observation.
#'
#' Steps to compute Cook's distance:
#'
#' \itemize{
#' \item Delete observations one at a time.
#' \item Refit the regression model on remaining \eqn{n - 1} observations
#' \item examine how much all of the fitted values change when the ith observation is deleted.
#' }
#'
#' A data point having a large cook's d indicates that the data point strongly influences the fitted values.
#'
#' @return \code{ols_plot_cooksd_chart} returns a list containing the
#' following components:
#'
#' \item{outliers}{a \code{data.frame} with observation number and \code{cooks distance} that exceed \code{threshold}}
#' \item{threshold}{\code{threshold} for classifying an observation as an outlier}
#'
#' @section Deprecated Function:
#' \code{ols_cooksd_chart()} has been deprecated. Instead use \code{ols_plot_cooksd_chart()}.
#'
#' @examples
#' model <- lm(mpg ~ disp + hp + wt, data = mtcars)
#' ols_plot_cooksd_chart(model)
#'
#' @importFrom ggplot2 geom_linerange
#'
#' @seealso [ols_plot_cooksd_bar()]
#'
#' @export
#'
# Builds the Cook's D chart for a fitted model. With print_plot = TRUE the
# chart is printed; otherwise a list(plot, outliers, threshold) is returned.
ols_plot_cooksd_chart <- function(model, print_plot = TRUE) {
  # Validate the fitted model before any preparation work.
  check_model(model)
  # NULL bindings keep R CMD check quiet about the NSE names used in aes();
  # `ckd` appears unused beyond that.
  obs <- NULL
  ckd <- NULL
  txt <- NULL
  cd <- NULL
  k <- ols_prep_cdplot_data(model)   # chart data, incl. threshold k$ts
  d <- ols_prep_outlier_obs(k)       # observations with outlier labels (txt)
  f <- ols_prep_cdplot_outliers(k)   # data.frame of threshold-exceeding points
  # Lollipop-style chart: one vertical range + open point per observation,
  # a red threshold line, labels on the outliers, and the threshold value
  # annotated in the top-right corner.
  p <- ggplot(d, aes(x = obs, y = cd, label = txt, ymin = min(cd), ymax = cd)) +
    geom_linerange(colour = "blue") + geom_point(shape = 1, colour = "blue") +
    geom_hline(yintercept = k$ts, colour = "red") + xlab("Observation") +
    ylab("Cook's D") + ggtitle("Cook's D Chart") +
    geom_text(vjust = -1, size = 3, family = "serif", fontface = "italic",
              colour = "darkred", na.rm = TRUE) +
    annotate(
      "text", x = Inf, y = Inf, hjust = 1.2, vjust = 2,
      family = "serif", fontface = "italic", colour = "darkred",
      label = paste("Threshold:", round(k$ts, 3))
    )
  if (print_plot) {
    suppressWarnings(print(p))
  } else {
    return(list(plot = p, outliers = f, threshold = k$ts))
  }
}
#' @export
#' @rdname ols_plot_cooksd_chart
#' @usage NULL
#'
# Deprecated alias kept for backward compatibility: it only signals the
# deprecation (pointing callers at ols_plot_cooksd_chart()) and draws nothing.
ols_cooksd_chart <- function(model) {
  .Deprecated("ols_plot_cooksd_chart()")
}
|
## Richter's Predictor Modeling Earthquake Damage
## Creating Neural Network Classifier to be used in blended ensemble model.
# Kylie Foster
# Useful websites:
## Loading required packages -----
#library(klaR)
library(plyr) # to rename factors using revalue
library(tidyverse)
library(caret) # for all models
library(MLmetrics) # additional metrics
library(tictoc) # to check timing of models
library(e1071)
library(nnet) # for neural network
library(corrplot) # to plot correlations
library(RColorBrewer) # nice colours for plots
## Function to calculate F1 micro score -----
# caret-compatible summaryFunction that returns micro-averaged F1.
#
# data  : data.frame supplied by caret with factor columns `pred` and `obs`.
# lev, model : part of the caret summaryFunction signature; unused here.
#
# Returns a named numeric vector c(F1 = ...). For micro averaging, precision,
# recall and F1 all reduce to overall accuracy: correct / total.
f1_micro <- function(data, lev = NULL, model = NULL) {
  Tot <- dim(data)[1]                               # total number of observations
  conf <- confusionMatrix(data$pred, data$obs)[[2]] # confusion matrix table
  # GENERALIZED: sum the full diagonal instead of hard-coding three classes,
  # so the metric works for any number of outcome levels.
  TP <- sum(diag(conf))                             # correctly classified cases
  f1 <- TP / Tot  # micro Precision, micro Recall and micro F1 are all equal
  c(F1 = f1)
}
## Loading training data -----
# Load the response labels (building_id + damage_grade).
train_labels <- read_csv("./Richters_Predictor/data/train_labels.csv")
# Load the predictors; character columns become factors for modelling.
train_values <- read_csv("./Richters_Predictor/data/train_values.csv") %>%
  mutate_if(is.character, as.factor) # convert characters to factors
# Join labels and predictors on the shared building identifier.
train <- full_join(train_labels, train_values, by = "building_id")
# Drop fine-grained location codes and the identifier itself.
train <- select(train, -geo_level_2_id, -geo_level_3_id, -building_id)
# Keep only the top-11 predictors by random-forest variable importance.
train <- select(train, damage_grade, geo_level_1_id, area_percentage, age, height_percentage,
                foundation_type, ground_floor_type, count_families, roof_type,
                other_floor_type, land_surface_condition, position)
# Random-forest importance scores behind the selection above:
# geo_level_1_id 100.000
# area_percentage 49.127
# age 45.710
# height_percentage 29.764
# foundation_type 18.002
# ground_floor_type 15.714
# count_families 12.521
# roof_type 11.786
# other_floor_type 11.421
# land_surface_condition 11.300
# position 11.039
# One-hot encode every factor predictor.
dmy <- dummyVars(" ~ .", data = train) # dummyVars from caret package
train <- data.frame(predict(dmy, newdata = train)) %>%
  mutate(damage_grade = as.factor(damage_grade))
# Recode the damage levels to syntactically valid names; needed because
# classProbs = TRUE requires valid R names for the class levels.
train$damage_grade <- revalue(train$damage_grade,
                              c("1" = "low", "2"="med", "3"="high"))
# Correlation check (left disabled; several predictors are correlated):
# corrplot(cor(select_if(train, is.numeric)), type="upper", order="hclust",
#          col=brewer.pal(n=8, name="RdYlBu"))
# There are some highly correlated predictors.
# Stratified 75/25 split; layer-1 models are fit on the 75% portion.
set.seed(200)
train_index <- createDataPartition(train$damage_grade, p = 0.75,
                                   list = FALSE,
                                   times = 1)
train_layer1 <- train[train_index, ] # used to fit models in first layer of ensemble
#train_layer2 <- train[-train_index, ] # used to fit final model combining layer 1 model predictions
## Setting up code that is common for all first level models -----
# 5-fold cross-validation scored with the custom micro-F1 summary function.
my_control <- trainControl(method = "cv", # for cross-validation
                           number = 5, # number of k-folds
                           summaryFunction = f1_micro,
                           allowParallel = TRUE,
                           classProbs = TRUE,
                           verboseIter = TRUE)
seed <- 123
metric <- "F1"
## Neural Network -----
set.seed(seed)
# Tuning grid over hidden-layer size and weight-decay regularisation.
nnet_grid <- expand.grid(.size = c(10, 20, 25, 30, 35), # number of units in the single hidden layer
                         .decay = c(0.5, 0.1, 1e-2, 1e-3)) # regularisation parameter to avoid over-fitting
tic()
nn_model <- train(x = train_layer1[, -1],
                  y = train_layer1$damage_grade,
                  method = "nnet",
                  metric = metric,
                  trControl = my_control,
                  tuneGrid = nnet_grid,
                  preProc = c("center", "scale", "nzv","corr")) # nzv checks for near zero variance predictors
toc() # previous full run: 19566.42 sec
# Print the cross-validation results (best F1 observed: 0.6441581).
print(nn_model) # 0.6441581
# Plot tuning results to a JPEG.
jpeg("nn_nondefault.jpg")
plot(nn_model)
dev.off()
# Persist the fitted model for the blending stage.
saveRDS(nn_model, "./nn_model_nondefault.rds")
| /blending_nn_nondefault.R | no_license | kylie-foster/Richters_Predictor | R | false | false | 4,656 | r | ## Richter's Predictor Modeling Earthquake Damage
## Creating Neural Network Classifier to be used in blended ensemble model.
# Kylie Foster
# Useful websites:
## Loading required packages -----
#library(klaR)
library(plyr) # to rename factors using revalue
library(tidyverse)
library(caret) # for all models
library(MLmetrics) # additional metrics
library(tictoc) # to check timing of models
library(e1071)
library(nnet) # for neural network
library(corrplot) # to plot correlations
library(RColorBrewer) # nice colours for plots
## Function to calculate F1 micro score -----
# caret-compatible summaryFunction returning micro-averaged F1.
# `data` carries factor columns `pred` and `obs`; `lev`/`model` belong to the
# caret summaryFunction signature and are unused here.
# NOTE(review): the diagonal sum is hard-coded for exactly three classes;
# sum(diag(conf)) would generalise it.
f1_micro <- function(data, lev = NULL, model = NULL) {
  Tot <- dim(data)[1] # total number of observations
  conf <- confusionMatrix(data$pred, data$obs)[[2]] # confusion matrix table
  TP <- conf[1, 1] + conf[2, 2] + conf[3, 3] # true positives
  f1 <- TP/Tot # micro Precision, micro Recall and micro F1 are all equal
  c(F1 = f1)
}
## Loading training data -----
# Loading labels
train_labels <- read_csv("./Richters_Predictor/data/train_labels.csv")
# Loading predictors
train_values <- read_csv("./Richters_Predictor/data/train_values.csv") %>%
mutate_if(is.character, as.factor) # convert characters to factors
# Combining data sets
train <- full_join(train_labels, train_values, by = "building_id")
# Removing useless variables
train <- select(train, -geo_level_2_id, -geo_level_3_id, -building_id)
# Keeping only variables with high importance from random forest results (keeping top 11 predictors)
train <- select(train, damage_grade, geo_level_1_id, area_percentage, age, height_percentage,
foundation_type, ground_floor_type, count_families, roof_type,
other_floor_type, land_surface_condition, position)
# geo_level_1_id 100.000
# area_percentage 49.127
# age 45.710
# height_percentage 29.764
# foundation_type 18.002
# ground_floor_type 15.714
# count_families 12.521
# roof_type 11.786
# other_floor_type 11.421
# land_surface_condition 11.300
# position 11.039
# Converting all factors to dummy variables
dmy <- dummyVars(" ~ .", data = train) # dummyVars from caret package
train <- data.frame(predict(dmy, newdata = train)) %>%
mutate(damage_grade = as.factor(damage_grade))
# Changing levels of Y to prevent problems with level names later.
train$damage_grade <- revalue(train$damage_grade,
c("1" = "low", "2"="med", "3"="high"))
# Checking correlations
# corrplot(cor(select_if(train, is.numeric)), type="upper", order="hclust",
# col=brewer.pal(n=8, name="RdYlBu"))
# There are some highly correlated predictors.
# splitting into two data sets (75/25%)
set.seed(200)
train_index <- createDataPartition(train$damage_grade, p = 0.75,
list = FALSE,
times = 1)
train_layer1 <- train[train_index, ] # used to fit models in first layer of ensemble
#train_layer2 <- train[-train_index, ] # used to fit final model combining layer 1 model predictions
## Setting up code that is common for all first level models -----
my_control <- trainControl(method = "cv", # for “cross-validation”
number = 5, # number of k-folds
summaryFunction = f1_micro,
allowParallel = TRUE,
classProbs = TRUE,
verboseIter = TRUE)
seed <- 123
metric <- "F1"
## Neural Network -----
set.seed(seed)
nnet_grid <- expand.grid(.size = c(10, 20, 25, 30, 35), # number of units in the single hidden layer
.decay = c(0.5, 0.1, 1e-2, 1e-3)) # regularisation parameter to avoid over-fitting
tic()
nn_model <- train(x = train_layer1[, -1],
y = train_layer1$damage_grade,
method = "nnet",
metric = metric,
trControl = my_control,
tuneGrid = nnet_grid,
preProc = c("center", "scale", "nzv","corr")) # nzv checks for near zero variance predictors
toc() # 19566.42 sec
# Printing results
print(nn_model) # 0.6441581
# Plotting results
jpeg("nn_nondefault.jpg")
plot(nn_model)
dev.off()
# save the model to disk
saveRDS(nn_model, "./nn_model_nondefault.rds")
|
# Plot 3: PM2.5 emissions in Baltimore City by source type over time.
library(ggplot2);
# Load the NEI summary data and the source classification table.
summary_data <- readRDS("summarySCC_PM25.rds");
source_classification_code <- readRDS("Source_Classification_Code.rds");
# Keep only Baltimore City records (fips == "24510").
baltimore_data <- subset(summary_data, fips == "24510")
# Total emissions per year and source type.
aggregated_by_year_type <- aggregate(Emissions ~ year + type , baltimore_data, sum);
# Base plot: one bar per year, filled by source type.
g <- ggplot(aggregated_by_year_type, aes(x = factor(year),y = Emissions, fill = type))
# One facet per source type; bar heights come straight from the aggregated totals.
gg <- g + geom_bar(stat="identity") +
  facet_grid(.~type) +
  labs(x="Year", y="PM2.5 Emissions", title="PM2.5 Emissions by Type in Baltimore City");
# Render the chart to a PNG device.
png("Plot3.png",width=1000,height=500,bg="white");
print(gg);
dev.off(); | /4 Exploratory Data Analysis/Project2/Plot3.R | no_license | yadderace/CourseraAssignments | R | false | false | 745 | r | library(ggplot2);
#Load the data
summary_data <- readRDS("summarySCC_PM25.rds");
source_classification_code <- readRDS("Source_Classification_Code.rds");
#Subsetting the data from Baltimore City
baltimore_data <- subset(summary_data, fips == "24510")
# Sum all the emissions by year.
aggregated_by_year_type <- aggregate(Emissions ~ year + type , baltimore_data, sum);
#Create the g object
g <- ggplot(aggregated_by_year_type, aes(x = factor(year),y = Emissions, fill = type))
#Add configuration to the graphic
gg <- g + geom_bar(stat="identity") +
facet_grid(.~type) +
labs(x="Year", y="PM2.5 Emissions", title="PM2.5 Emissions by Type in Baltimore City");
png("Plot3.png",width=1000,height=500,bg="white");
print(gg);
dev.off(); |
# Batch-renders the TopGo.Rmd report once per genome assembly.
# NOTE(review): `rm(list = ls())` wipes the calling environment and is
# discouraged in scripts that may be source()d -- confirm it is wanted here.
rm(list = ls())
# Install function for packages: attaches `x`, installing it from CRAN first
# whenever require() reports it is unavailable.
packages<-function(x){
  x<-as.character(match.call()[[2]])  # allow an unquoted package name
  if (!require(x,character.only=TRUE)){
    install.packages(pkgs=x,repos="http://cran.r-project.org")
    require(x,character.only=TRUE)
  }
}
packages(knitr)
packages(rmarkdown)
# Genome assemblies; each one gets its own GO-analysis HTML report.
genomes <- c("Moghe2014", "Jeong2016", "Kitashiba2014", "Mitsui2015", "RR3NYEST")
for (genome in genomes){
  # Output: <genome>_GoAnalysis.html under ../DEseqOutput/<genome>_GS
  rmarkdown::render("TopGo.Rmd", output_file=paste( genome, "_GoAnalysis.html", sep=""),
                    output_dir=paste("../DEseqOutput/", genome, "_GS", sep=""))
} | /RunTopGo.R | no_license | ACharbonneau/zealous-octo-tanuki | R | false | false | 569 | r | rm(list = ls())
# Install function for packages
packages<-function(x){
x<-as.character(match.call()[[2]])
if (!require(x,character.only=TRUE)){
install.packages(pkgs=x,repos="http://cran.r-project.org")
require(x,character.only=TRUE)
}
}
packages(knitr)
packages(rmarkdown)
genomes <- c("Moghe2014", "Jeong2016", "Kitashiba2014", "Mitsui2015", "RR3NYEST")
for (genome in genomes){
rmarkdown::render("TopGo.Rmd", output_file=paste( genome, "_GoAnalysis.html", sep=""),
output_dir=paste("../DEseqOutput/", genome, "_GS", sep=""))
} |
#' Creates a wordevok object.
#'
#' \code{as.wordevok} creates an object of the wordevok class from a data.frame object.
#'@param data A data.frame class object containing evocations. It may contain a column
#'identifying the individuals.
#'@param index String with the name of the column identifying the individuals.
#'@param na specifies how the missing information is informed in the original dataset.
#'For instance, if “na” is recorded as string (“Missing” or “NA” or “na” or
#'“null”), this argument should be specified accordingly: \code{na}=“Missing”
#'
#'@return \code{as.wordevok} returns a \code{wordevok} object. This class is based on a
#'list containing the following slots:
#'
#'\code{index}: stores the identifier of the evocation vectors and
#' has the name of the string inserted in the argument \code{index} of the
#' function \code{as.wordevok}. If \code{index = NULL}, the slot is named "Index".
#'
#'\code{Dataset}: stores the dataset organized according to the rules
#'established in \code{details}. Its rows are named according to the assigned
#'identifier.
#'
#'\code{Evocations}: stores the vector of standardized single evocations
#'in the dataset.
#'
#'\code{Loop}: stores the loops found (same evocation appearing two or
#'more times in the same row). Loops must be removed before using other
#'\code{wordevok} functions. Duplicates can be removed by the
#'\code{removing_loops_wordevok} function.
#'
#'@author Wesley H. S. Pereira (ICEx/UFMG), Denise Duarte (ICEx/UFMG), Rodrigo B. Ribeiro (IMPA), Gilvan Guedes (CEDEPLAR/UFMG).
#'@references Abric, J. C. (1994) Las representations sociales: aspects theoriques. In: Abric, J. C. Pratiques sociales et representations.
#' Paris: Presses Universitaires de France.
#'@references Abric, J. C. (2001) A structural approach to social representations.
#'In: Abric, J. C. Representations of the social: Bridging theoretical traditions.
#' Malden, UK: Blackwell Publishing.
#' @references Pereira, W. H. S. (2017). Representacao da estrutura do pensamento coletivo sobre
#' as enchentes do Rio Doce: conectando indivíduos afins através da teoria dos grafos.
#' Monografia (Graduacao em Estatistica) - Universidade Fedral de Minas Gerais.
#'@details
#'The \code{wordevok} class was developed to transform evocation data based on the
#'instrument used in the Free Words Association Technique (FWAT) into relational data
#'to be used in Social Network Analysis. Since evocations based on the FWAT instrument
#'are ranked by order of importance and qualitative coding and standardization of these
#'evocations may produce induced loops, it is likely that loop elimination causes blank
#'spaces between evocations for a person’s vector of evocations. The \code{wordevok} function
#'automatically corrects these blank spaces, moving evocations of lower order of
#'importance to the left of the evocation vector.
#'
#'The \code{as.wordevok} function provides the option to index each respondent
#'by an identifier (\code{string} or \code{numeric}). This identifier must occupy a column
#'in the \code{data.frame}
#'object and its name must be indicated by a \code{string} in the \code{index} parameter
#'of the function. For instance: \code{index} = "ID". If \code{index = NULL}, the function automatically indexes
#'individuals by a \code{numeric} identifier.
#'@examples
#'data("preferences")
#'pref = as.wordevok(preferences,index = "Name")
#'pref
#'@export
# Builds a `wordevok` object from a data.frame of free-word evocations.
# See the roxygen block above for the user-facing contract. Internals:
#   1. optionally recode a textual missing marker (`na`) to NA,
#   2. attach/derive the respondent identifier column (`index`),
#   3. left-compact each respondent's evocations so NAs trail,
#   4. collect the unique standardized evocations,
#   5. detect loops (the same evocation twice in one row).
as.wordevok = function(data, index = NULL, na = NULL)
{
  # Recode an explicit missing-value marker (e.g. "Missing") to NA.
  if(!is.null(na))
  {
    data = apply(data, 2, as.character)
    for(i in seq_len(dim(data)[2]))
    {
      # BUG FIX: use which() so pre-existing NAs are skipped; a bare logical
      # comparison yields NA subscripts, which error inside assignments.
      data[which(data[, i] == na), i] = NA
    }
    data = data.frame(data)
  }
  # inherits() also accepts data.frame subclasses (e.g. tibbles), unlike the
  # old first-class-only comparison.
  if(!inherits(data, "data.frame"))
  {
    stop("Only objects of the data.frame class as supported by this function.")
  }
  # Move the identifier to the first column, creating a sequential "Index"
  # identifier when none is supplied.
  if (!is.null(index)){
    idx = as.character(data[, match(index, colnames(data))])
    # drop = FALSE keeps a data.frame even with a single evocation column.
    data = data[, -match(index, colnames(data)), drop = FALSE]
    name = colnames(data)
    data = data.frame(idx, data)
    colnames(data) = c(index, name)
  } else {
    index = "Index"
    idx = seq_len(dim(data)[1])  # seq_len() is safe for zero-row input
    name = colnames(data)
    data = data.frame(idx, data)
    colnames(data) = c("Index", name)
  }
  name = colnames(data)
  idx = as.character(data[, 1])
  evocation = data[, -1, drop = FALSE]
  base = apply(evocation, 2, as.character)
  if(dim(evocation)[1] == 1)
  {
    # apply() collapses a single-row frame to a vector; restore matrix shape.
    base = t(base)
  }
  # Left-compact each row: evocations first, NAs pushed to the right.
  for (i in seq_len(dim(evocation)[1]))
  {
    exchange = which(is.na(base[i, ]))
    if(length(exchange) != 0)
    {
      base[i, ] = c(base[i, -exchange], base[i, exchange])
    }
  }
  # Reassemble identifier + compacted evocations into one data.frame.
  out = matrix(ncol = 1 + dim(evocation)[2], nrow = dim(evocation)[1])
  out[, 1] = idx
  out[, 2:(1 + dim(evocation)[2])] = base
  colnames(out) = name
  out = as.data.frame(out)
  # Drop respondents with no evocations at all (first slot NA after shifting).
  out = out[!is.na(out[, 2]), ]
  output = list()
  output[[index]] = out[, 1]
  rownames(out) = out[, 1]
  out = out[, -1, drop = FALSE]
  base = apply(out, 2, as.character)
  # BUG FIX: key the transpose to the *filtered* frame, not the original
  # row count, since rows may have been removed just above.
  if(dim(out)[1] == 1)
  {
    base = t(base)
  }
  rownames(base) = rownames(out)
  colnames(base) = colnames(out)
  # Unique, non-missing standardized evocations (column-major order, as before).
  evocations = unique(as.vector(base))
  evocations = evocations[!is.na(evocations)]
  # Detect loops: the same evocation appearing twice in one respondent's row.
  loop = data.frame(NA, NA, NA, NA)
  colnames(loop) = c(index, "Evocation", "Source", "Target")
  iter = 0
  if(dim(base)[2] >= 2)  # guard: pair scan needs at least two columns
  {
    for(i in seq_len(dim(base)[1]))
    {
      for(j in 1:(dim(base)[2] - 1))
      {
        for(k in (j + 1):(dim(base)[2]))
        {
          if(!is.na(base[i, j]) && !is.na(base[i, k]))
          {
            if(base[i, j] == base[i, k])
            {
              iter = iter + 1
              loop[iter, 1] = rownames(base)[i]
              loop[iter, 2] = base[i, j]
              loop[iter, 3] = colnames(base)[j]
              loop[iter, 4] = colnames(base)[k]
            }
          }
        }
      }
    }
  }
  output$Dataset = out
  output$Evocations = sort(evocations)
  if(iter == 0)
  {
    output$Loop = NULL
  } else {
    output$Loop = as.data.frame(loop)
    warning("The wordevok file contains loops.")
  }
  class(output) = "wordevok"
  return(output)
}
#' Removal of loops of a wordevok object.
#'
#' This function removes the loops from a wordevok object.
#'@param wordevok A wordevok class object.
#'@return \code{removing_loops_wordevok} returns a wordevok object without loops.
#'@author Wesley Henrique Silva Pereira
#'@details
#'Under the Free Word Association Technique, original evocations can vary from
#'single words to complete sentences. However, the analysis of this original
#'evocation set is challenging, since evocations are sometimes too specific.
#'Moreover, although certain evocations are syntactically distinct, they can be
#'semantically equivalent. In this case, qualitative standardization of the original
#'evocations is used, giving rise to unique standardized evocations. These evocations
#'can be found in the slot Evocations of wordevok object.
#'
#'The standardization process creates unnatural loops in the evocation dataset:
#'the appearance of the same evocation twice for the same individual. In Graph Theory,
#'there are applications where such loops are acceptable. For the analysis of collective
#'meanings, however, this behavior does not make sense and represents a loop induced by
#'the standardization process. The best alternative to this problem is to manually try to
#'reclassify one of the repeated evocations in a different semantic class that agrees with
#'its original meaning. When manual reclassification is not possible, the
#'removing_loops_wordevok function removes from the wordevok object the repeated
#'evocation that has been assigned to the lower order of importance. In addition,
#'the function automatically rearranges the remaining evocations so that the NA
#'observations are placed in the rightmost positions of the vector.
#'
#'@references Pereira, W. H. S. (2017). Representacao da estrutura do pensamento coletivo sobre
#' as enchentes do Rio Doce: conectando indivíduos afins através da teoria dos grafos.
#' Monografia (Graduacao em Estatistica) - Universidade Fedral de Minas Gerais.
#'@examples
#'#Creating a wordevok object:
#'data("preferences")
#'pref = as.wordevok(preferences,index = "Name")
#'pref
#'#Removing loops:
#'noloop.pref = removing_loops_wordevok(pref)
#'noloop.pref
#'@export
removing_loops_wordevok = function(wordevok)
{
# Guard: the function relies on the Dataset/Loop slots of a wordevok object.
if(class(wordevok)[1] != "wordevok")
{
stop ("Only objects of the wordevok class as supported by this function.")
}
# No recorded loops: the object is already clean, return it untouched.
if(is.null(wordevok$Loop))
{
return(wordevok)
} else {
loop = wordevok$Loop
dataset = wordevok$Dataset
# Work on a character matrix copy of the dataset; apply() drops the
# dimnames, so they are restored explicitly.
base = apply(dataset,2,as.character)
colnames(base) = colnames(dataset)
rownames(base) = rownames(dataset)
# For each recorded loop, blank the occurrence stored in the "Target"
# column, i.e. the repeated evocation with the lower order of importance.
for (i in 1:dim(loop)[1])
{
base[match(loop[i,1],rownames(base)),loop[i,4]] = NA
}
}
wordevok$Loop = NULL
# Re-scan the cleaned matrix for remaining loops: an individual may have
# evoked the same standardized evocation more than twice, in which case a
# single removal pass is not enough.
loop = data.frame(NA,NA,NA,NA)
colnames(loop) = c(names(wordevok)[1],"Evocation","Source","Target")
iter = 0
for(i in 1:dim(base)[1])
{
for(j in 1:(dim(base)[2]-1))
{
for(k in (j+1):(dim(base)[2]))
{
if(!is.na(base[i,j]) && !is.na(base[i,k]))
{
if(base[i,j] == base[i,k])
{
# Same evocation appears twice in row i: record it as a loop.
iter = iter + 1
loop[iter,1] = rownames(base)[i]
loop[iter,2] = base[i,j]
loop[iter,3] = colnames(base)[j]
loop[iter,4] = colnames(base)[k]
}
}
}
}
}
if(iter == 0)
{
wordevok$Loop = NULL
} else {
warning("This dataset contains loops")
wordevok$Loop = as.data.frame(loop)
}
# Compact every row so the surviving evocations keep their relative order
# and all NA cells are pushed to the rightmost positions.
for (i in 1:dim(base)[1])
{
exchange = which(is.na(base[i,]) == TRUE)
if(length(exchange)!=0)
{
base[i,] = c(base[i,-exchange],base[i,exchange])
}
}
wordevok$Dataset = as.data.frame(base)
return(wordevok)
}
#' Measuring affinity within two individuals.
#'
#' Under Free Word Association Technique, this function measures the affinity
#' between the evoked vectors of two individuals.
#'@param subject1 Evoked vector of the individual 1.
#'@param subject2 Evoked vector of the individual 2.
#'@return \code{affinity} returns a numeric measure of affinity between the vectors
#'inputed.
#'@author Wesley H. S. Pereira (ICEx/UFMG), Denise Duarte (ICEx/UFMG), Rodrigo B. Ribeiro (IMPA), Gilvan Guedes (CEDEPLAR/UFMG).
#'@details
#'
#'Under the criteria of the Free Word Association Technique, we propose a
#'coefficient to calculate the affinity (similarity) between two vectors of evocations.
#'Details on the affinity coefficient can be found in
#'\url{http://wesleyhenriquesp.wixsite.com/rwordevok/affinity}.
#'
#'The maximum number of evocations allowed to a vector is controlled by the length
#'of the vectors. Therefore, the vectors \code{subject1} and \code{subject2}
#'must have equal size. If an individual pronounces less than the maximum \code{N},
#'the vector must be structured so that n evocations pronounced are in the first \code{n}
#'positions of the vector according to their importance to the individual and the
#'remaining \code{N - n} positions must be filled with NA observations.
#'
#'@references Pereira, W. H. S. (2017).
#'@examples
#'#Creating evocation's vectors:
#'Murillo = c("Regression Analysis","Multivariate Statistics",
#'"General Statistiscs", "Experiment Planning", "Sampling")
#'Ingrid = c("Regression Analysis","Multivariate Statistics",
#'"Temporal Series","Statistical Quality Control", NA)
#'affinity(Murillo,Ingrid)
#'@export
affinity = function(subject1,subject2)
{
  # Affinity (similarity) coefficient between two evocation vectors.
  # Both vectors must have the same length N (the maximum number of
  # evocations allowed); unused positions are NA-padded.
  if(length(subject1) != length(subject2))
  {
    stop("Arguments must be the same size.")
  }
  # Number of evocations actually pronounced by each individual.
  n1 = length(subject1[!is.na(subject1)])
  n2 = length(subject2[!is.na(subject2)])
  n = max(n1, n2)
  N = length(subject1)
  # Degenerate case: both vectors are entirely NA.  The original code
  # divided by zero here (and iterated over 1:0); two empty evocation
  # sets share nothing, so their affinity is defined as 0.
  if(n == 0)
  {
    return(0)
  }
  denominator = n * n * (n + 1)
  # omega_n penalises individuals that evoked fewer than the maximum N.
  cor1 = (N - n) * (N - n + 1)
  cor2 = N * (N + 1)
  omega_n = 1 - cor1/cor2
  affinity_coefficient = 0
  # Accumulate theta (importance of the two positions) * rho (positional
  # proximity) over every pair of matching evocations; && short-circuits,
  # so the equality test never sees an NA.
  for (i in seq_len(n))
  {
    for (j in seq_len(n))
    {
      if (!is.na(subject1[i]) && !is.na(subject2[j]) && subject1[i] == subject2[j])
      {
        theta_i_j = 2 * (n + 1) - (i + j)
        rho_i_j = n - abs(i - j)
        affinity_coefficient = affinity_coefficient + theta_i_j * rho_i_j
      }
    }
  }
  beta_uv = affinity_coefficient/denominator
  return(beta_uv * omega_n)
}
#' Measuring weights to edges of personal network.
#'
#' Under Free Word Association Technique, this function measures the weights
#' assigned to each edge pair linking different evocations of the same individual.
#'@param n Number of evocations expressed by a individual.
#'@param N Maximum number of evocations expressed allowed to an individual.
#'@param output Type of output to be generated. See details to more informations.
#'@details
#'Under the criteria of the Free Word Association Technique, we suppose that that
#'the evocations that are expressed by the same individual are mentally connected.
#'We propose a strategy to measure the weight of the connection for each pair of
#' these mental connections.
#'Details on the meaning_weighted can be found in
#'\url{http://wesleyhenriquesp.wixsite.com/rwordevok/meaning-weighted}.
#'@author Wesley Henrique Silva Pereira
#'@return \code{meaning_weighted} returns a set of numeric weights that will be
#'assigned to each evocation pair
#'of the individual vector.
#'@examples
#'Murillo = c("Regression Analysis","Multivariate Statistics",
#'"General Statistiscs", "Experiment Planning", "Sampling")
#'n = length(Murillo)
#'N = n
#'mw = meaning_weighted(n,N)
#'colnames(mw) = Murillo
#'rownames(mw) = Murillo
#'mw
#'@export
meaning_weighted = function(n, N, output = "Matrix")
{
  # Weights for the links between every ordered pair of evocation positions
  # (i < j) of one individual who pronounced n of at most N evocations.
  # output = "Matrix" returns an N x N upper-triangular matrix (NA below the
  # diagonal and beyond position n); "Vector" returns the weights flattened
  # in (1,2), (1,3), ..., (n-1,n) order.
  if (output != "Matrix" && output != "Vector")
  {
    stop("This function is defined only to Vector or Matrix output.")
  }
  # The check is n >= 2 / N >= 2; the original message wrongly said
  # "greater than 2".
  if (n < 2 || N < 2)
  {
    stop("This function is defined only to n and N greater than or equal to 2.")
  }
  if (n > N)
  {
    stop("This function is defined only to n lesser or equal than N.")
  }
  # omega_n penalises individuals that evoked fewer than the maximum N.
  cor1 = (N - n) * (N - n + 1)
  cor2 = N * (N + 1)
  omega_n = 1 - cor1/cor2
  denominator = 0
  if (output == "Matrix")
  {
    numerator = matrix(NA_real_, nrow = N, ncol = N,
                       dimnames = list(paste0("Evocation_", seq_len(N)),
                                       paste0("Evocation_", seq_len(N))))
    for (i in 1:(n - 1))
    {
      for (j in (i + 1):n)
      {
        # Raw weight: exponential decay on combined rank * positional proximity.
        numerator[i, j] = (2^(2 * n + 2 - i - j) - 1) * (n - abs(i - j))
        denominator = denominator + numerator[i, j]
      }
    }
  } else {
    numerator = numeric(0)
    iter = 0
    for (i in 1:(n - 1))
    {
      for (j in (i + 1):n)
      {
        iter = iter + 1
        numerator[iter] = (2^(2 * n + 2 - i - j) - 1) * (n - abs(i - j))
        denominator = denominator + numerator[iter]
      }
    }
  }
  # Normalise so the weights sum to 1 before the omega_n correction.
  (numerator/denominator) * omega_n
}
#' Construction of the adjacency matrix by intersection.
#'
#' This function constructs the adjacency matrix by the number of
#' evocations in common.
#'@param wordevok A wordevok class object.
#'@author Wesley Henrique Silva Pereira
#'@return \code{wordevok_intersection_adjacency} returns the adjacency matrix in which
#'each line and each column are one
#'individual and each cell is the number of common evocations between the line's
#'individual and column's individual.
#'@examples
#'data("preferences")
#'mtx = wordevok_intersection_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_intersection_adjacency = function(wordevok)
{
  # Individual-by-individual adjacency matrix whose cells count the shared
  # evocations of each pair; the diagonal is 0.
  #
  # inherits() replaces `class(x) != "wordevok"`, which errors for objects
  # carrying more than one class in R >= 4.2 (and is inconsistent with the
  # `class(x)[1]` checks used elsewhere in this file).
  if (!inherits(wordevok, "wordevok"))
  {
    stop("Only objects of the wordevok class are supported by this function.")
  }
  if (!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  n.subjects = dim(wordevok$Dataset)[1]
  adjacency = matrix(nrow = n.subjects, ncol = n.subjects)
  colnames(adjacency) = as.character(wordevok[[1]])
  rownames(adjacency) = colnames(adjacency)
  diag(adjacency) = 0
  # Guard against a single-individual dataset: the original 1:(n-1) loop
  # would iterate over c(1, 0) and fail.
  if (n.subjects >= 2)
  {
    for (i in 1:(n.subjects - 1))
    {
      subject1 = wordevok$Dataset[i, ]
      subject.1 = subject1[!is.na(subject1)]
      for (j in (i + 1):n.subjects)
      {
        subject2 = wordevok$Dataset[j, ]
        subject.2 = subject2[!is.na(subject2)]
        # Count of shared evocations, computed once and mirrored.
        common = sum(subject.1 %in% subject.2)
        adjacency[i, j] = common
        adjacency[j, i] = common
      }
    }
  }
  return(adjacency)
}
#' Construction of the adjacency matrix by affinity coefficient.
#'
#' This function constructs the adjacency matrix by affinity coefficient.
#'@param wordevok A object of wordevok class.
#'@return \code{wordevok_affinity_adjacency} returns the adjacency matrix in which each
#'line and each column are one indiviual
#' and each cell is the affinity coefficient between the line's individual and
#' column's individual.
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_affinity_adjacency = function(wordevok)
{
  # Individual-by-individual adjacency matrix of affinity coefficients;
  # cells stay 0 when the pair of individuals shares no evocation.
  #
  # inherits() replaces `class(x) != "wordevok"`, which errors for objects
  # carrying more than one class in R >= 4.2 (and is inconsistent with the
  # `class(x)[1]` checks used elsewhere in this file).
  if (!inherits(wordevok, "wordevok"))
  {
    stop("Only objects of the wordevok class are supported by this function.")
  }
  if (!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  n.subjects = dim(wordevok$Dataset)[1]
  adjacency = matrix(0, nrow = n.subjects, ncol = n.subjects,
                     dimnames = list(as.character(wordevok[[1]]),
                                     as.character(wordevok[[1]])))
  # Guard against a single-individual dataset: the original 1:(n-1) loop
  # would iterate over c(1, 0) and fail.
  if (n.subjects >= 2)
  {
    for (i in 1:(n.subjects - 1))
    {
      # Character vector of row i, NA pattern preserved for affinity().
      subject1 = apply(wordevok$Dataset[i, ], 2, as.character)
      subject.1 = subject1[!is.na(subject1)]
      for (j in (i + 1):n.subjects)
      {
        subject2 = apply(wordevok$Dataset[j, ], 2, as.character)
        subject.2 = subject2[!is.na(subject2)]
        # Only compute the affinity when the pair shares at least one
        # evocation; otherwise the cell keeps its 0 initialisation.
        if (sum(subject.1 %in% subject.2) > 0)
        {
          adjacency[i, j] = affinity(subject1, subject2)
          adjacency[j, i] = adjacency[i, j]
        }
      }
    }
  }
  return(adjacency)
}
#' Construction of the binary adjacency matrix.
#'
#' This function constructs the binary adjacency matrix.
#'@param wordevok A wordevok class object.
#'@return \code{wordevok_binary_adjacency} returns the adjacency matrix
#'in which
#'each line and each column are one individual
#'and each cell takes on 1 if the two individuals share at least one
#'evocation, and 0 otherwise.
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_binary_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_binary_adjacency = function(wordevok)
{
  # Individual-by-individual 0/1 adjacency matrix: 1 when the pair shares
  # at least one evocation, 0 otherwise (diagonal included).
  #
  # inherits() replaces `class(x) != "wordevok"`, which errors for objects
  # carrying more than one class in R >= 4.2 (and is inconsistent with the
  # `class(x)[1]` checks used elsewhere in this file).
  if (!inherits(wordevok, "wordevok"))
  {
    stop("Only objects of the wordevok class are supported by this function.")
  }
  if (!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  n.subjects = dim(wordevok$Dataset)[1]
  adjacency = matrix(0, nrow = n.subjects, ncol = n.subjects,
                     dimnames = list(as.character(wordevok[[1]]),
                                     as.character(wordevok[[1]])))
  # Guard against a single-individual dataset: the original 1:(n-1) loop
  # would iterate over c(1, 0) and fail.
  if (n.subjects >= 2)
  {
    for (i in 1:(n.subjects - 1))
    {
      subject1 = wordevok$Dataset[i, ]
      subject.1 = subject1[!is.na(subject1)]
      for (j in (i + 1):n.subjects)
      {
        subject2 = wordevok$Dataset[j, ]
        subject.2 = subject2[!is.na(subject2)]
        if (sum(subject.1 %in% subject.2) > 0)
        {
          adjacency[i, j] = 1
          adjacency[j, i] = 1
        }
      }
    }
  }
  return(adjacency)
}
#' Construction of the adjacency matrix by meaning weighted method.
#'
#' This function constructs the adjacency matrix by meaning weighted method.
#'@param wordevok A wordevok class object.
#'@author Wesley Henrique Silva Pereira
#'@return \code{wordevok_meaning_adjacency} returns the adjacency matrix in which each
#'line and each column are an evocation
#'and each cell is the total meaning weight assigned to the link between the
#'line's evocation and the column's evocation (0 when they are never linked).
#'@examples
#'data("preferences")
#'mtx = wordevok_meaning_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_meaning_adjacency = function(wordevok)
{
  # Symmetric evocation-by-evocation matrix filled from the simplified
  # meaning-weighted edge list produced by wordevok_meaning_list().
  if (class(wordevok)[1] != "wordevok")
  {
    stop ("Only objects of the wordevok class as supported by this function.")
  }
  if (!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  evocations = wordevok$Evocations
  n.evoc = length(evocations)
  # Start from an all-zero matrix labelled by the unique evocations.
  adjacency = matrix(0, nrow = n.evoc, ncol = n.evoc,
                     dimnames = list(evocations, evocations))
  edge.list = wordevok_meaning_list(wordevok, "Simplify")
  # Mirror each undirected edge weight into both triangles.
  for (k in seq_len(dim(edge.list)[1]))
  {
    from = as.character(edge.list[k, 1])
    to = as.character(edge.list[k, 2])
    adjacency[from, to] = edge.list[k, 3]
    adjacency[to, from] = edge.list[k, 3]
  }
  return(adjacency)
}
#' Construction of the list of edges by meaning weighted method.
#'
#' This function constructs the list of edges by meaning weighted method.
#'@param wordevok A wordevok class object.
#'@param output Type of output to be generated. See details to more information.
#'@author Wesley Henrique Silva Pereira
#'@return \code{wordevok_meaning_list} returns a list containing the edges and their
#'respective weights.
#'@details
#'The output argument of the function x must be:
#'
#'- "Complete": the object contains the identifier of the individual
#'generating the edge, the vertices bordering the edge and the weights
#'assigned to the edges.
#'
#'- "Edgelist": the same elements of the "Complete" output, except the
#'identifier of the individual generator.
#'
#'- "Simplify": this output contains only the unique edges and their weights.
#'The weights associated with these represent the sum of the
#'weights of all your duplicates.
#'@examples
#'data("preferences")
#'lst = wordevok_meaning_list(noloop.pref)
#'lst
#'@export
wordevok_meaning_list = function(wordevok, output = "Complete")
{
# Edge list of evocation pairs weighted by the meaning-weighted method.
# output = "Complete" keeps the individual identifier column, "Edgelist"
# drops it, and "Simplify" collapses duplicated undirected (Source, Target)
# pairs by summing their weights.
if(class(wordevok)[1] != "wordevok")
{
stop ("Only objects of the wordevok class as supported by this function.")
}
if(!is.null(wordevok$Loop))
{
stop("To use this function, the wordevok object cannot contain loops.")
}
if(output != "Complete" && output != "Edgelist" && output != "Simplify")
{
stop("This output type is not defined.")
}
# Character matrix of evocations: one row per individual, one column per
# evocation position.
dataset = apply(wordevok$Dataset,2,as.character)
n = dim(dataset)[2]
# source[[i]]: (individual id, evocation at position i, position index i).
# NOTE: `source` shadows base::source inside this function body.
source = list()
for (i in 1:n)
{
source[[i]] = as.data.frame(cbind(as.character(wordevok[[1]]),dataset[,i],i))
}
# Pair every position i with every later position j, producing all
# candidate edges for each individual.
target = list()
c = 0
for (i in 1:(n-1))
{
for (j in (i+1):n)
{
c = c + 1
target[[c]] = as.data.frame(cbind(source[[i]],source[[j]][,-1]))
}
}
# rbindlist() comes from data.table; na.omit() then drops candidate edges
# in which either endpoint evocation is missing.
listing = as.data.frame(rbindlist(target))
listing = as.data.frame(apply(listing,2,as.character))
listing = na.omit(listing)
colnames(listing) = c(names(wordevok)[1],"Source","Source_ID","Target","Target_ID")
listing = as.data.frame(apply(listing,2,as.character))
# Weight the edges individual by individual: the largest surviving target
# position is taken as that individual's evocation count.
# NOTE(review): this assumes NA positions are right-padded so the number of
# weights returned by meaning_weighted() matches the number of edge rows of
# the individual -- confirm for datasets with interior NAs.
ponder = split(listing,listing[,1])
for (i in 1:length(ponder))
{
conect = max(as.numeric(as.character(ponder[[i]]$Target_ID)))
ponder[[i]]$weight = meaning_weighted(conect,n,"Vector")
}
listing = as.data.frame(rbindlist(ponder))
if(output == "Complete")
{
return(listing[,c(names(wordevok)[1],"Source","Target","weight")])
} else {
listing = listing[,c("Source","Target","weight")]
if(output == "Edgelist")
{
return(listing)
} else {
# "Simplify": sum the weights of each unordered evocation pair (both
# orientations) across all individuals and keep only pairs that occur.
combining = t(combn(wordevok$Evocations,2))
weigh = NULL
for(i in 1:dim(combining)[1]){
verse = sum(listing[listing$Source == combining[i,1] &
listing$Target == combining[i,2],"weight"])
inverse = sum(listing[listing$Source == combining[i,2] &
listing$Target == combining[i,1],"weight"])
weigh[i] = verse + inverse
}
listing = as.data.frame(combining)
listing$v3 = weigh
listing = listing[listing$v3 > 0,]
colnames(listing) = c("Source","Target","weight")
return(listing)
}
}
}
#' Construction of the adjacency matrix by laplacian method.
#'
#' This function constructs the adjacency matrix by laplacian method.
#'@param wordevok A wordevok class object.
#'@param normalized Logical argument. If \code{normalized = TRUE}, the output will be
#'normalized.
#'@return \code{wordevok_laplacian_adjacency} returns the adjacency matrix in which each
#'line and each column are an
#'evocation and each cell takes on -1 if the line's individual and column's
#'individual have common evocations. The diagonal of the matrix equals the number of
#'edges incident on the vertex.
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_laplacian_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_laplacian_adjacency = function(wordevok,normalized = FALSE)
{
  # Graph Laplacian built on top of the binary adjacency: off-diagonal
  # cells are -1 for connected pairs and the diagonal holds the vertex
  # degree.  With normalized = TRUE every non-isolated row is divided by
  # its degree.
  laplacian = wordevok_binary_adjacency(wordevok)
  dgn = as.numeric(rowSums(laplacian))
  laplacian = -laplacian
  for (i in 1:length(dgn))
  {
    laplacian[i, i] = dgn[i]
  }
  if (normalized)
  {
    for (i in 1:length(dgn))
    {
      # Isolated vertices (degree 0) are left untouched to avoid 0/0.
      if (dgn[i] != 0)
      {
        laplacian[i, ] = laplacian[i, ] / dgn[i]
      }
    }
  }
  return(laplacian)
}
#' Extrai uma lista contendo as evocações das comunidades do objeto wordevok.
#'
#' Essa função extrai, mediante a informação dos membros de cada comunidade
#' encontrada por um método de agrupamento em redes, uma lista contendo os
#' subconjuntos de evocações de cada uma das comunidades informadas.
#'@param wordevok Um objeto da classe wordevok.
#'@param groups Uma lista contendo os membros da respectiva comunidade.
#'....
#'@return \code{wordevok_comm_subsets} Retorna uma lista onde em cada uma das
#'dimensões são alocados os subconjuntos do objeto \code{wordevok} orginal
#'de acordo com as comunidades informadas.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#' add.rownames = "names")
#'c = cluster_louvain(g,weights = E(g)$weight)
#'sub = wordevok_comm_subsets(noloop.pref,c)
#'@export
wordevok_comm_subsets = function(wordevok,groups)
{
  # For every community, pull the matching rows of the evocation dataset
  # (matched by row name) into one element of the result list.
  dataset = wordevok$Dataset
  subsets = lapply(seq_along(groups), function(g) {
    dataset[match(groups[[g]], rownames(dataset)), ]
  })
  names(subsets) = paste0("Group", seq_along(groups))
  return(subsets)
}
#' Extract the submatrix of adjacency matrix under founded communities.
#'
#' ....
#'@param adjacency A adjacency matrix.
#'@param groups Uma lista contendo os membros da respectiva comunidade.
#'@param laplacian ...
#'....
#'@return \code{wordevok_comm_submatrix} ...
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g,weights = E(g)$weight)
#'sub = wordevok_comm_submatrix(mtx,c,laplacian = FALSE)
#'@export
wordevok_comm_submatrix = function(adjacency,groups,laplacian = FALSE)
{
  # For each community, extract the square submatrix of `adjacency`
  # restricted to its members; a single-member community yields a (named)
  # scalar, as in the original.  With laplacian = TRUE each submatrix
  # diagonal is replaced by (cell - column sum).
  n = length(groups)
  name = rownames(adjacency)
  submatrix = list()
  for (i in 1:n)
  {
    idx = match(groups[[i]], name)
    if (length(idx) > 1)
    {
      submatrix[[i]] = adjacency[idx, idx]
    } else {
      # Two-step subsetting keeps the member's name on the scalar result.
      row.vec = adjacency[idx, ]
      submatrix[[i]] = row.vec[idx]
    }
  }
  names(submatrix) = paste0("group", 1:n)
  if (laplacian == FALSE)
  {
    return(submatrix)
  }
  for (i in 1:n)
  {
    mtx = submatrix[[i]]
    if (is.null(dim(mtx)))
    {
      # Singleton community: the extraction above dropped to a scalar, so
      # colSums()/dim() would fail (a crash in the original code).  Its
      # Laplacian is the entry minus its own column sum, i.e. 0.
      submatrix[[i]] = mtx - sum(mtx)
    } else {
      s = colSums(mtx)
      for (j in seq_len(dim(mtx)[1]))
      {
        mtx[j, j] = mtx[j, j] - s[j]
      }
      submatrix[[i]] = mtx
    }
  }
  return(submatrix)
}
#' Transforma uma matriz de adjacências em uma lista de arestas.
#'
#' Esta função transforma a matriz de adjacências em uma lista de arestas.
#'@param adjacency Uma matriz de adjacencias.
#'...
#'@return \code{wordevok_adjacency_to_list} retorna uma lista de adjacências
#'conforme fornecida pela matriz de adjacências original. É importante ressaltar
#'que essa transformação ignora as limitações de se representar as conexões do
#'grafo através de uma lista de adjacência (como por exemplo a incapacidade de
#'represnetar vértices isolados). As modificações necessárias ficam
#'a cargo do usuário.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'lst = wordevok_adjacency_to_list(mtx)
#'@export
wordevok_adjacency_to_list = function(adjacency)
{
  # Convert a named adjacency matrix into an edge list: one row per
  # unordered pair of vertices with the corresponding weight.  Note that
  # isolated vertices cannot be represented in an edge list (see roxygen).
  name = rownames(adjacency)
  combination = as.data.frame(t(combn(name, 2)))
  colnames(combination) = c("Source", "Target")
  # Vectorised lookup: a two-column character matrix indexes all the
  # (Source, Target) cells of the adjacency matrix at once, replacing the
  # original row-by-row loop.
  combination$weight = adjacency[cbind(as.character(combination$Source),
                                       as.character(combination$Target))]
  return(combination)
}
#' Cria subgrupos de uma lista de acordo com os grupos informados.
#'
#' Esta função cria sublistas de adjacência mediante à informação das
#' comunidades.
#'
#'@param list Uma lista de arestas.
#'@param groups Uma lista contendo os membros da respectiva comunidade.
#'....
#'@return \code{split_list_wordevok} retorna uma lista contendo em cada dimensão
#'as ligações por comunidade. Assim, a função ignora as ligações entre comunidades, deixando apenas as
#' ligações entre membros de uma mesma comunidade.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_meaning_list(noloop.pref)
#'g = graph_from_data_frame(mtx[,c(2,3)], directed = FALSE)
#'c = cluster_louvain(g)
#'lst = split_list_wordevok(mtx,c)
#'@export
split_list_wordevok = function(list,groups)
{
  # Keep, for each community, only the edges whose Source AND Target both
  # belong to that community (inter-community edges are discarded).
  # NOTE: the parameter is (unfortunately) named `list`, shadowing
  # base::list; the name is kept for backward compatibility with callers.
  #
  # Splitting the edge list by Source does not depend on the community, so
  # it is hoisted out of the loop (the original recomputed it on every
  # iteration).
  by.source = split(list, list$Source)
  evo = base::list()
  for (i in seq_along(groups))
  {
    members = groups[[i]]
    # Edges whose Source lies inside community i (rbindlist is data.table).
    within.source = data.frame(rbindlist(by.source[members]))
    # ... of those, keep the ones whose Target is also inside community i.
    by.target = split(within.source, within.source$Target)
    evo[[i]] = data.frame(rbindlist(by.target[members]))
  }
  return(evo)
}
#' Armazena a frequência de evocação e a ordem média de evocação de cada evocação
#' única padronizada.
#'
#' ....
#'@param wordevok Um objeto da classe wordevok.
#'@param groups Uma lista contendo os membros da respectiva comunidade.
#'....
#'@return \code{telp_wordevok} retorna uma lista que contém em cada dimensão sublistas
#'com um data.frame contendo as evocações únicas, ordem média de evocação e frequência
#'de evocação por comunidade.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'@export
telp_wordevok = function(wordevok,groups)
{
# TELP analysis per community: for every community in `groups`, compute the
# evocation frequency and the average evocation order (AEO) of each unique
# evocation, plus (frequency, AEO) cut-off pairs by mean, median and mode.
#
# cp: per-community subsets of the original dataset.
cp = wordevok_comm_subsets(wordevok,groups)
# Row names become the index column used to rebuild each subset as a
# wordevok object below.
for(i in 1:length(cp))
{
cp[[i]]$IDCLASS = rownames(cp[[i]])
}
lcp = list()
for (i in 1:length(cp))
{
lcp[[i]] = as.wordevok(cp[[i]],"IDCLASS")
}
# fcp: evocation frequency table of each community.
fcp = list()
for (i in 1:length(lcp))
{
fcp[[i]] = table(as.matrix(lcp[[i]]$Dataset))
}
# wcp: the unique evocation labels of each community.
wcp = list()
for (i in 1:length(fcp))
{
wcp[[i]] = rownames(fcp[[i]])
}
# ome: average evocation order of each unique evocation.  The generated
# expression evaluates, for evocation j of community i, the weighted sum
# sum_k(k * count of evocation j in column k) divided by its frequency.
# NOTE(review): built via eval(parse()) -- a direct vectorised computation
# would be safer, but the generated code is kept here untouched.
ome = list()
for (i in 1:length(lcp))
{
ome[[i]] = rep(1:length(wcp[[i]]))
for(j in 1:length(wcp[[i]]))
{
k = dim(lcp[[i]]$Dataset)[2]
eval(parse(text=paste0("ome[[",i,"]][",j,"] = round((",paste(paste0(1:k,
"*","sum(na.omit(as.integer(lcp[[",i,
"]]$Dataset[,",1:k,"]==wcp[[",i,"]][",j,
"])))"),collapse = "+"),")/fcp[[",i,"]][",
j,"],2)")))
}
}
# resini: (evocation label, frequency, AEO) per community.
resini = list()
for(i in 1:length(ome))
{
resini[[i]] = cbind(wcp[[i]],fcp[[i]],ome[[i]])
rownames(resini[[i]])=rep("",nrow(resini[[i]]))
}
# Statistical mode helper (ties resolved by first occurrence).
mode <- function(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
# cfmean/cfmedian/cfmode: (frequency cut-off, AEO cut-off) pairs used later
# to split the evocations into the Verges quadrants.
cfmean = list()
for(i in 1:length(resini))
{
cfmean[[i]] = rep(0,2)
cfmean[[i]][1] = round(mean(as.numeric(resini[[i]][,2])),2)
cfmean[[i]][2] = round(mean(as.numeric(resini[[i]][,3])),2)
}
cfmedian = list()
for(i in 1:length(resini))
{
cfmedian[[i]] = rep(0,2)
cfmedian[[i]][1] = round(median(as.numeric(resini[[i]][,2])),2)
cfmedian[[i]][2] = round(median(as.numeric(resini[[i]][,3])),2)
}
cfmode = list()
for(i in 1:length(resini))
{
cfmode[[i]] = rep(0,2)
cfmode[[i]][1] = round(mode(as.numeric(resini[[i]][,2])),2)
cfmode[[i]][2] = round(mode(as.numeric(resini[[i]][,3])),2)
}
# Assemble one "TELP-wordevok" element per community.
output = list()
for (i in 1:length(resini))
{
output[[i]] = list()
output[[i]][[1]] = resini[[i]]
output[[i]][[2]] = cfmean[[i]]
output[[i]][[3]] = cfmedian[[i]]
output[[i]][[4]] = cfmode[[i]]
names(output[[i]]) = c("resini","cfmean","cfmedian","cfmode")
class(output[[i]]) = "TELP-wordevok"
}
class(output) = "Multiple-TELP-wordevok"
return(output)
}
#' Apresenta o gráfico de quadrantes de Vergés.
#'
#' Esta função apresenta o gráfico de quadrantes de Vergés.
#'
#'@param twfile Uma dimensão de um objeto da classe \code{Multiple-TELP-wordevok}, gerado pela função \code{telp_wordevok}
#'@param ln Lógico. \code{TRUE} implica na utilização da escala logarítmica nos valores encontrados.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'telp_wordevok_plot(t[[1]])
#'@export
telp_wordevok_plot = function(twfile,ln=FALSE)
{
  # Verges quadrant plot: evocation frequency (x) versus average evocation
  # order (y), one point per unique evocation, with reference lines at the
  # mean (gray), median (red) and mode (purple) cut-offs.
  #
  # inherits() replaces `class(x) != ...`, which errors for multi-class
  # objects in R >= 4.2.
  if (!inherits(twfile, "TELP-wordevok"))
  {
    # The original message here was mojibake ("Erro ainda n?o definido");
    # replaced with a meaningful error consistent with the other functions.
    stop("Only objects of the TELP-wordevok class are supported by this function.")
  }
  # The log and linear versions differ only by the coordinate transform and
  # the axis labels, so they share one code path (the original duplicated
  # the whole plotting block).
  trans = if (ln) log else identity
  x.lab = if (ln) "Logaritmo da Frequencia de Evocacao" else "Frequencia de Evocacao"
  y.lab = if (ln) "Logaritmo da Ordem Media de Evocacao" else "Ordem Media de Evocacao"
  plot(trans(as.numeric(twfile$resini[, 2])),
       trans(as.numeric(twfile$resini[, 3])),
       xlab = x.lab,
       ylab = y.lab,
       col = rainbow(nrow(twfile$resini)),
       pch = 19)
  abline(v = trans(as.numeric(twfile$cfmean[1])),
         h = trans(as.numeric(twfile$cfmean[2])), col = "gray20")
  abline(v = trans(as.numeric(twfile$cfmedian[1])),
         h = trans(as.numeric(twfile$cfmedian[2])), col = "red")
  abline(v = trans(as.numeric(twfile$cfmode[1])),
         h = trans(as.numeric(twfile$cfmode[2])), col = "purple")
}
#' Divide as evocações por seus respectivos quadrantes de Vergès segundo o método selecionado.
#'
#'
#'@param mtwfile Um objeto da classe \code{Multiple-TELP-wordevok}, gerado pela função \code{telp_wordevok}.
#'@param method Criterio de discriminação dos quadrantes.
#'....
#'@return \code{telp_wordevok_quad} retorna uma lista contendo os quadrantes de Vergès
#'da representação social. Cada dimensão do objeto resultante representa uma comunidade, e cada subdimensão
#'representa um quadrante daquela comunidade.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'@export
telp_wordevok_quad = function(mtwfile,method = "mean")
{
# Split every community's evocations into the four Verges quadrants using
# the (frequency, AEO) cut-off pair selected by `method`.
quad = list()
# m is the slot index of the cut-off pair inside each TELP-wordevok
# element: 2 = cfmean, 3 = cfmedian, 4 = cfmode.
# NOTE(review): any unrecognized method string silently falls back to the
# mode cut-offs -- confirm this is intended (no validation is performed).
if (method == "mean")
{
m = 2
} else {
if (method == "median")
{
m = 3
} else {
m = 4
}
}
for(i in 1:length(mtwfile))
{
quad[[i]] = list()
resini = mtwfile[[i]]$resini
resini = data.frame(resini)
# Quadrants by comparing column 2 (frequency) and column 3 (AEO) against
# the selected cut-offs: Quad1 = high freq / low AEO (central core),
# Quad2 = high/high, Quad3 = low/low, Quad4 = low/high.
# NOTE(review): frequency is coerced with as.integer while AEO uses
# as.numeric -- an inconsistency kept here as-is.
quad[[i]][[1]] = data.frame(resini[(as.integer(resini[,2])>=mtwfile[[i]][[m]][1]&as.numeric(resini[,3])<mtwfile[[i]][[m]][2]),])
colnames(quad[[i]][[1]]) = c("Evocation","Frequency","AEO")
quad[[i]][[2]] = data.frame(resini[(as.integer(resini[,2])>=mtwfile[[i]][[m]][1]&as.numeric(resini[,3])>=mtwfile[[i]][[m]][2]),])
colnames(quad[[i]][[2]]) = c("Evocation","Frequency","AEO")
quad[[i]][[3]] = data.frame(resini[(as.integer(resini[,2])<mtwfile[[i]][[m]][1]&as.numeric(resini[,3])<mtwfile[[i]][[m]][2]),])
colnames(quad[[i]][[3]]) = c("Evocation","Frequency","AEO")
quad[[i]][[4]] = data.frame(resini[(as.integer(resini[,2])<mtwfile[[i]][[m]][1]&as.numeric(resini[,3])>=mtwfile[[i]][[m]][2]),])
colnames(quad[[i]][[4]]) = c("Evocation","Frequency","AEO")
names(quad[[i]]) = c("Quad1","Quad2","Quad3","Quad4")
}
class(quad) = "TELP-wordevok-quad"
return(quad)
}
#' Calcula as coordenadas da relevância para os signficados dos quadrantes de
#' uma determinada comunidade.
#'
#'@param quad Uma subdimensão de um objeto da classe \code{TELP-wordevok-quad} gerado pela função
#'telp-wordevok-quad.
#'@param classe Um documento listando as classes às quais pertencem as evocações
#'únicas do wordevok. Veja os detalhes para mais informações.
#'....
#'@return \code{wordevok_quad_class} Retorna as coordenadas de relevância relativa
#'de um determinado significado para um quadrante de uma comunidade.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'
#'# Macro-groups
#'
#'u = noloop.pref$Evocations
#'mg = c("Optional","Optional","Fifth","Fifth","Second","First","Optional",
#' "Eigth","Fifth","Third","Second","Forth","Optional","Forth","Sixth",
#' "Third","Optional","Forth","Optional","Sixth")
#'u = data.frame(cbind(u,mg))
#'wqc= wordevok_quad_class(twq[[1]][[1]],u)
#'@export
wordevok_quad_class = function(quad,classe)
{
  # Relative-relevance coordinates of each semantic class for one Verges
  # quadrant: row 3 accumulates log(frequency) / AEO over the quadrant's
  # evocations belonging to that class; row 1 is then set to the maximum of
  # row 3 (radar-chart ceiling); row 2 stays 0.
  classe = apply(classe, 2, as.character)
  quad = apply(quad, 2, as.character)
  # apply() collapses a single-row quadrant to a bare vector; restore the
  # one-row matrix shape in that case.
  if (is.null(dim(quad)))
  {
    quad = t(quad)
  }
  class.labels = unique(as.character(classe[, 2]))
  radgeral = as.data.frame(matrix(ncol = length(class.labels), nrow = 1))
  colnames(radgeral) = class.labels
  radgeral[1, ] = rep(0, length(class.labels))
  radgeral[2:3, ] = rep(0, length(class.labels))
  for (row in seq_len(dim(classe)[1]))
  {
    # Is this classified evocation present in the quadrant?
    hit = match(as.character(classe[row, 1]), quad[, 1])
    if (!is.na(hit))
    {
      rad.col = match(as.character(classe[row, 2]), colnames(radgeral))
      contribution = log(as.numeric(quad[hit, 2])) * (1/as.numeric(quad[hit, 3]))
      radgeral[3, rad.col] = radgeral[3, rad.col] + contribution
    }
  }
  radgeral[1, ] = max(radgeral[3, ])
  return(radgeral)
}
#' Calcula as coordenadas da relevância para os signficados dos quadrantes de
#' multicomunidades.
#'
#'@param twqfile Um objeto da classe \code{TELP-wordevok-quad} gerado pela função
#'\code{telp-wordevok-quad}.
#'@param classe Um documento listando as classes às quais pertencem as evocações
#'únicas do wordevok. Veja os detalhes para mais informações.
#'....
#'@return \code{wordevok_radar_attr} retorna uma lista de coordenadas de relevância relativa
#'de um determinado significado para cada quadrante de cada comunidade inserida.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'
#'# Macro-groups
#'
#'u = noloop.pref$Evocations
#'mg = c("Optional","Optional","Fifth","Fifth","Second","First","Optional",
#' "Eigth","Fifth","Third","Second","Forth","Optional","Forth","Sixth",
#' "Third","Optional","Forth","Optional","Sixth")
#'u = data.frame(cbind(u,mg))
#'wra= wordevok_radar_attr(twq,u)
#'@export
wordevok_radar_attr = function(twqfile, classe)
{
  # For every community and every non-empty quadrant, compute the class
  # relevance coordinates via wordevok_quad_class(); empty quadrants are
  # dropped and the surviving entries keep their quadrant names.
  radar = list()
  for (i in seq_along(twqfile))
  {
    kept = list()
    kept.names = NULL
    for (j in seq_along(twqfile[[i]]))
    {
      quadrant = twqfile[[i]][[j]]
      if (dim(quadrant)[1] != 0)
      {
        kept[[length(kept) + 1]] = wordevok_quad_class(quadrant, classe)
        kept.names = c(kept.names, names(twqfile[[i]])[j])
      }
    }
    names(kept) = kept.names
    radar[[i]] = kept
  }
  class(radar) = "TELP-wordevok-radar"
  return(radar)
}
#' Adequa as coordenadas para a utilização dos radares criados usando o \code{ggplot2}.
#'
#'@param radar_attr Um objeto da classe \code{TELP-wordevok-radar} criado pela
#'função \code{wordevok_radar_attr}.
#'....
#'@return \code{wordevok_radar_gg} retorna uma lista onde cada dimensão representa
#'o núcleo central da comunidade em questão bem como as relevâncias relativas de
#'cada conceito na mesma.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#' add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'
#'# Macro-groups
#'
#'u = noloop.pref$Evocations
#'mg = c("Optional","Optional","Fifth","Fifth","Second","First","Optional",
#' "Eigth","Fifth","Third","Second","Forth","Optional","Forth","Sixth",
#' "Third","Optional","Forth","Optional","Sixth")
#'u = data.frame(cbind(u,mg))
#'wra= wordevok_radar_attr(twq,u)
#'wrg= wordevok_radar_gg(wra)
#'@export
wordevok_radar_gg = function(radar_attr)
{
  # Convert a "TELP-wordevok-radar" object into per-community data frames
  # ready for ggplot2 radar charts: one row per core concept with its
  # relative priority, scaled to [0, 1] by the community maximum.
  #
  # radar_attr: object created by wordevok_radar_attr(); the first
  #             element of each community is its central-core quadrant.
  #
  # Returns a "wordevok-radar-gg" list with one data frame per community.
  core = list()
  for (i in seq_along(radar_attr))
  {
    quadrant = t(radar_attr[[i]][[1]])
    community = data.frame(Community = paste("Community", i),
                           Core = rownames(quadrant),
                           Priority = quadrant[, 3] / max(quadrant[, 3]))
    setorder(community, "Core")   # stable alphabetical axis order
    # Repeat the first row so the plotted radar polygon closes on itself.
    community[nrow(community) + 1, ] = community[1, ]
    rownames(community) = seq_len(nrow(community))
    core[[i]] = community
  }
  names(core) = paste0("Community_", seq_along(radar_attr))
  class(core) = "wordevok-radar-gg"
  return(core)
}
#' Gera os gráficos de radar para o pensamento coletivo das comunidades usando \code{ggplot2}.
#'
#' ....
#'@param radar_gg Um objeto da classe \code{wordevok-radar-gg} criado pela
#'função \code{wordevok_radar_gg}.
#'....
#'@return \code{wordevok_radar_plot} retorna uma lista onde cada dimensão representa
#'um gráfico de radar do núcleo central da comunidade em questão.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'
#'# Macro-groups
#'
#'u = noloop.pref$Evocations
#'mg = c("Optional","Optional","Fifth","Fifth","Second","First","Optional",
#' "Eigth","Fifth","Third","Second","Forth","Optional","Forth","Sixth",
#' "Third","Optional","Forth","Optional","Sixth")
#'u = data.frame(cbind(u,mg))
#'wra = wordevok_radar_attr(twq,u)
#'wrg = wordevok_radar_gg(wra)
#'wrp = wordevok_radar_plot(wrg)
#'wrp[[1]]
#'@export
wordevok_radar_plot = function(radar_gg)
{
  # Replace each element of a "wordevok-radar-gg" list with a ggplot2
  # radar chart of that community's core concepts (Core on the angular
  # axis, Priority on the radial axis, drawn via coord_radar()).
  #
  # radar_gg: object created by wordevok_radar_gg().
  #
  # Returns the input list with every data frame replaced by its plot.
  for(i in 1:length(radar_gg))
  {
    radar_gg[[i]] = ggplot(radar_gg[[i]], aes(x = Core, y = Priority)) +
      geom_polygon(aes(group = Community), color = "#006666", fill="#006666", size = 0.5, alpha=0.6) +
      ggtitle(as.character(radar_gg[[i]]$Community[1])) +
      xlab("") +
      ylab("") +
      ylim(-0.1,1) +
      # NOTE(review): this colour guide is later overridden by the second
      # guides() call below — confirm whether both calls are still needed.
      guides(color = guide_legend(ncol=2)) +
      coord_radar() +
      guides(colour=guide_legend(nrow=4, byrow=TRUE), shape=guide_legend(nrow=1, byrow=TRUE)) +
      theme(
        axis.ticks.y = element_blank(), axis.text.y = element_blank(),
        legend.key = element_blank(), legend.title = element_blank(),
        legend.background = element_rect(color="#ffffff", fill="transparent"), ### NOTE(review): verify this legend styling is still wanted
        panel.background = element_rect(fill = "white", colour = "white", size = 0.1, linetype = "solid"),
        panel.grid.major = element_line(size = 0.3, linetype = 'solid', colour = "#dddddd")
      )
  }
  return(radar_gg)
}
coord_radar <- function (theta = "x", start = 0, direction = 1)
{
  # Polar coordinate system with straight (linear) spokes, as used for
  # radar charts: identical to CoordPolar except that is_linear() returns
  # TRUE, so polygon segments are drawn straight instead of as arcs.
  #
  # theta:     which aesthetic maps to the angle ("x" or "y").
  # start:     angular offset of the first axis, in radians.
  # direction: 1 for clockwise, -1 for counter-clockwise.
  theta <- match.arg(theta, c("x", "y"))
  r <- if (theta == "x") "y" else "x"
  ggproto("CordRadar", CoordPolar,
          theta = theta,
          r = r,
          start = start,
          direction = sign(direction),
          is_linear = function(coord) TRUE)
}
| /R/wordevok.R | no_license | wesleyhspereira/wordevok | R | false | false | 45,072 | r | #' Creates an wordevok object.
#'
#' \code{as.wordevok} creates an object of the wordevok class from a data.frame object.
#'@param data A data.frame class object containing evocations. It may contain a column
#'identifying the individuals.
#'@param index String with the name of the column identifying the individuals.
#'@param na specifies how the missing information is informed in the original dataset.
#'For instance, if “na” is recorded as string (“Missing” or “NA” or “na” or
#'“null”), this argument should be specified accordingly: \code{na}=“Missing”
#'
#'@return \code{as.wordevok} returns a \code{wordevok} object. This class is based on a
#'list containing the following slots:
#'
#'\code{index}: stores the identifier of the evocation vectors and
#' has the name of the string inserted in the argument \code{index} of the
#' function \code{as.wordevok}. If \code{index = NULL}, the slot is named "Index".
#'
#'\code{Dataset}: stores the dataset organized according to the rules
#'established in \code{details}. Its rows are named according to the assigned
#'identifier.
#'
#'\code{Evocations}: stores the vector of standardized single evocations
#'in the dataset.
#'
#'\code{Loop}: stores the loops found (same evocation appearing two or
#'more times in the same row). Loops must be removed before using other
#'\code{wordevok} functions. Duplicates can be removed by the
#'\code{removing_loops_wordevok} function.
#'
#'@author Wesley H. S. Pereira (ICEx/UFMG), Denise Duarte (ICEx/UFMG), Rodrigo B. Ribeiro (IMPA), Gilvan Guedes (CEDEPLAR/UFMG).
#'@references Abric, J. C. (1994) Les representations sociales: aspects theoriques. In: Abric, J. C. Pratiques sociales et representations.
#' Paris: Presses Universitaires de France.
#'@references Abric, J. C. (2001) A structural approach to social representations.
#'In: Abric, J. C. Representations of the social: Bridging theoretical traditions.
#' Malden, UK: Blackwell Publishing.
#' @references Pereira, W. H. S. (2017). Representacao da estrutura do pensamento coletivo sobre
#' as enchentes do Rio Doce: conectando indivíduos afins através da teoria dos grafos.
#' Monografia (Graduacao em Estatistica) - Universidade Federal de Minas Gerais.
#'@details
#'The \code{wordevok} class was developed to transform evocation data based on the
#'instrument used in the Free Words Association Technique (FWAT) into relational data
#'to be used in Social Network Analysis. Since evocations based on the FWAT instrument
#'are ranked by order of importance and qualitative coding and standardization of these
#'evocations may produce induced loops, it is likely that loop elimination causes blank
#'spaces between evocations for a person’s vector of evocations. The \code{wordevok} function
#'automatically corrects these blank spaces, moving evocations of lower order of
#'importance to the left of the evocation vector.
#'
#'The \code{as.wordevok} function provides the option to index each respondent
#'by an identifier (\code{string} or \code{numeric}). This identifier must occupy a column
#'in the \code{data.frame}
#'object and its name must be indicated by a \code{string} in the \code{index} parameter
#'of the function. For instance: \code{index} = "ID". If \code{index = NULL}, the function automatically indexes
#'individuals by a \code{numeric} identifier.
#'@examples
#'data("preferences")
#'pref = as.wordevok(preferences,index = "Name")
#'pref
#'@export
as.wordevok = function(data, index = NULL,na = NULL)
{
  # Build a "wordevok" object from a data.frame of FWAT evocations:
  # normalize the missing-value marker, left-align each person's
  # evocations, collect the unique standardized evocations, and detect
  # induced loops (the same evocation twice in one row).
  #
  # data:  data.frame of evocations, optionally with an identifier column.
  # index: name of the identifier column; NULL auto-numbers individuals.
  # na:    the value used to mark missing evocations in `data` (e.g. "NA",
  #        "Missing"); NULL means real NA is already used.
  #
  # Returns a list of class "wordevok" with slots <index>, Dataset,
  # Evocations and (only when loops exist, with a warning) Loop.
  if(!is.null(na))
  {
    # Replace the user's missing-value marker with a real NA, column by
    # column, working on a character copy of the data.
    data = apply(data,2,as.character)
    for(i in 1:dim(data)[2])
    {
      data[data[,i] == na,i] = NA
    }
    data = data.frame(data)
  }
  if(class(data)[1] != "data.frame")
  {
    stop ("Only objects of the data.frame class as supported by this function.")
  }
  if (!is.null(index)){
    # Move the identifier column to the front, keeping its original name.
    idx = as.character(data[,match(index,colnames(data))])
    data = data[,-match(index,colnames(data))]
    name = colnames(data)
    data = data.frame(idx,data)
    colnames(data) = c(index,name)
  } else {
    # No identifier given: number the individuals 1..n under "Index".
    index = "Index"
    idx = rep(1:dim(data)[1])
    name = colnames(data)
    data = data.frame(idx,data)
    colnames(data) = c("Index",name)
  }
  name = colnames(data)
  idx = as.character(data[,1])
  evocation = data[,-1]
  base = apply(evocation,2,as.character)
  if(dim(evocation)[1] == 1)
  {
    # apply() on a single row returns a column vector; restore row shape.
    base = t(base)
  }
  # Left-align every row: move NA cells to the rightmost positions so the
  # n pronounced evocations occupy the first n slots in importance order.
  for (i in 1:dim(evocation)[1])
  {
    exchange = which(is.na(base[i,]) == TRUE)
    if(length(exchange)!=0)
    {
      base[i,] = c(base[i,-exchange],base[i,exchange])
    }
  }
  # Reassemble identifier + aligned evocations into one data.frame.
  out = matrix(ncol = 1+dim(evocation)[2],nrow = dim(evocation)[1])
  out[,1] = idx
  out[,2:(1+dim(evocation)[2])] = base
  colnames(out) = name
  out = as.data.frame(out)
  # Drop individuals with no evocations at all (first slot still NA).
  out = out[!is.na(out[,2]),]
  output = list()
  output[[index]] = out[,1]
  rownames(out) = out[,1]
  out = out[,-1]
  base = apply(out,2,as.character)
  if(dim(evocation)[1] == 1)
  {
    base = t(base)
  }
  rownames(base) = rownames(out)
  colnames(base) = colnames(out)
  # Collect the unique standardized evocations across all columns.
  evocations = list()
  for(i in 1:dim(base)[2])
  {
    evocations[[i]] = (base[,i])
  }
  evocations = Reduce(c,evocations)
  evocations = unique(evocations[!is.na(evocations)])
  # Scan every pair of columns in every row for loops: the same evocation
  # appearing twice for one individual (usually induced by qualitative
  # standardization). Source/Target record the two column names involved.
  loop = data.frame(NA,NA,NA,NA)
  colnames(loop) = c(index,"Evocation","Source","Target")
  iter = 0
  for(i in 1:dim(base)[1])
  {
    for(j in 1:(dim(base)[2]-1))
    {
      for(k in (j+1):(dim(base)[2]))
      {
        if(!is.na(base[i,j]) && !is.na(base[i,k]))
        {
          if(base[i,j] == base[i,k])
          {
            iter = iter + 1
            loop[iter,1] = rownames(base)[i]
            loop[iter,2] = base[i,j]
            loop[iter,3] = colnames(base)[j]
            loop[iter,4] = colnames(base)[k]
          }
        }
      }
    }
  }
  output$Dataset = out
  output$Evocations = sort(evocations)
  if(iter == 0)
  {
    output$Loop = NULL
  } else {
    output$Loop = as.data.frame(loop)
    warning("The wordevok file contains loops.")
  }
  class(output) = "wordevok"
  return(output)
}
#' Removal of loops of a wordevok object.
#'
#' This function removes the loops from a wordevok object.
#'@param wordevok A wordevok class object.
#'@return \code{removing_loops_wordevok} returns a wordevok object without loops.
#'@author Wesley Henrique Silva Pereira
#'@details
#'Under the Free Word Association Technique, original evocations can vary from
#'single words to complete sentences. However, the analysis of this original
#'evocation set is challenging, since evocations are sometimes too specific.
#'Moreover, although certain evocations are syntactically distinct, they can be
#'semantically equivalent. In this case, qualitative standardization of the original
#'evocations is used, giving rise to unique standardized evocations. These evocations
#'can be found in the slot Evocations of wordevok object.
#'
#'The standardization process creates unnatural loops in the evocation dataset:
#'the appearance of the same evocation twice for the same individual. In Graph Theory,
#'there are applications where such loops are acceptable. For the analysis of collective
#'meanings, however, this behavior does not make sense and represents a loop induced by
#'the standardization process. The best alternative to this problem is to manually try to
#'reclassify one of the repeated evocations in a different semantic class that agrees with
#'its original meaning. When manual reclassification is not possible, the
#'removing_loops_wordevok function removes from the wordevok object the repeated
#'evocation that has been assigned to the lower order of importance. In addition,
#'the function automatically rearranges the remaining evocations so that the NA
#'observations are placed in the rightmost positions of the vector.
#'
#'@references Pereira, W. H. S. (2017). Representacao da estrutura do pensamento coletivo sobre
#' as enchentes do Rio Doce: conectando indivíduos afins através da teoria dos grafos.
#' Monografia (Graduacao em Estatistica) - Universidade Federal de Minas Gerais.
#'@examples
#'#Creating a wordevok object:
#'data("preferences")
#'pref = as.wordevok(preferences,index = "Name")
#'pref
#'#Removing loops:
#'noloop.pref = removing_loops_wordevok(pref)
#'noloop.pref
#'@export
removing_loops_wordevok = function(wordevok)
{
  # Remove the induced loops of a wordevok object: whenever the same
  # evocation appears twice in one individual's row, the occurrence with
  # the lower order of importance (the Target column recorded in $Loop)
  # is blanked, and the remaining evocations are re-left-aligned.
  #
  # wordevok: an object of class "wordevok".
  #
  # Returns the wordevok object with $Loop removed (or rebuilt, with a
  # warning, should loops unexpectedly remain after the pass).
  if(class(wordevok)[1] != "wordevok")
  {
    stop ("Only objects of the wordevok class as supported by this function.")
  }
  if(is.null(wordevok$Loop))
  {
    # Nothing to do: the object is already loop-free.
    return(wordevok)
  } else {
    loop = wordevok$Loop
    dataset = wordevok$Dataset
    # Work on a character matrix copy so individual cells can be blanked.
    base = apply(dataset,2,as.character)
    colnames(base) = colnames(dataset)
    rownames(base) = rownames(dataset)
    for (i in 1:dim(loop)[1])
    {
      # Blank the duplicate of lower importance (column name in Target).
      base[match(loop[i,1],rownames(base)),loop[i,4]] = NA
    }
  }
  wordevok$Loop = NULL
  # Re-scan the full matrix for loops, mirroring the detection pass of
  # as.wordevok(); normally none remain after the removal above.
  loop = data.frame(NA,NA,NA,NA)
  colnames(loop) = c(names(wordevok)[1],"Evocation","Source","Target")
  iter = 0
  for(i in 1:dim(base)[1])
  {
    for(j in 1:(dim(base)[2]-1))
    {
      for(k in (j+1):(dim(base)[2]))
      {
        if(!is.na(base[i,j]) && !is.na(base[i,k]))
        {
          if(base[i,j] == base[i,k])
          {
            iter = iter + 1
            loop[iter,1] = rownames(base)[i]
            loop[iter,2] = base[i,j]
            loop[iter,3] = colnames(base)[j]
            loop[iter,4] = colnames(base)[k]
          }
        }
      }
    }
  }
  if(iter == 0)
  {
    wordevok$Loop = NULL
  } else {
    warning("This dataset contains loops")
    wordevok$Loop = as.data.frame(loop)
  }
  # Left-align each row again: push the freshly created NA cells to the
  # rightmost positions so evocation order stays contiguous.
  for (i in 1:dim(base)[1])
  {
    exchange = which(is.na(base[i,]) == TRUE)
    if(length(exchange)!=0)
    {
      base[i,] = c(base[i,-exchange],base[i,exchange])
    }
  }
  wordevok$Dataset = as.data.frame(base)
  return(wordevok)
}
#' Measuring affinity within two individuals.
#'
#' Under Free Word Association Technique, this function measures the affinity
#' between the evoked vectors of two individuals.
#'@param subject1 Evoked vector of the individual 1.
#'@param subject2 Evoked vector of the individual 2.
#'@return \code{affinity} returns a numeric measure of affinity between the vectors
#'inputed.
#'@author Wesley H. S. Pereira (ICEx/UFMG), Denise Duarte (ICEx/UFMG), Rodrigo B. Ribeiro (IMPA), Gilvan Guedes (CEDEPLAR/UFMG).
#'@details
#'
#'Under the criteria of the Free Word Association Technique, we propose a
#'coefficient to calculate the affinity (similiraty) between two vectors of evocations.
#'Details on the affinity coefficient can be found in
#'\url{http://wesleyhenriquesp.wixsite.com/rwordevok/affinity}.
#'
#'The maximum number of evocations allowed to a vector is controlled by the length
#'of the vectors. Therefore, the vectors \code{subject1} and \code{subject2}
#'must have equal size. If an individual pronounces less than the maximum \code{N},
#'the vector must be structured so that n evocations pronounced are in the first \code{n}
#'positions of the vector according to their importance to the individual and the
#'remaining \code{N - n} positions must be filled with NA observations.
#'
#'@references Pereira, W. H. S. (2017).
#'@examples
#'#Creating evocation's vectors:
#'Murillo = c("Regression Analysis","Multivariate Statistics",
#'"General Statistiscs", "Experiment Planning", "Sampling")
#'Ingrid = c("Regression Analysis","Multivariate Statistics",
#'"Temporal Series","Statistical Quality Control", NA)
#'affinity(Murillo,Ingrid)
#'@export
affinity = function(subject1, subject2)
{
  # Affinity coefficient between two evocation vectors of the same fixed
  # length N (Free Word Association Technique). Each matching evocation
  # contributes theta (joint order of importance) times rho (positional
  # proximity); the total is normalized so two identical full vectors
  # have affinity 1, and penalized by omega_n when fewer than N of the
  # allowed slots are used.
  #
  # subject1, subject2: character vectors of equal length N, evocations
  #   left-aligned and padded with NA.
  #
  # Returns a single numeric affinity value.
  if (length(subject1) != length(subject2))
  {
    stop("Arguments must be the same size.")
  }
  n1 = length(subject1[!is.na(subject1)])
  n2 = length(subject2[!is.na(subject2)])
  n = max(n1, n2)
  # Robustness fix: two all-NA vectors share nothing, so their affinity
  # is 0. (The original `1:n` loop and 0/0 denominator broke when n == 0.)
  if (n == 0)
  {
    return(0)
  }
  N = length(subject1)
  denominator = n * n * (n + 1)
  # omega_n < 1 whenever n < N: unused slots shrink the coefficient.
  cor1 = (N - n) * (N - n + 1)
  cor2 = N * (N + 1)
  omega_n = 1 - cor1 / cor2
  affinity_coefficient = 0
  for (i in seq_len(n))
  {
    for (j in seq_len(n))
    {
      if (!is.na(subject1[i]) && !is.na(subject2[j]))
      {
        if (subject1[i] == subject2[j])
        {
          theta_i_j = 2 * (n + 1) - (i + j)   # joint importance of the match
          rho_i_j = n - abs(i - j)            # positional proximity
          affinity_coefficient = affinity_coefficient + theta_i_j * rho_i_j
        }
      }
    }
  }
  beta_uv = affinity_coefficient / denominator
  affinity_coefficient = beta_uv * omega_n
  return(affinity_coefficient)
}
#' Measuring weights to edges of personal network.
#'
#' Under Free Word Association Technique, this function measures the weights
#' assigned to each edge pair linking different evocations of the same individual.
#'@param n Number of evocations expressed by a individual.
#'@param N Maximum number of evocations expressed allowed to an individual.
#'@param output Type of output to be generated. See details to more informations.
#'@details
#'Under the criteria of the Free Word Association Technique, we suppose that that
#'the evocations that are expressed by the same individual are mentally connected.
#'We propose a strategy to measure the weight of the connection for each pair of
#' these mental connections.
#'Details on the meaning_weighted can be found in
#'\url{http://wesleyhenriquesp.wixsite.com/rwordevok/meaning-weighted}.
#'@author Wesley Henrique Silva Pereira
#'@return \code{meaning_weighted} returns a set of numeric weights that will be a
#'ssigned to each evocation pair
#'of the individual vector.
#'@examples
#'Murillo = c("Regression Analysis","Multivariate Statistics",
#'"General Statistiscs", "Experiment Planning", "Sampling")
#'n = length(Murillo)
#'N = n
#'mw = meaning_weighted(n,N)
#'colnames(mw) = Murillo
#'rownames(mw) = Murillo
#'mw
#'@export
meaning_weighted = function(n, N, output = "Matrix")
{
  # Weights of the mental connections between the n evocations expressed
  # by one individual out of the N allowed slots. Each pair (i, j), i < j,
  # is weighted by (2^(2n+2-i-j) - 1) (joint importance) times n - |i - j|
  # (proximity), then normalized so the weights sum to omega_n, the
  # penalty for using fewer than N slots.
  #
  # n:      number of evocations actually expressed (2 <= n <= N).
  # N:      maximum number of evocations allowed (N >= 2).
  # output: "Matrix" (N x N upper-triangular matrix, NA elsewhere) or
  #         "Vector" (weights in pair order (1,2), (1,3), ..., (n-1,n)).
  #
  # Returns the weights in the requested shape.
  iter = 0
  numerator = NULL
  denominator = 0
  # Scalar condition: use the short-circuit `&&`, not the vectorized `&`.
  if (output != "Matrix" && output != "Vector")
  {
    stop("This function is defined only to Vector or Matrix output.")
  }
  if (n < 2 || N < 2)
  {
    stop("This function is defined only to n and N greater than 2.")
  }
  if (n > N)
  {
    stop("This function is defined only to n lesser or equal than N.")
  }
  cor1 = (N - n) * (N - n + 1)
  cor2 = N * (N + 1)
  omega_n = 1 - cor1 / cor2
  if (output == "Matrix")
  {
    numerator = matrix(ncol = N, nrow = N)
    colnames(numerator) = paste0("Evocation_", 01:N)
    rownames(numerator) = paste0("Evocation_", 01:N)
    numerator[, ] = NA
    for (i in 1:(n - 1))
    {
      for (j in (i + 1):n) {
        numerator[i, j] = ((2^(2 * n + 2 - i - j)) - 1) * (n - abs(i - j))
        denominator = denominator + numerator[i, j]
      }
    }
    meaning = (numerator / denominator) * omega_n
  }
  if (output == "Vector")
  {
    for (i in 1:(n - 1))
    {
      for (j in (i + 1):n) {
        iter = iter + 1
        numerator[iter] = ((2^(2 * n + 2 - i - j)) - 1) * (n - abs(i - j))
        denominator = denominator + numerator[iter]
      }
    }
    meaning = (numerator / denominator) * omega_n
  }
  return(meaning)
}
#' Construction of the adjacency matrix by intersection.
#'
#' This function constructs the adjacency matrix by the number of
#' evocations in common.
#'@param wordevok A wordevok class object.
#'@author Wesley Henrique Silva Pereira
#'@return \code{wordevok_intersection_adjacency} returns the adjacency matrix in which
#'each line and each column are one
#'indiviual and each cell is the common evocation's number between the line's
#'individual and column's individual.
#'@examples
#'data("preferences")
#'mtx = wordevok_intersection_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_intersection_adjacency = function(wordevok)
{
  # Individual-by-individual adjacency matrix where cell (i, j) counts
  # how many standardized evocations individuals i and j have in common.
  #
  # wordevok: a loop-free object of class "wordevok".
  #
  # Returns a symmetric numeric matrix named by the individuals'
  # identifiers, with zeros on the diagonal.
  #
  # inherits() replaces `class(x) != "wordevok"`, which errors in an
  # `if` whenever the object carries more than one class.
  if (!inherits(wordevok, "wordevok"))
  {
    stop("Only objects of the wordevok class are supported by this function.")
  }
  if (!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  n = nrow(wordevok$Dataset)
  adjacency = matrix(nrow = n, ncol = n)
  colnames(adjacency) = as.character(wordevok[[1]])
  rownames(adjacency) = colnames(adjacency)
  diag(adjacency) = 0
  for (i in 1:(n - 1))
  {
    subject1 = wordevok$Dataset[i, ]
    subject.1 = subject1[!is.na(subject1)]
    for (j in (i + 1):n)
    {
      subject2 = wordevok$Dataset[j, ]
      subject.2 = subject2[!is.na(subject2)]
      # Count the shared evocations once and mirror it (symmetric matrix).
      common = sum(subject.1 %in% subject.2)
      adjacency[i, j] = common
      adjacency[j, i] = common
    }
  }
  return(adjacency)
}
#' Construction of the adjacency matrix by affinity coefficient.
#'
#' This function constructs the adjacency matrix by affinity coefficient.
#'@param wordevok A object of wordevok class.
#'@return \code{wordevok_affinity_adjacency} returns the adjacency matrix in which each
#'line and each column are one individual
#' and each cell is the affinity coefficient between the line's individual and
#' column's individual.
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_affinity_adjacency = function(wordevok)
{
  # Individual-by-individual adjacency matrix where cell (i, j) is the
  # affinity coefficient (see affinity()) between the evocation vectors
  # of individuals i and j; the cell stays 0 when they share nothing.
  #
  # wordevok: a loop-free object of class "wordevok".
  #
  # Returns a symmetric numeric matrix named by the individuals'
  # identifiers.
  #
  # inherits() replaces `class(x) != "wordevok"`, which errors in an
  # `if` whenever the object carries more than one class.
  if (!inherits(wordevok, "wordevok"))
  {
    stop("Only objects of the wordevok class are supported by this function.")
  }
  if (!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  n = nrow(wordevok$Dataset)
  adjacency = matrix(0, nrow = n, ncol = n)
  colnames(adjacency) = as.character(wordevok[[1]])
  rownames(adjacency) = colnames(adjacency)
  for (i in 1:(n - 1))
  {
    # Coerce the data.frame row to a plain character vector, since
    # affinity() expects positional vectors padded with NA.
    subject1 = apply(wordevok$Dataset[i, ], 2, as.character)
    subject.1 = subject1[!is.na(subject1)]
    for (j in (i + 1):n)
    {
      subject2 = apply(wordevok$Dataset[j, ], 2, as.character)
      subject.2 = subject2[!is.na(subject2)]
      # affinity() is only worth computing when there is some overlap.
      if (sum(subject.1 %in% subject.2) > 0)
      {
        adjacency[i, j] = affinity(subject1, subject2)
        adjacency[j, i] = adjacency[i, j]
      }
    }
  }
  return(adjacency)
}
#' Construction of the binary adjacency matrix.
#'
#' This function constructs the binary adjacency matrix.
#'@param wordevok A wordevok class object.
#'@return \code{wordevok_binary_adjacency} returns the adjacency matrix
#'in which
#'each line and each column are one individual
#'and each cell takes on 1 if the two individuals share at least one
#'evocation, and 0 otherwise.
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_binary_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_binary_adjacency = function(wordevok)
{
  # Individual-by-individual binary adjacency matrix: cell (i, j) is 1
  # when individuals i and j share at least one evocation, 0 otherwise.
  #
  # wordevok: a loop-free object of class "wordevok".
  #
  # Returns a symmetric 0/1 matrix named by the individuals'
  # identifiers, with zeros on the diagonal.
  #
  # inherits() replaces `class(x) != "wordevok"`, which errors in an
  # `if` whenever the object carries more than one class.
  if (!inherits(wordevok, "wordevok"))
  {
    stop("Only objects of the wordevok class are supported by this function.")
  }
  if (!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  n = nrow(wordevok$Dataset)
  adjacency = matrix(0, nrow = n, ncol = n)
  colnames(adjacency) = as.character(wordevok[[1]])
  rownames(adjacency) = colnames(adjacency)
  for (i in 1:(n - 1))
  {
    subject1 = wordevok$Dataset[i, ]
    subject.1 = subject1[!is.na(subject1)]
    for (j in (i + 1):n)
    {
      subject2 = wordevok$Dataset[j, ]
      subject.2 = subject2[!is.na(subject2)]
      if (sum(subject.1 %in% subject.2) > 0)
      {
        adjacency[i, j] = 1
        adjacency[j, i] = 1
      }
    }
  }
  return(adjacency)
}
#' Construction of the adjacency matrix by meaning weighted method.
#'
#' This function constructs the adjacency matrix by meaning weighted method.
#'@param wordevok A wordevok class object.
#'@author Wesley Henrique Silva Pereira
#'@return \code{wordevok_meaning_adjacency} returns the adjacency matrix in which each
#'line and each column are an evocation
#'and each cell is the sum of all weights assigned, by the meaning weighted
#'method, to the link between the two evocations.
#'@examples
#'data("preferences")
#'mtx = wordevok_meaning_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_meaning_adjacency = function(wordevok)
{
  # Evocation-by-evocation adjacency matrix under the meaning weighted
  # method: each cell holds the simplified (summed) weight of the link
  # between two evocations, taken from wordevok_meaning_list(..., "Simplify").
  #
  # wordevok: a loop-free object of class "wordevok".
  #
  # Returns a symmetric numeric matrix named by the unique evocations;
  # unlinked pairs stay 0.
  if(class(wordevok)[1] != "wordevok")
  {
    stop ("Only objects of the wordevok class as supported by this function.")
  }
  if(!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  evocations = wordevok$Evocations
  # "Simplify" collapses duplicate edges to one row with a summed weight.
  meaning = wordevok_meaning_list(wordevok,"Simplify")
  adjacency = matrix(ncol = length(evocations),nrow = length(evocations))
  colnames(adjacency) = evocations
  rownames(adjacency) = evocations
  adjacency[,] = 0
  # Fill both triangles by name; the edge list is undirected.
  for(i in 1:dim(meaning)[1])
  {
    source = as.character(meaning[i,1])
    target = as.character(meaning[i,2])
    adjacency[source,target] = meaning[i,3]
    adjacency[target,source] = meaning[i,3]
  }
  return(adjacency)
}
#' Construction of the list of edges by meaning weighted method.
#'
#' This function constructs the list of edges by meaning weighted method.
#'@param wordevok A wordevok class object.
#'@param output Type of output to be generated. See details to more information.
#'@author Wesley Henrique Silva Pereira
#'@return \code{wordevok_meaning_list} returns a list containing the edges and their
#'respective weights.
#'@details
#'The output argument of the function x must be:
#'
#'- "Complete": the object contains the identifier of the individual
#'generating the edge, the vertices bordering the edge and the weights
#'assigned to the edges.
#'
#'- "Edgelist": the same elements of the "Complete" output, except the
#'identifier of the individual generator.
#'
#'- "Simplify": this output contains only the unique edges and their weights.
#'The weights associated with these represent the sum of the
#'weights of all your duplicates.
#'@examples
#'data("preferences")
#'lst = wordevok_meaning_list(noloop.pref)
#'lst
#'@export
wordevok_meaning_list = function(wordevok, output = "Complete")
{
  # Build the edge list of the evocation network under the meaning
  # weighted method: every pair of evocations pronounced by the same
  # individual becomes an edge, weighted via meaning_weighted().
  #
  # wordevok: a loop-free object of class "wordevok".
  # output:   "Complete" (with the individual's identifier), "Edgelist"
  #           (Source/Target/weight only) or "Simplify" (unique edges,
  #           weights summed over all duplicates).
  #
  # Returns a data.frame edge list in the requested shape.
  if(class(wordevok)[1] != "wordevok")
  {
    stop ("Only objects of the wordevok class as supported by this function.")
  }
  if(!is.null(wordevok$Loop))
  {
    stop("To use this function, the wordevok object cannot contain loops.")
  }
  if(output != "Complete" && output != "Edgelist" && output != "Simplify")
  {
    stop("This output type is not defined.")
  }
  dataset = apply(wordevok$Dataset,2,as.character)
  n = dim(dataset)[2]
  # One data.frame per importance rank: (identifier, evocation, rank).
  source = list()
  for (i in 1:n)
  {
    source[[i]] = as.data.frame(cbind(as.character(wordevok[[1]]),dataset[,i],i))
  }
  # Pair every rank i with every later rank j to enumerate the edges
  # each individual induces between their own evocations.
  target = list()
  c = 0
  for (i in 1:(n-1))
  {
    for (j in (i+1):n)
    {
      c = c + 1
      target[[c]] = as.data.frame(cbind(source[[i]],source[[j]][,-1]))
    }
  }
  listing = as.data.frame(rbindlist(target))
  listing = as.data.frame(apply(listing,2,as.character))
  listing = na.omit(listing)   # drop pairs that involve an empty slot
  colnames(listing) = c(names(wordevok)[1],"Source","Source_ID","Target","Target_ID")
  listing = as.data.frame(apply(listing,2,as.character))
  # Weight the edges individual by individual: each person's weight set
  # depends on how many evocations (conect) that person expressed.
  # NOTE(review): this assumes each person's rows come out of split() in
  # the same (i, j) pair order meaning_weighted() uses — confirm.
  ponder = split(listing,listing[,1])
  for (i in 1:length(ponder))
  {
    conect = max(as.numeric(as.character(ponder[[i]]$Target_ID)))
    ponder[[i]]$weight = meaning_weighted(conect,n,"Vector")
  }
  listing = as.data.frame(rbindlist(ponder))
  if(output == "Complete")
  {
    return(listing[,c(names(wordevok)[1],"Source","Target","weight")])
  } else {
    listing = listing[,c("Source","Target","weight")]
    if(output == "Edgelist")
    {
      return(listing)
    } else {
      # "Simplify": collapse duplicated (and direction-reversed) edges
      # into one undirected edge whose weight is the sum of all copies.
      combining = t(combn(wordevok$Evocations,2))
      weigh = NULL
      for(i in 1:dim(combining)[1]){
        verse = sum(listing[listing$Source == combining[i,1] &
                              listing$Target == combining[i,2],"weight"])
        inverse = sum(listing[listing$Source == combining[i,2] &
                                listing$Target == combining[i,1],"weight"])
        weigh[i] = verse + inverse
      }
      listing = as.data.frame(combining)
      listing$v3 = weigh
      listing = listing[listing$v3 > 0,]   # keep only edges that occur
      colnames(listing) = c("Source","Target","weight")
      return(listing)
    }
  }
}
#' Construction of the adjacency matrix by laplacian method.
#'
#' This function constructs the adjacency matrix by laplacian method.
#'@param wordevok A wordevok class object.
#'@param normalized Logical argument. If \code{normalized = TRUE}, the output will be
#'normalized.
#'@return \code{wordevok_laplacian_adjacency} returns the adjacency matrix in which each
#'line and each column are an
#'evocation and each cell takes on -1 if the line's individual and column's
#'individual have common evocations. The diagonal of the matrix equals the number of
#'edges incident on the vertex.
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_laplacian_adjacency(noloop.pref)
#'mtx
#'@export
wordevok_laplacian_adjacency = function(wordevok,normalized = FALSE)
{
  # Graph Laplacian L = D - A built from the binary adjacency matrix:
  # off-diagonal cells are -1 for connected pairs and the diagonal holds
  # each vertex degree. With normalized = TRUE every row with nonzero
  # degree is divided by that degree; isolated vertices are left as-is.
  adjacency = wordevok_binary_adjacency(wordevok)
  degree = as.numeric(rowSums(adjacency))
  laplacian = -adjacency
  diag(laplacian) = degree
  if (normalized)
  {
    for (i in seq_along(degree))
    {
      # Skip degree-0 rows to avoid dividing by zero.
      if (degree[i] != 0)
      {
        laplacian[i, ] = laplacian[i, ] / degree[i]
      }
    }
  }
  return(laplacian)
}
#' Extrai uma lista contendo as evocações das comunidades do objeto wordevok.
#'
#' Essa função extrai, mediante a informação dos membros de cada comunidade
#' encontrada por um método de agrupamento em redes, uma lista contendo os
#' subconjuntos de evocações de cada uma das comunidades informadas.
#'@param wordevok Um objeto da classe wordevok.
#'@param groups Uma lista contendo os membros da respectiva comunidade.
#'....
#'@return \code{wordevok_comm_subsets} Retorna uma lista onde em cada uma das
#'dimensões são alocados os subconjuntos do objeto \code{wordevok} orginal
#'de acordo com as comunidades informadas.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#' add.rownames = "names")
#'c = cluster_louvain(g,weights = E(g)$weight)
#'sub = wordevok_comm_subsets(noloop.pref,c)
#'@export
wordevok_comm_subsets = function(wordevok,groups)
{
  # Split the wordevok dataset into one subset per community: element i
  # holds the rows of wordevok$Dataset whose identifiers appear in
  # groups[[i]], in the order the members are listed there.
  #
  # wordevok: a wordevok object (only its $Dataset slot is used).
  # groups:   a list-like object of community member identifiers.
  #
  # Returns a list named "Group1", "Group2", ... of dataset subsets.
  dataset = wordevok$Dataset
  members = rownames(dataset)
  subsets = lapply(seq_along(groups),
                   function(g) dataset[match(groups[[g]], members), ])
  names(subsets) = paste0("Group", seq_along(subsets))
  return(subsets)
}
#' Extract the submatrix of adjacency matrix under founded communities.
#'
#' ....
#'@param adjacency A adjacency matrix.
#'@param groups Uma lista contendo os membros da respectiva comunidade.
#'@param laplacian ...
#'....
#'@return \code{wordevok_comm_submatrix} ...
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g,weights = E(g)$weight)
#'sub = wordevok_comm_submatrix(mtx,c,laplacian = FALSE)
#'@export
wordevok_comm_submatrix = function(adjacency,groups,laplacian = FALSE)
{
  # Extract, for each community in `groups`, the square submatrix of
  # `adjacency` restricted to that community's members. A single-member
  # community collapses to a scalar (base R drop behavior, kept for
  # compatibility with the original). With laplacian = TRUE each
  # submatrix additionally has its column sums subtracted from its
  # diagonal.
  name = rownames(adjacency)
  submatrix = list()
  for (g in seq_along(groups))
  {
    idx = match(groups[[g]], name)
    # adjacency[idx, idx] reproduces the original two-step subsetting
    # exactly, including the drop-to-scalar case when length(idx) == 1.
    submatrix[[g]] = adjacency[idx, ][if (length(idx) > 1) TRUE else idx]
    submatrix[[g]] = if (length(idx) > 1) adjacency[idx, ][, idx] else adjacency[idx, ][idx]
  }
  names(submatrix) = paste0("group", seq_along(groups))
  if (laplacian)
  {
    for (g in seq_along(submatrix))
    {
      block = submatrix[[g]]
      deg = colSums(block)
      for (k in 1:dim(block)[1])
      {
        block[k, k] = block[k, k] - deg[k]
      }
      submatrix[[g]] = block
    }
  }
  return(submatrix)
}
#' Transforma uma matriz de adjacências em uma lista de arestas.
#'
#' Esta função transforma a matriz de adjacências em uma lista de arestas.
#'@param adjacency Uma matriz de adjacencias.
#'...
#'@return \code{wordevok_adjacency_to_list} retorna uma lista de adjacências
#'conforme fornecida pela matriz de adjacências original. É importante ressaltar
#'que essa transformação ignora as limitações de se representar as conexões do
#'grafo através de uma lista de adjacência (como por exemplo a incapacidade de
#'representar vértices isolados). As modificações necessárias ficam
#'a cargo do usuário.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'lst = wordevok_adjacency_to_list(mtx)
#'@export
wordevok_adjacency_to_list = function(adjacency)
{
  # Convert an adjacency matrix into an edge list: one row per unordered
  # pair of vertices (from the matrix row names) with the corresponding
  # weight. Zero-weight pairs are kept, mirroring the matrix faithfully;
  # isolated-vertex limitations of edge lists are the caller's concern.
  #
  # adjacency: square matrix with matching row/column names.
  #
  # Returns a data.frame with columns Source, Target and weight.
  name = rownames(adjacency)
  combination = as.data.frame(t(combn(name, 2)))
  colnames(combination) = c("Source", "Target")
  # Indexing with a two-column character matrix pulls every weight in a
  # single vectorized lookup (replaces the original per-row loop).
  combination$weight = adjacency[cbind(as.character(combination$Source),
                                       as.character(combination$Target))]
  return(combination)
}
#' Cria subgrupos de uma lista de acordo com os grupos informados.
#'
#' Esta função cria sublistas de adjacência mediante à informação das
#' comunidades.
#'
#'@param list Uma lista de arestas.
#'@param groups Uma lista contendo os membros da respectiva comunidade.
#'....
#'@return \code{split_list_wordevok} retorna uma lista contendo em cada dimensão
#'as ligações por comunidade. Assim, a função ignora as ligações entre comunidades, deixando apenas as
#' ligações entre membros de uma mesma comunidade.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_meaning_list(noloop.pref)
#'g = graph_from_data_frame(mtx[,c(2,3)], directed = FALSE)
#'c = cluster_louvain(g)
#'lst = split_list_wordevok(mtx,c)
#'@export
split_list_wordevok = function(list,groups)
{
  # For each community in `groups`, restrict the edge list to internal
  # edges: first keep the rows whose Source is a community member, then
  # keep those whose Target also is. Between-community edges are dropped.
  # Note: the parameter `list` (kept for interface compatibility) is the
  # edge-list data.frame and shadows base::list.
  evo = vector("list", length(groups))
  for (i in seq_along(groups))
  {
    members = groups[[i]]
    by_source = split(list, list$Source)[members]
    inside_source = data.frame(rbindlist(by_source))
    by_target = split(inside_source, inside_source$Target)[members]
    evo[[i]] = data.frame(rbindlist(by_target))
  }
  return(evo)
}
#' Armazena a frequência de evocação e a ordem média de evocação de cada evocação
#' única padronizada.
#'
#' ....
#'@param wordevok Um objeto da classe wordevok.
#'@param groups Uma lista contendo os membros da respectiva comunidade.
#'....
#'@return \code{telp_wordevok} retorna uma lista que contém em cada dimensão sublistas
#'com um data.frame contendo as evocações únicas, ordem média de evocação e frequência
#'de evocação por comunidade.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'@export
telp_wordevok = function(wordevok,groups)
{
  # For every community, compute each unique evocation's frequency and
  # mean order of evocation (OME), plus the mean / median / mode cutoffs
  # used later to build the Verges four-quadrant chart.
  #
  # wordevok: a loop-free object of class "wordevok".
  # groups:   a list-like object with each community's member identifiers.
  #
  # Returns a "Multiple-TELP-wordevok" list; each element is a
  # "TELP-wordevok" list with slots resini (evocation, frequency, OME),
  # cfmean, cfmedian and cfmode (frequency/OME cutoff pairs).
  #
  # Split the dataset per community and rebuild each subset as its own
  # wordevok object (IDCLASS carries the row identifiers through).
  cp = wordevok_comm_subsets(wordevok,groups)
  for(i in 1:length(cp))
  {
    cp[[i]]$IDCLASS = rownames(cp[[i]])
  }
  lcp = list()
  for (i in 1:length(cp))
  {
    lcp[[i]] = as.wordevok(cp[[i]],"IDCLASS")
  }
  # fcp: evocation frequency table per community; wcp: the evocations.
  fcp = list()
  for (i in 1:length(lcp))
  {
    fcp[[i]] = table(as.matrix(lcp[[i]]$Dataset))
  }
  wcp = list()
  for (i in 1:length(fcp))
  {
    wcp[[i]] = rownames(fcp[[i]])
  }
  # ome: mean order of evocation of each word, i.e. the frequency-weighted
  # average of the column positions where it appears.
  # NOTE(review): built via eval(parse(...)) string assembly — fragile
  # metaprogramming; preserved byte-for-byte but a candidate for rewrite.
  ome = list()
  for (i in 1:length(lcp))
  {
    ome[[i]] = rep(1:length(wcp[[i]]))
    for(j in 1:length(wcp[[i]]))
    {
      k = dim(lcp[[i]]$Dataset)[2]
      eval(parse(text=paste0("ome[[",i,"]][",j,"] = round((",paste(paste0(1:k,
                 "*","sum(na.omit(as.integer(lcp[[",i,
                 "]]$Dataset[,",1:k,"]==wcp[[",i,"]][",j,
                 "])))"),collapse = "+"),")/fcp[[",i,"]][",
                 j,"],2)")))
    }
  }
  # resini: one row per evocation -> (word, frequency, mean order).
  resini = list()
  for(i in 1:length(ome))
  {
    resini[[i]] = cbind(wcp[[i]],fcp[[i]],ome[[i]])
    rownames(resini[[i]])=rep("",nrow(resini[[i]]))
  }
  # Statistical mode helper (local: shadows base::mode on purpose here).
  mode <- function(x) {
    ux <- unique(x)
    ux[which.max(tabulate(match(x, ux)))]
  }
  # Cutoff pairs (frequency, OME) per community for the quadrant split.
  cfmean = list()
  for(i in 1:length(resini))
  {
    cfmean[[i]] = rep(0,2)
    cfmean[[i]][1] = round(mean(as.numeric(resini[[i]][,2])),2)
    cfmean[[i]][2] = round(mean(as.numeric(resini[[i]][,3])),2)
  }
  cfmedian = list()
  for(i in 1:length(resini))
  {
    cfmedian[[i]] = rep(0,2)
    cfmedian[[i]][1] = round(median(as.numeric(resini[[i]][,2])),2)
    cfmedian[[i]][2] = round(median(as.numeric(resini[[i]][,3])),2)
  }
  cfmode = list()
  for(i in 1:length(resini))
  {
    cfmode[[i]] = rep(0,2)
    cfmode[[i]][1] = round(mode(as.numeric(resini[[i]][,2])),2)
    cfmode[[i]][2] = round(mode(as.numeric(resini[[i]][,3])),2)
  }
  # Assemble one "TELP-wordevok" element per community.
  output = list()
  for (i in 1:length(resini))
  {
    output[[i]] = list()
    output[[i]][[1]] = resini[[i]]
    output[[i]][[2]] = cfmean[[i]]
    output[[i]][[3]] = cfmedian[[i]]
    output[[i]][[4]] = cfmode[[i]]
    names(output[[i]]) = c("resini","cfmean","cfmedian","cfmode")
    class(output[[i]]) = "TELP-wordevok"
  }
  class(output) = "Multiple-TELP-wordevok"
  return(output)
}
#' Apresenta o gráfico de quadrantes de Vergés.
#'
#' Esta função apresenta o gráfico de quadrantes de Vergés.
#'
#'@param twfile Uma dimensão de um objeto da classe \code{Multiple-TELP-wordevok}, gerado pela função \code{telp_wordevok}
#'@param ln Lógico. \code{TRUE} implica na utilização da escala logarítmica nos valores encontrados.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'telp_wordevok_plot(t[[1]])
#'@export
telp_wordevok_plot = function(twfile,ln=FALSE)
{
  # Verges quadrant plot: evocation frequency (x) vs average evocation order
  # (y), with cut-off lines for the mean (gray), median (red) and mode
  # (purple) criteria. Set ln = TRUE to plot on the natural-log scale.
  #
  # BUG FIX: class(twfile) != "..." errors when the object carries more than
  # one class (length > 1 condition); inherits() is the robust test. The old
  # placeholder error message ("Erro ainda n?o definido.", garbled encoding)
  # is replaced by an informative one.
  if (!inherits(twfile, "TELP-wordevok"))
  {
    stop("'twfile' must be an object of class \"TELP-wordevok\", as produced by telp_wordevok().",
         call. = FALSE)
  }
  # Single code path for both scales: apply log() or identity() throughout.
  tf <- if (ln) log else identity
  lblPrefix <- if (ln) "Logaritmo da " else ""
  plot(tf(as.numeric(twfile$resini[,2])),
       tf(as.numeric(twfile$resini[,3])),
       xlab = paste0(lblPrefix, "Frequencia de Evocacao"),
       ylab = paste0(lblPrefix, "Ordem Media de Evocacao"),
       col = rainbow(nrow(twfile$resini)),
       pch = 19)
  abline(v = tf(as.numeric(twfile$cfmean[1])),
         h = tf(as.numeric(twfile$cfmean[2])), col = "gray20")
  abline(v = tf(as.numeric(twfile$cfmedian[1])),
         h = tf(as.numeric(twfile$cfmedian[2])), col = "red")
  abline(v = tf(as.numeric(twfile$cfmode[1])),
         h = tf(as.numeric(twfile$cfmode[2])), col = "purple")
}
#' Divide as evocações por seus respectivos quadrantes de Vergès segundo o método selecionado.
#'
#'
#'@param mtwfile Um objeto da classe \code{Multiple-TELP-wordevok}, gerado pela função \code{telp_wordevok}.
#'@param method Criterio de discriminação dos quadrantes.
#'....
#'@return \code{telp_wordevok_quad} retorna uma lista contendo os quadrantes de Vergès
#'da representação social. Cada dimensão do objeto resultante representa uma comunidade, e cada subdimensão
#'representa um quadrante daquela comunidade.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'@export
telp_wordevok_quad = function(mtwfile,method = "mean")
{
  # Index of the chosen cut-off element inside each community's sublist:
  # 2 = cfmean, 3 = cfmedian, anything else falls back to 4 = cfmode.
  m <- switch(method, mean = 2, median = 3, 4)
  quad <- list()
  for (i in seq_along(mtwfile))
  {
    resini <- data.frame(mtwfile[[i]]$resini)
    cutoff <- mtwfile[[i]][[m]]
    # cutoff[1] splits on frequency (column 2); cutoff[2] on average order
    # of evocation (column 3).
    highFreq <- as.integer(resini[, 2]) >= cutoff[1]
    lowOrder <- as.numeric(resini[, 3]) < cutoff[2]
    # Subset one Verges quadrant and relabel its columns.
    pickQuadrant <- function(rows) {
      q <- data.frame(resini[rows, ])
      colnames(q) <- c("Evocation", "Frequency", "AEO")
      q
    }
    quad[[i]] <- list(Quad1 = pickQuadrant(highFreq & lowOrder),
                      Quad2 = pickQuadrant(highFreq & !lowOrder),
                      Quad3 = pickQuadrant(!highFreq & lowOrder),
                      Quad4 = pickQuadrant(!highFreq & !lowOrder))
  }
  class(quad) <- "TELP-wordevok-quad"
  return(quad)
}
#' Calcula as coordenadas da relevância para os significados dos quadrantes de
#' uma determinada comunidade.
#'
#'@param quad Uma subdimensão de um objeto da classe \code{TELP-wordevok-quad} gerado pela função
#'\code{telp_wordevok_quad}.
#'@param classe Um documento listando as classes às quais pertencem as evocações
#'únicas do wordevok. Veja os detalhes para mais informações.
#'....
#'@return \code{wordevok_quad_class} Retorna as coordenadas de relevância relativa
#'de um determinado significado para um quadrante de uma comunidade.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'
#'# Macro-groups
#'
#'u = noloop.pref$Evocations
#'mg = c("Optional","Optional","Fifth","Fifth","Second","First","Optional",
#' "Eigth","Fifth","Third","Second","Forth","Optional","Forth","Sixth",
#' "Third","Optional","Forth","Optional","Sixth")
#'u = data.frame(cbind(u,mg))
#'wqc= wordevok_quad_class(twq[[1]][[1]],u)
#'@export
wordevok_quad_class = function(quad,classe)
{
  # Normalise both inputs to character matrices. apply() collapses a
  # single-row quadrant to a vector, so it is transposed back to 1 x 3.
  classe <- apply(classe, 2, as.character)
  if (dim(quad)[1] == 1) {
    quad <- t(apply(quad, 2, as.character))
  } else {
    quad <- apply(quad, 2, as.character)
  }
  # One column per distinct meaning (second column of `classe`); row 3
  # accumulates relevance, row 1 is later overwritten with the row-3
  # maximum, row 2 stays zero.
  meanings <- unique(as.character(classe[, 2]))
  rad <- as.data.frame(matrix(ncol = length(meanings), nrow = 1))
  colnames(rad) <- meanings
  rad[1, ] <- rep(0, length(meanings))
  rad[2:3, ] <- rep(0, length(meanings))
  for (row in seq_len(dim(classe)[1])) {
    hit <- match(as.character(classe[row, 1]), quad[, 1])
    if (!is.na(hit)) {
      col <- match(as.character(classe[row, 2]), colnames(rad))
      # Relevance contribution: log(frequency) weighted by 1 / average order.
      rad[3, col] <- rad[3, col] + log(as.numeric(quad[hit, 2])) * (1 / as.numeric(quad[hit, 3]))
    }
  }
  rad[1, ] <- max(rad[3, ])
  return(rad)
}
#' Calcula as coordenadas da relevância para os significados dos quadrantes de
#' multicomunidades.
#'
#'@param twqfile Um objeto da classe \code{TELP-wordevok-quad} gerado pela função
#'\code{telp-wordevok-quad}.
#'@param classe Um documento listando as classes às quais pertencem as evocações
#'únicas do wordevok. Veja os detalhes para mais informações.
#'....
#'@return \code{wordevok_radar_attr} retorna uma lista de coordenadas de relevância relativa
#'de um determinado significado para cada quadrante de cada comunidade inserida.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'
#'# Macro-groups
#'
#'u = noloop.pref$Evocations
#'mg = c("Optional","Optional","Fifth","Fifth","Second","First","Optional",
#' "Eigth","Fifth","Third","Second","Forth","Optional","Forth","Sixth",
#' "Third","Optional","Forth","Optional","Sixth")
#'u = data.frame(cbind(u,mg))
#'wra= wordevok_radar_attr(twq,u)
#'@export
wordevok_radar_attr = function(twqfile, classe)
{
  # For every community, compute relevance coordinates for each NON-EMPTY
  # quadrant; empty quadrants are silently skipped so the result keeps only
  # quadrants that actually contain evocations.
  radar <- list()
  for (i in seq_along(twqfile))
  {
    kept <- list()
    keptNames <- NULL
    slot <- 1
    for (j in seq_along(twqfile[[i]]))
    {
      quadrant <- twqfile[[i]][[j]]
      if (nrow(quadrant) != 0)
      {
        kept[[slot]] <- wordevok_quad_class(quadrant, classe)
        keptNames[slot] <- names(twqfile[[i]])[j]
        slot <- slot + 1
      }
    }
    # Label each kept entry with its quadrant name (Quad1..Quad4).
    names(kept) <- keptNames
    radar[[i]] <- kept
  }
  class(radar) <- "TELP-wordevok-radar"
  return(radar)
}
#' Adequa as coordenadas para a utilização dos radares criados usando o \code{ggplot2}.
#'
#'@param radar_attr Um objeto da classe \code{TELP-wordevok-radar} criado pela
#'função \code{wordevok_radar_attr}.
#'....
#'@return \code{wordevok_radar_gg} retorna uma lista onde cada dimensão representa
#'o núcleo central da comunidade em questão bem como as relevâncias relativas de
#'cada conceito na mesma.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#' add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'
#'# Macro-groups
#'
#'u = noloop.pref$Evocations
#'mg = c("Optional","Optional","Fifth","Fifth","Second","First","Optional",
#' "Eigth","Fifth","Third","Second","Forth","Optional","Forth","Sixth",
#' "Third","Optional","Forth","Optional","Sixth")
#'u = data.frame(cbind(u,mg))
#'wra= wordevok_radar_attr(twq,u)
#'wrg= wordevok_radar_gg(wra)
#'@export
wordevok_radar_gg = function(radar_attr)
{
  # Reshape each community's first-quadrant relevance coordinates into a
  # ggplot2-friendly data frame: one row per Core concept with its Priority
  # (relevance scaled so the maximum is 1).
  core <- list()
  for (i in seq_along(radar_attr))
  {
    firstQuad <- t(radar_attr[[i]][[1]])
    comm <- data.frame(Community = paste("Community", i),
                       Core = rownames(firstQuad),
                       Priority = firstQuad[, 3] / max(firstQuad[, 3]))
    setorder(comm, "Core")
    # Repeat the first row at the end so the radar polygon closes.
    comm[nrow(comm) + 1, ] <- comm[1, ]
    rownames(comm) <- seq_len(nrow(comm))
    core[[i]] <- comm
  }
  names(core) <- paste0("Community_", seq_along(radar_attr))
  class(core) <- "wordevok-radar-gg"
  return(core)
}
#' Gera os gráficos de radar para o pensamento coletivo das comunidades usando \code{ggplot2}.
#'
#' ....
#'@param radar_gg Um objeto da classe \code{wordevok-radar-gg} criado pela
#'função \code{wordevok_radar_gg}.
#'....
#'@return \code{wordevok_radar_plot} retorna uma lista onde cada dimensão representa
#'um gráfico de radar do núcleo central da comunidade em questão.
#' ...
#'@author Wesley Henrique Silva Pereira
#'@examples
#'data("preferences")
#'mtx = wordevok_affinity_adjacency(noloop.pref)
#'g = graph_from_adjacency_matrix(mtx,mode = "undirected", weighted = TRUE,
#'add.rownames = "names")
#'c = cluster_louvain(g, weights = E(g)$weight)
#'t = telp_wordevok(noloop.pref,c)
#'twq = telp_wordevok_quad(t,"mean")
#'
#'# Macro-groups
#'
#'u = noloop.pref$Evocations
#'mg = c("Optional","Optional","Fifth","Fifth","Second","First","Optional",
#' "Eigth","Fifth","Third","Second","Forth","Optional","Forth","Sixth",
#' "Third","Optional","Forth","Optional","Sixth")
#'u = data.frame(cbind(u,mg))
#'wra = wordevok_radar_attr(twq,u)
#'wrg = wordevok_radar_gg(wra)
#'wrp = wordevok_radar_plot(wrg)
#'wrp[[1]]
#'@export
wordevok_radar_plot = function(radar_gg)
{
  # Replace each community's data frame with its ggplot radar chart, built on
  # the custom coord_radar() coordinate system.
  for(i in 1:length(radar_gg))
  {
    radar_gg[[i]] = ggplot(radar_gg[[i]], aes(x = Core, y = Priority)) +
      geom_polygon(aes(group = Community), color = "#006666", fill="#006666", size = 0.5, alpha=0.6) +
      ggtitle(as.character(radar_gg[[i]]$Community[1])) +
      xlab("") +
      ylab("") +
      ylim(-0.1,1) +
      coord_radar() +
      # CLEANUP: an earlier guides(color = guide_legend(ncol = 2)) call was
      # removed; ggplot2 lets the later guides() specification for the same
      # (colour) aesthetic override it, so it had no effect.
      guides(colour=guide_legend(nrow=4, byrow=TRUE), shape=guide_legend(nrow=1, byrow=TRUE)) +
      theme(
        axis.ticks.y = element_blank(), axis.text.y = element_blank(),
        legend.key = element_blank(), legend.title = element_blank(),
        legend.background = element_rect(color="#ffffff", fill="transparent"),
        panel.background = element_rect(fill = "white", colour = "white", size = 0.1, linetype = "solid"),
        panel.grid.major = element_line(size = 0.3, linetype = 'solid', colour = "#dddddd")
      )
  }
  return(radar_gg)
}
coord_radar <- function (theta = "x", start = 0, direction = 1)
{
  # Radar (spider) chart coordinates: CoordPolar with is_linear forced to
  # TRUE so grid lines and polygon edges stay straight instead of curving.
  theta <- match.arg(theta, c("x", "y"))
  r <- if (theta == "x") "y" else "x"
  ggproto("CordRadar", CoordPolar,
          theta = theta, r = r, start = start,
          direction = sign(direction),
          is_linear = function(coord) TRUE)
}
|
library(datasets)
data(swiss)
swissData <- swiss
# Available predictors: Fertility, Agriculture, Examination, Education, Catholic, Infant.Mortality
recordsNum<-nrow(swissData)
shinyServer(
  function(input, output) {
    # Build the regression formula text reactively; it is shared by the
    # caption, the plots and the model summary outputs.
    formulaText <- reactive({
      a<-paste("Infant.Mortality ~ ", input$variable)
      if (input$variable2 != "None"){
        a<-paste(a," + ")
        a<-paste(a,input$variable2)
      }
      a
    })
    # Return the formula text for printing as a caption
    output$caption <- renderText({
      formulaText()
    })
    # Boxplot of the requested formula over the first `recordsNum` rows.
    output$deathPlot <- renderPlot({
      swissData <- head(swiss,input$recordsNum)
      boxplot(as.formula(formulaText()),
              data = swissData)
    })
    # Density plot(s) of the selected predictor(s).
    output$densPlot <- renderPlot({
      swissData <- head(swiss,input$recordsNum)
      if (input$variable2 != "None"){
        par(mfrow=c(1,2))
        title<-paste("Density of",input$variable2)
        plot(density(swissData[input$variable2][,]),main=title)
      }
      title<-paste("Density of",input$variable)
      plot(density(swissData[input$variable][,]),main=title)
    })
    # Scatter of the primary predictor against Infant.Mortality with the
    # fitted values overlaid.
    output$comparePlot <- renderPlot({
      swissData <- head(swiss,input$recordsNum)
      fit<-lm(formulaText(),data = swissData)
      plot(swissData$Infant.Mortality,swissData[input$variable][,],main=input$variable)
      lines(swissData$Infant.Mortality,fit$fitted, col="red")
    })
    # Standard lm diagnostic panel (2x2).
    output$regPlot <- renderPlot({
      swissData <- head(swiss,input$recordsNum)
      fit<-lm(formulaText(),data = swissData)
      par(mfrow=c(2,2))
      plot(fit,main=formulaText())
    })
    # BUG FIX: this output used to be registered INSIDE the regPlot
    # renderPlot(), so it only came into existence as a side effect of
    # rendering that plot. Register it at the server level instead.
    output$funtionLm <- renderText({
      formulaText()
    })
  }
)
| /guerra-0216-1/sample3/server.R | permissive | dguerrar/blog | R | false | false | 1,984 | r | library(datasets)
data(swiss)
swissData <- swiss
# Available predictors: Fertility, Agriculture, Examination, Education, Catholic, Infant.Mortality
recordsNum<-nrow(swissData)
shinyServer(
  function(input, output) {
    # Build the regression formula text reactively; it is shared by the
    # caption, the plots and the model summary outputs.
    formulaText <- reactive({
      a<-paste("Infant.Mortality ~ ", input$variable)
      if (input$variable2 != "None"){
        a<-paste(a," + ")
        a<-paste(a,input$variable2)
      }
      a
    })
    # Return the formula text for printing as a caption
    output$caption <- renderText({
      formulaText()
    })
    # Boxplot of the requested formula over the first `recordsNum` rows.
    output$deathPlot <- renderPlot({
      swissData <- head(swiss,input$recordsNum)
      boxplot(as.formula(formulaText()),
              data = swissData)
    })
    # Density plot(s) of the selected predictor(s).
    output$densPlot <- renderPlot({
      swissData <- head(swiss,input$recordsNum)
      if (input$variable2 != "None"){
        par(mfrow=c(1,2))
        title<-paste("Density of",input$variable2)
        plot(density(swissData[input$variable2][,]),main=title)
      }
      title<-paste("Density of",input$variable)
      plot(density(swissData[input$variable][,]),main=title)
    })
    # Scatter of the primary predictor against Infant.Mortality with the
    # fitted values overlaid.
    output$comparePlot <- renderPlot({
      swissData <- head(swiss,input$recordsNum)
      fit<-lm(formulaText(),data = swissData)
      plot(swissData$Infant.Mortality,swissData[input$variable][,],main=input$variable)
      lines(swissData$Infant.Mortality,fit$fitted, col="red")
    })
    # Standard lm diagnostic panel (2x2).
    output$regPlot <- renderPlot({
      swissData <- head(swiss,input$recordsNum)
      fit<-lm(formulaText(),data = swissData)
      par(mfrow=c(2,2))
      plot(fit,main=formulaText())
    })
    # BUG FIX: this output used to be registered INSIDE the regPlot
    # renderPlot(), so it only came into existence as a side effect of
    # rendering that plot. Register it at the server level instead.
    output$funtionLm <- renderText({
      formulaText()
    })
  }
)
|
## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# NOTE(review): hard-coded absolute path; the script only runs on the
# author's machine as written.
setwd("/Users/tanmaynath/Dropbox/DataScience/Coursera/gettingAndCleaningdata/")
library("reshape2")
#Load feature names
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
#Load activity labels
activityLabels <- read.table("./UCI HAR Dataset/activity_labels.txt")
#Load and merge train data
xTrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
yTrain <- read.table("./UCI HAR Dataset/train/Y_train.txt")
subjectTrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(xTrain) <- features
names(subjectTrain) <-c("Subject")
# Map numeric activity codes to their descriptive labels.
yTrain <- as.data.frame(activityLabels[yTrain[,1],2])
# 3. Uses descriptive activity names to name the activities in the data set
names(yTrain) <- c("activityLabel")
trainData <- cbind(subjectTrain,xTrain,yTrain)
#Load and merge test data
xTest <- read.table("./UCI HAR Dataset/test/X_test.txt")
yTest <- read.table("./UCI HAR Dataset/test/Y_test.txt")
subjectTest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(xTest) <- features
names(subjectTest) <-c("Subject")
yTest <- as.data.frame(activityLabels[yTest[,1],2])
# 3. Uses descriptive activity names to name the activities in the data set
names(yTest) <- c("activityLabel")
testData <- cbind(subjectTest,xTest,yTest)
# 1 Merges the training and the test sets to create one data set
finalData <- rbind(trainData,testData)
#names(finalData) = features
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# NOTE(review): extractFeatures is computed but never used afterwards -- the
# melt/dcast below operates on ALL features, so step 2 appears unfinished.
extractFeatures <- finalData[,grepl("mean|std", features)]
id_labels = c("Subject", "activityLabel")
meltData = melt(finalData, id = id_labels, measure.vars = features)
# Apply mean function to dataset using dcast function
tidyData = dcast(meltData, Subject + activityLabel ~ variable, mean)
write.table(tidyData, file = "./tidyData.txt",row.name=FALSE)
| /gettingAndCleaningdata/run_analysis.R | no_license | meet10may/dataScience | R | false | false | 2,345 | r | ## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# NOTE(review): hard-coded absolute path; the script only runs on the
# author's machine as written.
setwd("/Users/tanmaynath/Dropbox/DataScience/Coursera/gettingAndCleaningdata/")
library("reshape2")
#Load feature names
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
#Load activity labels
activityLabels <- read.table("./UCI HAR Dataset/activity_labels.txt")
#Load and merge train data
xTrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
yTrain <- read.table("./UCI HAR Dataset/train/Y_train.txt")
subjectTrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
names(xTrain) <- features
names(subjectTrain) <-c("Subject")
# Map numeric activity codes to their descriptive labels.
yTrain <- as.data.frame(activityLabels[yTrain[,1],2])
# 3. Uses descriptive activity names to name the activities in the data set
names(yTrain) <- c("activityLabel")
trainData <- cbind(subjectTrain,xTrain,yTrain)
#Load and merge test data
xTest <- read.table("./UCI HAR Dataset/test/X_test.txt")
yTest <- read.table("./UCI HAR Dataset/test/Y_test.txt")
subjectTest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
names(xTest) <- features
names(subjectTest) <-c("Subject")
yTest <- as.data.frame(activityLabels[yTest[,1],2])
# 3. Uses descriptive activity names to name the activities in the data set
names(yTest) <- c("activityLabel")
testData <- cbind(subjectTest,xTest,yTest)
# 1 Merges the training and the test sets to create one data set
finalData <- rbind(trainData,testData)
#names(finalData) = features
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# NOTE(review): extractFeatures is computed but never used afterwards -- the
# melt/dcast below operates on ALL features, so step 2 appears unfinished.
extractFeatures <- finalData[,grepl("mean|std", features)]
id_labels = c("Subject", "activityLabel")
meltData = melt(finalData, id = id_labels, measure.vars = features)
# Apply mean function to dataset using dcast function
tidyData = dcast(meltData, Subject + activityLabel ~ variable, mean)
write.table(tidyData, file = "./tidyData.txt",row.name=FALSE)
|
#' Run burn-in breeding scheme simulations
#'
#' Allows users to run simulation and then continue again later. Output is direct input for \code{runSchemesPostBurnIn}.
#' Runs potentially multiple replications and optionally in parallel.
#'
#' @param nReplications Integer number of replications of the specific breeding scheme to run
#' @param nSimCores Integer, number of cores to optionally execute replicate simulations in parallel
#' @param bsp A list of breeding scheme parameters.
#' @param nBurnInCycles Integer number of cycles to as 'burn-in' using the \code{selCritPop} and \code{selCritPipe} settings.
#' @param iniFunc string, Function to initialize the breeding program.
#' @param productFunc string, Function to advance the product pipeline by one generation
#' @param popImprovFunc string, Function to improve the breeding population and select parents to initiate the next cycle of the breeding scheme
#' @param nBLASthreads number of cores for each worker to use for multi-thread BLAS. Will speed up, for example, genomic predictions when using selCritGRM. Careful to balance with other forms of parallel processing.
#' @param nThreadsMacs2 uses the nThreads argument in \code{runMacs2}, parallelizes founder sim by chrom.
#' @param selCritPop string, overrides the selCrit in \code{bsp} for the burn-in stage.
#' @param selCritPipe string, overrides the selCrit in \code{bsp} for the burn-in stage.
#' @return A \code{records} object containing the phenotypic records retained of the breeding scheme
#'
#' @details A wrapper to initiate the breeding program then iterate cycles of product pipeline and population improvement
#'
#' @export
runBurnInSchemes<-function(bsp,
                           nBurnInCycles,
                           selCritPop="selCritIID",
                           selCritPipe="selCritIID",
                           iniFunc="initializeScheme",
                           productFunc="productPipeline",
                           popImprovFunc="popImprov1Cyc",
                           nReplications=1,nSimCores=1,
                           nBLASthreads=NULL,nThreadsMacs2=NULL){
  # library() instead of require(): a missing dependency now fails loudly
  # up front rather than warning and failing later.
  library(furrr)
  plan(multisession, workers = nSimCores)
  # BUG FIX: restore the sequential plan even if a replicate errors
  # part-way through (previously plan(sequential) was only reached on
  # successful completion, leaving worker processes behind).
  on.exit(plan(sequential), add = TRUE)
  options(future.globals.maxSize=+Inf); options(future.rng.onMisuse="ignore")
  simulations<-tibble(SimRep=1:nReplications) %>%
    mutate(burnInSim=future_map(SimRep,function(SimRep,...){
      # Optionally cap BLAS threads inside each parallel worker.
      if(!is.null(nBLASthreads)) { RhpcBLASctl::blas_set_num_threads(nBLASthreads) }
      cat("******", SimRep, "\n")
      # This initiates the founding population
      bsp[["initializeFunc"]] <- get(iniFunc)
      bsp[["productPipeline"]] <- get(productFunc)
      bsp[["populationImprovement"]] <- get(popImprovFunc)
      initList <- bsp$initializeFunc(bsp,nThreadsForMacs=nThreadsMacs2)
      SP <- initList$SP
      bsp <- initList$bsp
      records <- initList$records
      ## set the selection criteria for burn-in
      bsp[["selCritPipeAdv"]] <- get(selCritPipe)
      bsp[["selCritPopImprov"]] <- get(selCritPop)
      # Burn-in cycles
      cat("\n"); cat("Burn-in cycles"); cat("\n")
      for (cycle in 1:nBurnInCycles){
        cat(cycle, " ")
        records <- bsp$productPipeline(records, bsp, SP)
        records <- bsp$populationImprovement(records, bsp, SP)
      }
      return(list(records=records,
                  bsp=bsp,
                  SP=SP))
    },
    bsp=bsp,
    nBurnInCycles=nBurnInCycles,
    selCritPop=selCritPop,
    selCritPipe=selCritPipe,
    iniFunc=iniFunc,
    productFunc=productFunc,
    popImprovFunc=popImprovFunc,
    nBLASthreads=nBLASthreads,
    nThreadsMacs2=nThreadsMacs2))
  return(simulations)
}
| /code/runBurnInSchemes.R | no_license | wolfemd/BreedingSchemeOpt | R | false | false | 3,661 | r | #' Run burn-in breeding scheme simulations
#'
#' Allows users to run simulation and then continue again later. Output is direct input for \code{runSchemesPostBurnIn}.
#' Runs potentially multiple replications and optionally in parallel.
#'
#' @param nReplications Integer number of replications of the specific breeding scheme to run
#' @param nSimCores Integer, number of cores to optionally execute replicate simulations in parallel
#' @param bsp A list of breeding scheme parameters.
#' @param nBurnInCycles Integer number of cycles to as 'burn-in' using the \code{selCritPop} and \code{selCritPipe} settings.
#' @param iniFunc string, Function to initialize the breeding program.
#' @param productFunc string, Function to advance the product pipeline by one generation
#' @param popImprovFunc string, Function to improve the breeding population and select parents to initiate the next cycle of the breeding scheme
#' @param nBLASthreads number of cores for each worker to use for multi-thread BLAS. Will speed up, for example, genomic predictions when using selCritGRM. Careful to balance with other forms of parallel processing.
#' @param nThreadsMacs2 uses the nThreads argument in \code{runMacs2}, parallelizes founder sim by chrom.
#' @param selCritPop string, overrides the selCrit in \code{bsp} for the burn-in stage.
#' @param selCritPipe string, overrides the selCrit in \code{bsp} for the burn-in stage.
#' @return A \code{records} object containing the phenotypic records retained of the breeding scheme
#'
#' @details A wrapper to initiate the breeding program then iterate cycles of product pipeline and population improvement
#'
#' @export
runBurnInSchemes<-function(bsp,
                           nBurnInCycles,
                           selCritPop="selCritIID",
                           selCritPipe="selCritIID",
                           iniFunc="initializeScheme",
                           productFunc="productPipeline",
                           popImprovFunc="popImprov1Cyc",
                           nReplications=1,nSimCores=1,
                           nBLASthreads=NULL,nThreadsMacs2=NULL){
  # library() instead of require(): a missing dependency now fails loudly
  # up front rather than warning and failing later.
  library(furrr)
  plan(multisession, workers = nSimCores)
  # BUG FIX: restore the sequential plan even if a replicate errors
  # part-way through (previously plan(sequential) was only reached on
  # successful completion, leaving worker processes behind).
  on.exit(plan(sequential), add = TRUE)
  options(future.globals.maxSize=+Inf); options(future.rng.onMisuse="ignore")
  simulations<-tibble(SimRep=1:nReplications) %>%
    mutate(burnInSim=future_map(SimRep,function(SimRep,...){
      # Optionally cap BLAS threads inside each parallel worker.
      if(!is.null(nBLASthreads)) { RhpcBLASctl::blas_set_num_threads(nBLASthreads) }
      cat("******", SimRep, "\n")
      # This initiates the founding population
      bsp[["initializeFunc"]] <- get(iniFunc)
      bsp[["productPipeline"]] <- get(productFunc)
      bsp[["populationImprovement"]] <- get(popImprovFunc)
      initList <- bsp$initializeFunc(bsp,nThreadsForMacs=nThreadsMacs2)
      SP <- initList$SP
      bsp <- initList$bsp
      records <- initList$records
      ## set the selection criteria for burn-in
      bsp[["selCritPipeAdv"]] <- get(selCritPipe)
      bsp[["selCritPopImprov"]] <- get(selCritPop)
      # Burn-in cycles
      cat("\n"); cat("Burn-in cycles"); cat("\n")
      for (cycle in 1:nBurnInCycles){
        cat(cycle, " ")
        records <- bsp$productPipeline(records, bsp, SP)
        records <- bsp$populationImprovement(records, bsp, SP)
      }
      return(list(records=records,
                  bsp=bsp,
                  SP=SP))
    },
    bsp=bsp,
    nBurnInCycles=nBurnInCycles,
    selCritPop=selCritPop,
    selCritPipe=selCritPipe,
    iniFunc=iniFunc,
    productFunc=productFunc,
    popImprovFunc=popImprovFunc,
    nBLASthreads=nBLASthreads,
    nThreadsMacs2=nThreadsMacs2))
  return(simulations)
}
|
# NOTE(review): installing a package at the top of an analysis script
# re-installs on every run; consider requireNamespace() with an informative
# error instead.
install.packages("RCurl")
require(RCurl)
# Pairwise matrix fetched from GitHub (file name suggests IBS,
# identity-by-state, values); first CSV column holds row names.
dat1 <- read.csv(text=getURL("https://raw.githubusercontent.com/bongsongkim/logit.regression.rice/master/IBS_matrix.csv"),row.names=1)
# Transform the matrix as 2 - dat1 (presumably similarity -> distance).
dat2 <- matrix(rep(2,dim(dat1)[1]^2),nrow=dim(dat1)[1])
dat <- dat2 - dat1
# Germplasm metadata, read from the current working directory.
germ <- read.csv("RiceDiversity.44K.germplasm.csv",header=T)
# Keep only TEJ/TRJ japonica and IND indica accessions (column 8 holds the
# subpopulation code, column 3 the accession number).
subs <- germ[which(germ[,8] == "TEJ" | germ[,8] == "TRJ" | germ[,8] == "IND"),3]
subs <- paste("NSFTV_",subs,sep="")
dat <- dat[which(colnames(dat) %in% subs),which(colnames(dat) %in% subs)]
# Re-read the metadata and build display labels "(subpop) accession name".
germ <- read.csv("RiceDiversity.44K.germplasm.csv",header=T)
germ <- germ[which(germ[,8] == "TEJ" | germ[,8] == "TRJ" | germ[,8] == "IND"),]
germ[,3] <- paste("NSFTV_",germ[,3],sep="")
germ_lbl <- germ[,3]
sorted <- germ[match(colnames(dat), germ_lbl),]
names <- paste("(",germ[match(colnames(dat), germ_lbl),8],") ",germ[match(colnames(dat), germ_lbl),4],sep="")
colnames(dat) <- names
dat <- t(dat)
colnames(dat) <- names
# Keep the lower triangle only, convert to a dist object and cluster.
dat[upper.tri(dat)] <- NA
dat <- as.dist(dat, diag = TRUE)
hc <- hclust(dat)
plot(as.dendrogram(hc),horiz=T)
# Write a tall JPEG of the horizontal dendrogram.
jpeg("ind.jap.dendrogram.jpg",width = 1300, height = 5000)
par(lwd=5,cex=0.9, mar=c(3,0,0,9))
plot(as.dendrogram(hc),horiz=T)
dev.off()
| /(Figure2,Supplementary_figure1)indica_japonica_dendrogaram.R | no_license | bongsongkim/logit.regression.rice | R | false | false | 1,334 | r |
# NOTE(review): installing a package at the top of an analysis script
# re-installs on every run; consider requireNamespace() with an informative
# error instead.
install.packages("RCurl")
require(RCurl)
# Pairwise matrix fetched from GitHub (file name suggests IBS,
# identity-by-state, values); first CSV column holds row names.
dat1 <- read.csv(text=getURL("https://raw.githubusercontent.com/bongsongkim/logit.regression.rice/master/IBS_matrix.csv"),row.names=1)
# Transform the matrix as 2 - dat1 (presumably similarity -> distance).
dat2 <- matrix(rep(2,dim(dat1)[1]^2),nrow=dim(dat1)[1])
dat <- dat2 - dat1
# Germplasm metadata, read from the current working directory.
germ <- read.csv("RiceDiversity.44K.germplasm.csv",header=T)
# Keep only TEJ/TRJ japonica and IND indica accessions (column 8 holds the
# subpopulation code, column 3 the accession number).
subs <- germ[which(germ[,8] == "TEJ" | germ[,8] == "TRJ" | germ[,8] == "IND"),3]
subs <- paste("NSFTV_",subs,sep="")
dat <- dat[which(colnames(dat) %in% subs),which(colnames(dat) %in% subs)]
# Re-read the metadata and build display labels "(subpop) accession name".
germ <- read.csv("RiceDiversity.44K.germplasm.csv",header=T)
germ <- germ[which(germ[,8] == "TEJ" | germ[,8] == "TRJ" | germ[,8] == "IND"),]
germ[,3] <- paste("NSFTV_",germ[,3],sep="")
germ_lbl <- germ[,3]
sorted <- germ[match(colnames(dat), germ_lbl),]
names <- paste("(",germ[match(colnames(dat), germ_lbl),8],") ",germ[match(colnames(dat), germ_lbl),4],sep="")
colnames(dat) <- names
dat <- t(dat)
colnames(dat) <- names
# Keep the lower triangle only, convert to a dist object and cluster.
dat[upper.tri(dat)] <- NA
dat <- as.dist(dat, diag = TRUE)
hc <- hclust(dat)
plot(as.dendrogram(hc),horiz=T)
# Write a tall JPEG of the horizontal dendrogram.
jpeg("ind.jap.dendrogram.jpg",width = 1300, height = 5000)
par(lwd=5,cex=0.9, mar=c(3,0,0,9))
plot(as.dendrogram(hc),horiz=T)
dev.off()
|
#' get opt merge
#'
#' Get the optimal merge matrix StARS is run.
#'
#' @param est output from \code{spiec.easi} or \code{icov.select}
#' @return Weighted, symmetric matrix of edge probability estimates.
#' @export
getOptMerge <- function(est) {
  # inherits() handles objects that carry more than one class, where
  # class(est) == "select" would yield a length > 1 condition and fail.
  if (inherits(est, "select") && est$criterion == "stars") {
    return(est$merge[[est$opt.index]])
  }
  stop("Run spiec-easi with criterion=\"stars\"")
}
#' get opt beta
#'
#' Get the optimal beta matrix when MB neighborhood selection is run with StARS
#'
#' @param est output from \code{spiec.easi} or \code{icov.select}
#' @return Weighted, non-symmetric matrix of model coefficients.
#' @export
getOptBeta <- function(est) {
  # inherits() handles objects that carry more than one class, where
  # class(est) == "select" would yield a length > 1 condition and fail.
  if (inherits(est, "select") && est$method == "mb") {
    return(est$beta[[est$opt.index]])
  }
  stop("Run spiec-easi with method=\"mb\"")
}
#' sym beta
#'
#' Symmetrize a beta (coefficient) matrix, ie. selected from MB neighborhood selection
#'
#' @param beta square coefficient matrix
#' @param mode how to symmetrize, see details
#' @details
#' Mode can be:
#' \itemize{
#' \item ave: Arithmetic average between the two possible values of beta
#' \item maxabs: The maximum [absolute] value between the two values
#' \item upper: Take the values from the upper triangle
#' \item lower: Take the values from the lower triangle
#'}
#' @return a symmetric coefficient matrix
#' @export
symBeta <- function(beta, mode='ave') {
  # Symmetrize a (possibly sparse) coefficient matrix; see roxygen above for
  # the available modes. Returns a Matrix 'symmetricMatrix'.
  if (mode=='ave') {
    # Arithmetic mean of the two asymmetric estimates.
    symbeta <- (beta+t(beta))/2
  } else if (mode == "maxabs") {
    # For each (i,j)/(j,i) pair keep the entry with the larger magnitude,
    # preserving its sign.
    upt <- Matrix::triu(beta)
    lot <- t(Matrix::tril(beta))
    suppressMessages(maxt <- pmax(abs(upt), abs(lot)))
    uptind <- Matrix::which(maxt == abs(upt))
    lotind <- Matrix::which(maxt == abs(lot))
    # BUG FIX: parenthesis placement -- previously length(uptind != 0),
    # which only worked by accident (length of the logical vector).
    if (length(uptind) != 0) maxt[uptind] <- maxt[uptind]*sign(upt[uptind])
    if (length(lotind) != 0) maxt[lotind] <- maxt[lotind]*sign(lot[lotind])
    symbeta <- maxt + t(maxt)
  } else if (mode == "upper") {
    upt <- Matrix::triu(beta)
    symbeta <- upt + t(upt)
  } else if (mode == "lower") {
    lot <- Matrix::tril(beta)
    symbeta <- lot + t(lot)
  } else
    stop("mode not recognized")
  # NOTE(review): in the maxabs/upper/lower modes, triu()/tril() include the
  # diagonal, so x + t(x) doubles diagonal entries -- presumably harmless
  # because MB betas have zero diagonals; confirm before reusing elsewhere.
  as(symbeta, 'symmetricMatrix')
}
| /R/utilities.R | no_license | jamesrco/SpiecEasi | R | false | false | 2,259 | r | #' get opt merge
#'
#' Get the optimal merge matrix StARS is run.
#'
#' @param est output from \code{spiec.easi} or \code{icov.select}
#' @return Weighted, symmetric matrix of edge probability estimates.
#' @export
getOptMerge <- function(est) {
if (class(est) == "select" && est$criterion == "stars") {
return(est$merge[[est$opt.index]])
} else
stop("Run spiec-easi with criterion=\"stars\"")
}
#' get opt beta
#'
#' Get the optimal beta matrix when MB neighborhood selection is run with StARS
#'
#' @param est output from \code{spiec.easi} or \code{icov.select}
#' @return Weighted, non-symmetric matrix of model coefficients.
#' @export
getOptBeta <- function(est) {
if (class(est) == "select" && est$method == "mb") {
return(est$beta[[est$opt.index]])
} else
stop("Run spiec-easi with method=\"mb\"")
}
#' sym beta
#'
#' Symmetrize a beta (coefficient) matrix, ie. selected from MB neighborhood selection
#'
#' @param beta square coefficient matrix
#' @param mode how to symmetrize, see details
#' @details
#' Mode can be:
#' \itemize{
#' \item ave: Arithmetic average between the two possible values of beta
#' \item maxabs: The maximum [absolute] value between the two values
#' \item upper: Take the values from the upper triangle
#' \item lower: Take the values from the lower triangle
#'}
#' @return a symmetric coefficient matrix
#' @export
symBeta <- function(beta, mode='ave') {
if (mode=='ave') {
symbeta <- (beta+t(beta))/2
} else if (mode == "maxabs") {
upt <- Matrix::triu(beta)
lot <- t(Matrix::tril(beta))
suppressMessages(maxt <- pmax(abs(upt), abs(lot)))
uptind <- Matrix::which(maxt == abs(upt))
lotind <- Matrix::which(maxt == abs(lot))
if (length(uptind != 0)) maxt[uptind] <- maxt[uptind]*sign(upt[uptind])
if (length(lotind != 0)) maxt[lotind] <- maxt[lotind]*sign(lot[lotind])
symbeta <- maxt + t(maxt)
} else if (mode == "upper") {
upt <- Matrix::triu(beta)
symbeta <- upt + t(upt)
} else if (mode == "lower") {
lot <- Matrix::tril(beta)
symbeta <- lot + t(lot)
} else
stop ("mode not recognized")
as(symbeta, 'symmetricMatrix')
}
|
#!/usr/bin/env Rscript
# Scrape the NUFORC UFO report index and save the per-day report URLs.
# Fixes vs. original: the script used no library() calls (rvest/%>% were
# never attached) and called html(), which is defunct in modern rvest.
library(rvest)    # read_html(), html_nodes(), html_attr()
library(magrittr) # %>%

# Definimos url base
base_url <- "http://www.nuforc.org/webreports/"

# Obtenemos el indice (master index page)
ufo_reports_index <- read_html(paste0(base_url, "ndxevent.html"))

# Obtenemos las URLs de las paginas por dia: relative hrefs of the form
# "ndxe*.html" in the first table column, made absolute with base_url
daily_urls <- paste0(base_url, ufo_reports_index %>%
                       html_nodes(xpath = "//*/td[1]/*/a[contains(@href, 'ndxe')]") %>%
                       html_attr("href"))

# Guardamos la lista de URLs
write.table(daily_urls, "C:/Users/Amanda/Documents/archivos_gran_escala/UFO_urls.txt")
| /alumnos/amanda/Proyecto_1/lista_urls.r | no_license | aaronsapa1/big-data | R | false | false | 541 | r | #!/usr/bin/env Rscript
# Definimos url base
base_url <- "http://www.nuforc.org/webreports/"
# Obtenemos el índice
ufo_reports_index <- html(paste0(base_url, "ndxevent.html"))
# Obtenemos las URLs de las páginas por día
daily_urls <- paste0(base_url, ufo_reports_index %>%
html_nodes(xpath = "//*/td[1]/*/a[contains(@href, 'ndxe')]") %>%
html_attr("href"))
# Guardamos la lista de URLs
write.table(daily_urls, "C:/Users/Amanda/Documents/archivos_gran_escala/UFO_urls.txt")
|
#' St. Louis Homicides 1980s-90s
#'
#' Homicides and selected socio-economic characteristics for counties surrounding St Louis, MO. Data aggregated for three time periods: 1979-84 (steady decline in homicides), 1984-88 (stable period), and 1988-93 (steady increase in homicides).
#'
#' Sf object, unprojected. EPSG 4326: WGS84.
#'
#' @format An sf data frame with 78 rows, 23 variables, and a geometry column:
#' \describe{
#' \item{ FIPSNO }{ FIPS code as numeric variable }
#' \item{ NAME }{ County name }
#' \item{ STATE_NAME }{ State name }
#' \item{ STATE_FIPS }{ State FIPS code (character) }
#' \item{ CNTY_FIPS }{ County FIPS code (character) }
#' \item{ FIPS }{ Combined state and county FIPS code (character) }
#' \item{ HR7984 }{ Homicide rate per 100,000 (1979-84) }
#' \item{ HR8488 }{ Homicide rate per 100,000 (1984-88) }
#' \item{ HR8893 }{ Homicide rate per 100,000 (1988-93) }
#' \item{ HC7984 }{ Homicide count (1979-84) }
#' \item{ HC8488 }{ Homicide count (1984-88) }
#' \item{ HC8893 }{ Homicide count (1988-93) }
#' \item{ PO7984 }{ Population total (1979-84) }
#' \item{ PO8488 }{ Population total (1984-88) }
#' \item{ PO8893 }{ Population total (1988-93) }
#' \item{ PE77 }{ Police expenditures per capita, 1977 }
#' \item{ PE82 }{ Police expenditures per capita, 1982 }
#' \item{ PE87 }{ Police expenditures per capita, 1987 }
#' \item{ RDAC80 }{ Resource deprivation/affluence composite variable, 1980 }
#' \item{ RDAC85 }{ Resource deprivation/affluence composite variable, 1985 }
#' \item{ RDAC90 }{ Resource deprivation/affluence composite variable, 1990 }
#' }
#' @source S. Messner, L. Anselin, D. Hawkins, G. Deane, S. Tolnay, R. Baller (2000). An Atlas of the Spatial Patterning of County-Level Homicide, 1960-1990. Pittsburgh, PA, National Consortium on Violence Research (NCOVR) (\url{http://www.ncovr.heinz.cmu.edu}).
#'
#' @examples
#' if (requireNamespace("sf", quietly = TRUE)) {
#' library(sf)
#' data(stlouishom)
#' plot(stlouishom["HC8893"])
#' }
"stlouishom"
| /R/stlouishom.R | permissive | spatialanalysis/geodaData | R | false | false | 2,033 | r | #' St. Louis Homicides 1980s-90s
#'
#' Homicides and selected socio-economic characteristics for counties surrounding St Louis, MO. Data aggregated for three time periods: 1979-84 (steady decline in homicides), 1984-88 (stable period), and 1988-93 (steady increase in homicides).
#'
#' Sf object, unprojected. EPSG 4326: WGS84.
#'
#' @format An sf data frame with 78 rows, 23 variables, and a geometry column:
#' \describe{
#' \item{ FIPSNO }{ FIPS code as numeric variable }
#' \item{ NAME }{ County name }
#' \item{ STATE_NAME }{ State name }
#' \item{ STATE_FIPS }{ State FIPS code (character) }
#' \item{ CNTY_FIPS }{ County FIPS code (character) }
#' \item{ FIPS }{ Combined state and county FIPS code (character) }
#' \item{ HR7984 }{ Homicide rate per 100,000 (1979-84) }
#' \item{ HR8488 }{ Homicide rate per 100,000 (1984-88) }
#' \item{ HR8893 }{ Homicide rate per 100,000 (1988-93) }
#' \item{ HC7984 }{ Homicide count (1979-84) }
#' \item{ HC8488 }{ Homicide count (1984-88) }
#' \item{ HC8893 }{ Homicide count (1988-93) }
#' \item{ PO7984 }{ Population total (1979-84) }
#' \item{ PO8488 }{ Population total (1984-88) }
#' \item{ PO8893 }{ Population total (1988-93) }
#' \item{ PE77 }{ Police expenditures per capita, 1977 }
#' \item{ Police }{ expenditures per capita, 1982 }
#' \item{ PE87 }{ Police expenditures per capita, 1987 }
#' \item{ RDAC80 }{ Resource deprivation/affluence composite variable, 1980 }
#' \item{ RDAC85 }{ Resource deprivation/affluence composite variable, 1985 }
#' \item{ RDAC90 }{ Resource deprivation/affluence composite variable, 1990 }
#' }
#' @source S. Messner, L. Anselin, D. Hawkins, G. Deane, S. Tolnay, R. Baller (2000). An Atlas of the Spatial Patterning of County-Level Homicide, 1960-1990. Pittsburgh, PA, National Consortium on Violence Research (NCOVR) (\url{www.ncovr.heinz.cmu.edu}).
#'
#' @examples
#' if (requireNamespace("sf", quietly = TRUE)) {
#' library(sf)
#' data(stlouishom)
#' plot(stlouishom["HC8893"])
#' }
"stlouishom"
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{summary.cmds}
\alias{summary.cmds}
\title{View a summary of cmds results}
\usage{
summary.cmds(res)
}
\arguments{
\item{res}{The results from running \code{cmds}.}
}
\description{
\code{summary} prints a summary of cmds results and statistics.
}
\details{
After running \code{cmds}, the summary function prints information such as whether the algorithm converged, the embedding dimension, the distortion and total error of the embedding and the runtime. It also prints the distortion per timestep.
}
| /man/summary.cmds.Rd | no_license | ginagruenhage/cmdsr | R | false | false | 560 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{summary.cmds}
\alias{summary.cmds}
\title{View a summary of cmds results}
\usage{
summary.cmds(res)
}
\arguments{
\item{res}{The results from running \code{cmds}.}
}
\description{
\code{summary} prints a summary of cmds results and statistics.
}
\details{
After running \code{cmds}, the summary function prints information such as whether the algorithm converged, the embedding dimension, the distortion and total error of the embedding and the runtime. It also prints the distortion per timestep.
}
|
# Classroom discussion notes: subsetting, tapply, and R data types.
# NOTE(review): several snippets below are DELIBERATELY broken to demonstrate
# errors in class (marked inline) -- do not "fix" them.
x = readRDS('~/projects/sts98/assignment-3/hcmst.rds')
dogs = readRDS("~/projects/sts98/sts98notes/data/dogs.rds")
# Questions
# Diff between variable in dataset and in program
# make up example questions, conclusions
# f's local variable `newone` is independent of any global of the same name
f = function(x){
newone = 10
x^2 + newone
}
table(x$breakup)
#
# Mean age within each breakup category, ignoring missing ages
bage = tapply(x$age, x$breakup, FUN=mean, na.rm=TRUE)
# Forget NA's for the moment
breakup = x$breakup
breakup[is.na(breakup)] = 'yes'
age = x$age
# What this actually does:
# tapply is equivalent to subsetting by each group and applying the function:
b1 = breakup == 'yes'
b2 = breakup == 'partner deceased'
b3 = breakup == 'no'
length(age[b1])
length(age[b2])
length(age[b3])
mean(age[b3], na.rm=TRUE)
# Keep only rows where both age columns are observed, then row-normalize
s = x[, c('age', 'partner_age')]
s = s[complete.cases(s), ]
a = as.matrix(s)
p = prop.table(a, margin=1)
# Two equivalent ways to take column means
colMeans(s)
apply(s, 2, mean)
# 11AM Discussion
# DELIBERATE ERROR: `something` is commented out, so calling f() fails with
# "object 'something' not found" -- demonstrates lexical scoping lookup.
f = function(x){
#something = 10
x^2 + something
}
z = 1:3
sapply(z, f)
class(dogs[, 1])
class(dogs[, 2])
sapply(dogs, class)
# Data types
# Promotion: combining types coerces to the most general one
a = c(TRUE, FALSE)
a2 = c(a, 1:2)      # logical promoted to integer
a3 = c(a2, 3.1425)  # integer promoted to double
x = list(a=1:4, b=c(1, 8.2))
sapply(x, length)
# DELIBERATE ERROR: arguments swapped -- sapply(X, FUN), not sapply(FUN, X)
sapply(length, x)
# Closure example: adder(2) returns a function that adds 2 to its argument
adder = function(x){
function(y) y + x
}
a2 = adder(2)
x = rnorm(100)
| /discussion/clark/discussion5.R | no_license | AylNels/notes | R | false | false | 1,146 | r | x = readRDS('~/projects/sts98/assignment-3/hcmst.rds')
dogs = readRDS("~/projects/sts98/sts98notes/data/dogs.rds")
# Questions
# Diff between variable in dataset and in program
# make up example questions, conclusions
f = function(x){
newone = 10
x^2 + newone
}
table(x$breakup)
#
bage = tapply(x$age, x$breakup, FUN=mean, na.rm=TRUE)
# Forget NA's for the moment
breakup = x$breakup
breakup[is.na(breakup)] = 'yes'
age = x$age
# What this actually does:
b1 = breakup == 'yes'
b2 = breakup == 'partner deceased'
b3 = breakup == 'no'
length(age[b1])
length(age[b2])
length(age[b3])
mean(age[b3], na.rm=TRUE)
s = x[, c('age', 'partner_age')]
s = s[complete.cases(s), ]
a = as.matrix(s)
p = prop.table(a, margin=1)
colMeans(s)
apply(s, 2, mean)
# 11AM Discussion
f = function(x){
#something = 10
x^2 + something
}
z = 1:3
sapply(z, f)
class(dogs[, 1])
class(dogs[, 2])
sapply(dogs, class)
# Data types
# Promotion
a = c(TRUE, FALSE)
a2 = c(a, 1:2)
a3 = c(a2, 3.1425)
x = list(a=1:4, b=c(1, 8.2))
sapply(x, length)
sapply(length, x)
adder = function(x){
function(y) y + x
}
a2 = adder(2)
x = rnorm(100)
|
# laterality_plot.R
# ::prprcss::
# E.D. Gennatas lambdamd.org

#' Laterality scatter plot
#'
#' Draws one line segment per case connecting each region's left ("_L") and
#' right ("_R") value, overlays a per-region summary arrow, and invisibly
#' returns the right-minus-left delta of the summary statistic per region.
#'
#' @inheritParams mplot3.xy
#' @param x data.frame or data.table which includes columns with ROI names ending in "_L" or "_R"
#' @param regionnames Character, vector: Regions to plot. For example, if \code{regionnames}
#' contains "Ant_Insula", \code{x} must contain columns \code{Ant_Insula_L} and \code{Ant_Insula_R}
#' @param main Character: Main title. Default = NULL
#' @param ylab Character: y-axis label. Default = "Left to Right"
#' @param summary.fn Character: Name of function to summarize left and right values.
#' Default = "median"
#' @param summary.lty Integer: line type for summary arrows
#' @param summary.lwd Float: line width for summary arrows
#' @param summary.col Color for summary arrows
#' @param arrowhead.length Float: arrowhead length in inches. Default = .075
#' @param deltas Logical, If TRUE, show summary statistics. Default = TRUE
#' @param line.col Color for individual cases' lines
#' @param line.alpha Float: transparency for individual lines
#' @param lty Integer: Line type for individual lines. Default = 1
#' @param lwd Float: Line width for individual lines. Default = .3
#' @param ylim Float, vector, length 2: y-axis limits
#'
#' @return Invisibly, a numeric vector of per-region summary deltas
#' (right minus left).
#' @author E.D. Gennatas
#' @export
mplot3.laterality <- function(x, regionnames,
                              main = NULL,
                              ylab = "Left to Right",
                              summary.fn = "median",
                              summary.lty = 1,
                              summary.lwd = 2.5,
                              summary.col = NULL,
                              arrowhead.length = .075,
                              deltas = TRUE,
                              line.col = theme$fg,
                              line.alpha = .25,
                              lty = 1,
                              lwd = .3,
                              ylim = NULL,
                              theme = getOption("rt.theme", "lightgrid"),
                              labelify = TRUE,
                              autolabel = letters,
                              # na.rm = TRUE,
                              mar = NULL,
                              oma = rep(0, 4),
                              pty = "m",
                              palette = getOption("rt.palette", "rtCol1"),
                              par.reset = TRUE,
                              pdf.width = 6,
                              pdf.height = 6,
                              filename = NULL, ...) {

  setDT(x)  # convert in place to data.table (enables ..index column selection)
  xnames <- names(x)
  .names <- c(paste0(regionnames, "_L"), paste0(regionnames, "_R"))
  # vapply instead of sapply: errors loudly if a region column is missing
  # or ambiguous (sapply would silently return a list and fail later).
  index <- vapply(.names, function(i) grep(paste0(i, "$"), xnames), integer(1L))
  if (is.null(ylim)) ylim <- getlim(unlist(x[, ..index]))
  xlim <- c(.5, length(regionnames)*2 + .5)
  if (is.character(palette)) palette <- rtPalette(palette)
  if (is.null(summary.col)) summary.col <- palette[seq_along(regionnames)]
  if (is.null(mar)) {
    # Bottom margin sized to fit the longest (rotated) region label
    bottom.mar <- textwidth(regionnames)
    mar <- c(bottom.mar, 3, 2, 1)
  }

  # Theme ====
  extraargs <- list(...)
  if (is.character(theme)) {
    theme <- do.call(paste0("theme_", theme), extraargs)
  } else {
    # seq_along, not seq: seq() misbehaves for length-1 lists
    for (i in seq_along(extraargs)) {
      theme[[names(extraargs)[i]]] <- extraargs[[i]]
    }
  }

  # Plot ====
  if (!is.null(filename)) pdf(filename, width = pdf.width, height = pdf.height,
                              title = "rtemis Graphics")
  par.orig <- par(no.readonly = TRUE)
  if (!is.null(rtenv$rtpar)) {
    # An outer rtemis layout already manages par; do not reset on exit
    par.reset <- FALSE
    par(mar = mar, bg = theme$bg, pty = pty, cex = theme$cex)
  } else {
    par(mar = mar, oma = oma, bg = theme$bg, pty = pty, cex = theme$cex)
  }
  if (par.reset) on.exit(suppressWarnings(par(par.orig)))
  plot(NULL, NULL, xlim = xlim, ylim = ylim, bty = "n",
       axes = FALSE, ann = FALSE,
       xaxs = "i", yaxs = "i")

  # Plot bg ====
  if (!is.na(theme$plot.bg)) {
    rect(xlim[1], ylim[1], xlim[2], ylim[2], border = NA, col = theme$plot.bg)
  }

  # Grid ====
  if (theme$grid) {
    grid(nx = 0,
         ny = theme$grid.ny,
         col = colorAdjust(theme$grid.col, theme$grid.alpha),
         lty = theme$grid.lty,
         lwd = theme$grid.lwd)
  }

  # lat plot ====
  line.col <- adjustcolor(line.col, line.alpha)
  delta <- numeric(length(regionnames))
  for (i in seq_along(regionnames)) {
    # Region i occupies x positions [2i - 1.2, 2i + .2]: left values on the
    # left end, right values on the right end of each segment
    vleft <- x[[grep(paste0(regionnames[i], "_L$"), names(x))]]
    vright <- x[[grep(paste0(regionnames[i], "_R$"), names(x))]]
    vleft.summary <- do.call(summary.fn, list(vleft))
    vright.summary <- do.call(summary.fn, list(vright))
    segments(x0 = 2*i - 1.2, x1 = 2*i + .2, y0 = vleft, y1 = vright,
             lty = lty, lwd = lwd, col = line.col)
    arrows(x0 = 2*i - 1.2, x1 = 2*i + .2,
           y0 = vleft.summary, y1 = vright.summary,
           length = arrowhead.length,
           lty = summary.lty, lwd = summary.lwd, col = summary.col[[i]])
    delta[i] <- vright.summary - vleft.summary
  }

  # y-axis ====
  axis(side = 2,
       las = theme$y.axis.las,
       padj = theme$y.axis.padj,
       hadj = theme$y.axis.hadj,
       col.ticks = adjustcolor(theme$tick.col, theme$tick.alpha),
       col = NA, # The axis line, which we want to omit
       col.axis = theme$tick.labels.col, # the axis numbers i.e. tick labels
       tck = theme$tck,
       tcl = theme$tcl,
       cex = theme$cex,
       family = theme$font.family)

  # regionnames ====
  # seq_along(regionnames), not seq(regionnames): seq() on a length-1
  # character vector errors, breaking single-region plots.
  mtext(text = if (labelify) labelify(regionnames) else regionnames,
        side = 1,
        line = 1,
        las = 2, at = seq_along(regionnames)*2 - .5,
        col = theme$labs.col)

  # ylab ====
  if (!is.null(ylab)) mtext(ylab, side = theme$y.axis.side,
                            line = theme$ylab.line, cex = theme$cex,
                            # adj = ylab.adj,
                            col = theme$labs.col,
                            family = theme$font.family)

  # deltas ====
  if (deltas) {
    mtext(text = paste(summary.fn, "delta"),
          side = 3, line = 1.1, adj = 0,
          col = adjustcolor(theme$fg, .5))
    mtext(text = ddSci(delta),
          side = 3, line = .2,
          at = seq_along(regionnames)*2 - .5,
          col = unlist(summary.col))
  }

  # Main Title ====
  if (!is.null(rtenv$autolabel)) {
    # Auto-prefix panel labels ("a", "b", ...) when composing multi-panel figures
    autolab <- autolabel[rtenv$autolabel]
    main <- paste(autolab, main)
    rtenv$autolabel <- rtenv$autolabel + 1
  }
  if (!is.null(main)) {
    mtext(text = main, side = 3, line = theme$main.line,
          font = theme$main.font, adj = theme$main.adj,
          cex = theme$cex, col = theme$main.col,
          family = theme$font.family)
  }

  # Outro ====
  if (!is.null(filename)) dev.off()
  invisible(delta)

} # rtemis::mplot3.laterality
| /R/mplot3.laterality.R | no_license | tlarzg/rtemis | R | false | false | 6,638 | r | # laterality_plot.R
# ::prprcss::
# E.D. Gennatas lambdamd.org
#' Laterality scatter plot
#'
#' @inheritParams mplot3.xy
#' @param x data.frame or data.table which includes columns with ROI names ending in "_L" or "_R"
#' @param regionnames Character, vector: Regions to plot. For example, if \code{regionnames}
#' contains "Ant_Insula", \code{x} must contain columns \code{Ant_Insula_L} and \code{Ant_Insula_R}
#' @param summary.fn Character: Name of function to summarize left and right values.
#' Default = "median"
#' @param summary.lty Integer: line type for summary arrows
#' @param summary.lwd Float: line width for summary arrows
#' @param summary.col Color for summary arrows
#' @param arrowhead.length Float: arrowhead length in inches. Default = .075
#' @param deltas Logical, If TRUE, show summary statistics. Default = TRUE
#' @param line.col Color for individual cases' lines
#' @param line.alpha Float: transparency for individual lines
#' @param lty Integer: Line type for individual lines. Default = 1
#' @param lwd Float: Line width for individual lines. Default = .3
#' @param ylim Float, vector, length 2: y-axis limits
#'
#' @author E.D. Gennatas
#' @export
mplot3.laterality <- function(x, regionnames,
main = NULL,
ylab = "Left to Right",
summary.fn = "median",
summary.lty = 1,
summary.lwd = 2.5,
summary.col = NULL,
arrowhead.length = .075,
deltas = TRUE,
line.col = theme$fg,
line.alpha = .25,
lty = 1,
lwd = .3,
ylim = NULL,
theme = getOption("rt.theme", "lightgrid"),
labelify = TRUE,
autolabel = letters,
# na.rm = TRUE,
mar = NULL,
oma = rep(0, 4),
pty = "m",
palette = getOption("rt.palette", "rtCol1"),
par.reset = TRUE,
pdf.width = 6,
pdf.height = 6,
filename = NULL, ...) {
setDT(x)
xnames <- names(x)
.names <- c(paste0(regionnames, "_L"), paste0(regionnames, "_R"))
index <- sapply(.names, function(i) grep(paste0(i, "$"), xnames))
if (is.null(ylim)) ylim <- getlim(unlist(x[, ..index]))
xlim <- c(.5, length(regionnames)*2 + .5)
if (is.character(palette)) palette <- rtPalette(palette)
if (is.null(summary.col)) summary.col <- palette[seq_along(regionnames)]
if (is.null(mar)) {
bottom.mar <- textwidth(regionnames)
mar <- c(bottom.mar, 3, 2, 1)
}
# Theme ====
extraargs <- list(...)
if (is.character(theme)) {
theme <- do.call(paste0("theme_", theme), extraargs)
} else {
for (i in seq(extraargs)) {
theme[[names(extraargs)[i]]] <- extraargs[[i]]
}
}
# Plot ====
if (!is.null(filename)) pdf(filename, width = pdf.width, height = pdf.height,
title = "rtemis Graphics")
par.orig <- par(no.readonly = TRUE)
if (!is.null(rtenv$rtpar)) {
par.reset <- FALSE
par(mar = mar, bg = theme$bg, pty = pty, cex = theme$cex)
} else {
par(mar = mar, oma = oma, bg = theme$bg, pty = pty, cex = theme$cex)
}
if (par.reset) on.exit(suppressWarnings(par(par.orig)))
plot(NULL, NULL, xlim = xlim, ylim = ylim, bty = "n",
axes = FALSE, ann = FALSE,
xaxs = "i", yaxs = "i")
# Plot bg ====
if (!is.na(theme$plot.bg)) {
rect(xlim[1], ylim[1], xlim[2], ylim[2], border = NA, col = theme$plot.bg)
}
# Grid ====
if (theme$grid) {
grid(nx = 0,
ny = theme$grid.ny,
col = colorAdjust(theme$grid.col, theme$grid.alpha),
lty = theme$grid.lty,
lwd = theme$grid.lwd)
}
# lat plot ====
line.col <- adjustcolor(line.col, line.alpha)
delta <- numeric(length(regionnames))
for (i in seq_along(regionnames)) {
vleft <- x[[grep(paste0(regionnames[i], "_L$"), names(x))]]
vright <- x[[grep(paste0(regionnames[i], "_R$"), names(x))]]
vleft.summary <- do.call(summary.fn, list(vleft))
vright.summary <- do.call(summary.fn, list(vright))
segments(x0 = 2*i - 1.2, x1 = 2*i + .2, y0 = vleft, y1 = vright,
lty = lty, lwd = lwd, col = line.col)
arrows(x0 = 2*i - 1.2, x1 = 2*i + .2,
y0 = vleft.summary, y1 = vright.summary,
length = arrowhead.length,
lty = summary.lty, lwd = summary.lwd, col = summary.col[[i]])
delta[i] <- vright.summary - vleft.summary
}
# y-axis ====
axis(side = 2,
las = theme$y.axis.las,
padj = theme$y.axis.padj,
hadj = theme$y.axis.hadj,
col.ticks = adjustcolor(theme$tick.col, theme$tick.alpha),
col = NA, # The axis line, which we want to omit
col.axis = theme$tick.labels.col, # the axis numbers i.e. tick labels
tck = theme$tck,
tcl = theme$tcl,
cex = theme$cex,
family = theme$font.family)
# regionnames ====
mtext(text = if (labelify) labelify(regionnames) else regionnames,
side = 1,
line = 1,
las = 2, at = seq(regionnames)*2 - .5,
col = theme$labs.col)
# ylab ====
if (!is.null(ylab)) mtext(ylab, side = theme$y.axis.side,
line = theme$ylab.line, cex = theme$cex,
# adj = ylab.adj,
col = theme$labs.col,
family = theme$font.family)
# deltas ====
if (deltas) {
mtext(text = paste(summary.fn, "delta"),
side = 3, line = 1.1, adj = 0,
col = adjustcolor(theme$fg, .5))
mtext(text = ddSci(delta),
side = 3, line = .2,
at = seq(regionnames)*2 - .5,
col = unlist(summary.col))
}
# Main Title ====
if (!is.null(rtenv$autolabel)) {
autolab <- autolabel[rtenv$autolabel]
main <- paste(autolab, main)
rtenv$autolabel <- rtenv$autolabel + 1
}
if (!is.null(main)) {
mtext(text = main, side = 3, line = theme$main.line,
font = theme$main.font, adj = theme$main.adj,
cex = theme$cex, col = theme$main.col,
family = theme$font.family)
}
# Outro ====
if (!is.null(filename)) dev.off()
invisible(delta)
} # rtemis::mplot3.laterality
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy.R, R/group_percent.R
\name{group_frac}
\alias{group_frac}
\alias{redist.group.percent}
\title{Calculate Group Percent by District}
\usage{
group_frac(
map,
group_pop,
total_pop = map[[attr(map, "pop_col")]],
.data = cur_plans()
)
redist.group.percent(
plans,
group_pop,
total_pop,
ncores = 1,
district_membership,
grouppop,
fullpop
)
}
\arguments{
\item{map}{a \code{\link{redist_map}} object}
\item{group_pop}{A numeric vector with the population of the group for every precinct.}
\item{total_pop}{A numeric vector with the population for every precinct.}
\item{.data}{a \code{\link{redist_plans}} object}
\item{plans}{A matrix with one row
for each precinct and one column for each map. Required.}
\item{ncores}{Number of cores to use for parallel computing. Default is 1.}
\item{district_membership}{Deprecated, use plans. A matrix with one row
for each precinct and one column for each map. Required.}
\item{grouppop}{Deprecated, use group_pop. A numeric vector with the population of the group for every precinct.}
\item{fullpop}{Deprecated, use total_pop. A numeric vector with the population for every precinct.}
}
\value{
matrix with percent for each district
}
\description{
\code{redist.group.percent} computes the percentage that a group makes up in
each district across a matrix of maps.
}
\examples{
data(fl25)
data(fl25_enum)
cd <- fl25_enum$plans[, fl25_enum$pop_dev <= 0.05]
redist.group.percent(plans = cd,
group_pop = fl25$BlackPop,
total_pop = fl25$TotPop)
}
\concept{analyze}
| /man/redist.group.percent.Rd | no_license | deonizm/redist | R | false | true | 1,653 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy.R, R/group_percent.R
\name{group_frac}
\alias{group_frac}
\alias{redist.group.percent}
\title{Calculate Group Percent by District}
\usage{
group_frac(
map,
group_pop,
total_pop = map[[attr(map, "pop_col")]],
.data = cur_plans()
)
redist.group.percent(
plans,
group_pop,
total_pop,
ncores = 1,
district_membership,
grouppop,
fullpop
)
}
\arguments{
\item{map}{a \code{\link{redist_map}} object}
\item{group_pop}{A numeric vector with the population of the group for every precinct.}
\item{total_pop}{A numeric vector with the population for every precinct.}
\item{.data}{a \code{\link{redist_plans}} object}
\item{plans}{A matrix with one row
for each precinct and one column for each map. Required.}
\item{ncores}{Number of cores to use for parallel computing. Default is 1.}
\item{district_membership}{Deprecated, use plans. A matrix with one row
for each precinct and one column for each map. Required.}
\item{grouppop}{Deprecated, use group_pop. A numeric vector with the population of the group for every precinct.}
\item{fullpop}{Deprecated, use total_pop. A numeric vector with the population for every precinct.}
}
\value{
matrix with percent for each district
}
\description{
\code{redist.group.percent} computes the percentage that a group makes up in
each district across a matrix of maps.
}
\examples{
data(fl25)
data(fl25_enum)
cd <- fl25_enum$plans[, fl25_enum$pop_dev <= 0.05]
redist.group.percent(plans = cd,
group_pop = fl25$BlackPop,
total_pop = fl25$TotPop)
}
\concept{analyze}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chromhmm_comparestates.R
\name{chromhmm_comparestates}
\alias{chromhmm_comparestates}
\title{Compute pearson correlation between the emission probabilities of different models}
\format{Numeric matrix created using \code{\%cor\%}}
\usage{
chromhmm_comparestates(model_file, reference_file)
}
\arguments{
\item{model_file}{ChromHMM model file}
\item{reference_file}{ChromHMM model file for comparison}
}
\description{
Compute pearson correlation between the emission probabilities of different models
}
| /man/chromhmm_comparestates.Rd | no_license | csiu/CEMTscripts | R | false | true | 581 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chromhmm_comparestates.R
\name{chromhmm_comparestates}
\alias{chromhmm_comparestates}
\title{Compute pearson correlation between the emission probabilities of different models}
\format{Numeric matrix created using \code{\%cor\%}}
\usage{
chromhmm_comparestates(model_file, reference_file)
}
\arguments{
\item{model_file}{ChromHMM model file}
\item{reference_file}{ChromHMM model file for comparison}
}
\description{
Compute pearson correlation between the emission probabilities of different models
}
|
.onLoad <- function(...) {
  # Best-effort initialization on package load: restore saved user paths and
  # the stored key. Failures are silenced so attachment never errors for
  # users who have not configured either yet.
  for (init in list(afs_read_opts, load_key)) {
    try(init(), silent = TRUE)
  }
}
| /R/aaa.R | no_license | nverno/sync.afs | R | false | false | 119 | r | .onLoad <- function(...) {
## Load user paths
try(afs_read_opts(), silent = TRUE)
try(load_key(), silent=TRUE)
}
|
## run_analysis.R
## Builds a tidy summary of the UCI Human Activity Recognition dataset.
## Bug fixes vs. original:
##  - file.exists(data) used a bare symbol; the quoted path is intended
##  - unzip() was pointed at "CourseProject.zip" but the archive was
##    downloaded to "./data/CourseProject.zip"
##  - ddply() requires plyr, which was never attached

library(plyr)

# Download and unzip the source file.
if (!file.exists("./data")) { dir.create("./data") }
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/CourseProject.zip", method = "curl")
unzip("./data/CourseProject.zip", exdir = "./data")

# Import training data sets
trainSubject <- read.table("./data/UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
trainY <- read.table("./data/UCI HAR Dataset/train/y_train.txt", col.names = "activity")
trainX <- read.table("./data/UCI HAR Dataset/train/X_train.txt")

# Import test data sets
testSubject <- read.table("./data/UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
testY <- read.table("./data/UCI HAR Dataset/test/y_test.txt", col.names = "activity")
testX <- read.table("./data/UCI HAR Dataset/test/X_test.txt")

# Combine train and test partitions (rows align across the three tables)
Xcombined <- rbind(trainX, testX)
Ycombined <- rbind(trainY, testY)
Scombined <- rbind(trainSubject, testSubject)

# Extract only the measurements on the mean and standard deviation.
# "mean[^F]|mean$" keeps mean() columns while excluding meanFreq().
features <- read.table("./data/UCI HAR Dataset/features.txt")
features[,2] <- gsub("-","", gsub("\\()","",gsub("std","Std",features[,2])))
ftExtracted <- grep("mean[^F]|mean$|Std", features[,2])
Xextracted <- Xcombined[,ftExtracted]

# Use descriptive activity names to name the activities in the data set
activityLabels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
Ycombined[,1] <- activityLabels[Ycombined[,1],2]

# Appropriately label the data set with descriptive variable names.
names(Xextracted) <- features[ftExtracted,2]
mergedData <- cbind(Xextracted, Scombined, Ycombined)

# Second, independent tidy data set: the average of each variable for each
# activity and each subject.
tidyData <- ddply(mergedData, .(subject,activity), function(x) colMeans(x[,1:66],na.rm = TRUE))
write.table(tidyData, "tidyData.txt", row.names = FALSE)
| /run_analysis.R | no_license | miasong/Getting_and_Cleaning_data | R | false | false | 2,011 | r | ## run_analysis.R
# Download and uzip the source file.
if(!file.exists(data)){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile="./data/CourseProject.zip", method="curl")
unzip("CourseProject.zip")
# Import training data sets
trainSubject <- read.table("./data/UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
trainY <- read.table("./data/UCI HAR Dataset/train/y_train.txt", col.names = "activity")
trainX <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
# Import test data sets
testSubject <- read.table("./data/UCI HAR Dataset/test/subject_test.txt",col.names = "subject")
testY <- read.table("./data/UCI HAR Dataset/test/y_test.txt",col.names = "activity")
testX <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
# Create combined activity data set, label data set, and subject data set.
Xcombined <- rbind(trainX, testX)
Ycombined <- rbind(trainY, testY)
Scombined <- rbind(trainSubject, testSubject)
# Extract only the measurements on the mean and standard deviation for each measurement.
features <- read.table("./data/UCI HAR Dataset/features.txt")
features[,2] <- gsub("-","", gsub("\\()","",gsub("std","Std",features[,2])))
ftExtracted <- grep("mean[^F]|mean$|Std", features[,2])
Xextracted <- Xcombined[,ftExtracted]
# Use descriptive activity names to name the activities in the data set
activityLabels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
Ycombined[,1] <- activityLabels[Ycombined[,1],2]
# Appropriately labels the data set with descriptive variable names.
names(Xextracted) <- features[ftExtracted,2]
mergedData <- cbind(Xextracted, Scombined,Ycombined)
# Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidyData <- ddply(mergedData, .(subject,activity), function(x) colMeans(x[,1:66],na.rm = TRUE))
write.table(tidyData, "tidyData.txt", row.names = FALSE)
|
#--------------------------------rosenbluth
rosenbluthpage <-
fluidPage(
fluidRow(
column(width = 8,
wellPanel(class = "bw sc texts",
style = "height: 760px;",
div(class = "metric_header", "Rosenbluth Index"),
div(class = "metric_text",
p("The Rosenbluth index (Rosenbluth, 1955) draws on the rank order of components and compares each component\u0027s unit share to the probability that two units belong to the same component (Coulter, 1989). It is a measure of absolute inequality (i.e., the number of components, k, defines its lower bound)."),
p("Because of its rank-order definition, the Rosenbluth index can be plotted in a diagram similar to the Lorenz curve. However, the components are sorted in descending order (not, as otherwise customary, in ascending order), so that the first entry in the diagram contains the highest (not the smallest) unit share. Also, the x-axis of the Rosenbluth index diagram does not cover the cumulative component shares in percent (1% to 100%), but rather the component numbers (1 to k) instead. For five components, the x-axis would range from 1 to 5, and the total area of the diagram would equal 5\u002A1 = 5. The diagonal in this rectangle again indicates the line of equality."),
p("Like the Gini index, the Rosenbluth index is an area measure. It is the reciprocal value of twice the area (A; red area in the rightmost diagram) above the concentration curve, R = 1/(2A). Thus, the Rosenbluth index can also be expressed in terms of the uncorrected Gini index (Marfels, 1971)."),
p("In the case of maximum equality, the curve coincides with the line of equality, and the area above the curve is identical to the right-angled triangle above the line of equality. The lower bound of the Rosenbluth index is thus 1/[2\u002A(k/2)] = 1/k. In the case of maximum inequality, the curve rises immediately steeply from (0, 0) to (1, 1), and then runs horizontally to the rightmost point (k, 1). The resulting (minimum) area above the concentration curve is then defined by a right-angled triangle having the corner points (0, 0), (0, 1), and (1, 1), thus spanning a total area of (1\u002A1)/2 = 0.5, in which case the Rosenbluth index yields a value of 1/(2\u002A0.5) = 1.")
),
br(),
hr(),
br(),
div(class = "instructions",
p("On the right side you can move five sliders to learn about the construction of the Rosenbluth diagram and the calculation of the Rosenbluth index. The sliders represent the units (e.g., scale or test scores, reaction times in ms, or counts of the variable values of a categorical variable) of five different components (e.g., individual persons or the variable values of a categorical variable)."),
p("See what area is coloured in the rightmost diagram to compute the Rosenbluth index."),
p("Create a situation, in which one component has all units or in which all five components have equal numbers of units."),
br(),
br()
)
)
),
column(width = 1),
column(width = 4,
wellPanel(class = "bw",
# div(class = "inter_header", "Set the revenue"),
sliderInput("ros1", "First Component's Units", min = 0, max = 1000, step = 10, value = 400),
sliderInput("ros2", "Second Component's Units", min = 0, max = 1000, step = 10, value = 200),
sliderInput("ros3", "Third Component's Units", min = 0, max = 1000, step = 10, value = 100),
sliderInput("ros4", "Fourth Component's Units", min = 0, max = 1000, step = 10, value = 50),
sliderInput("ros5", "Fifth Component's Units", min = 0, max = 1000, step = 10, value = 0)
),
wellPanel(class = "bw",
fluidRow(
valueBoxOutput(width = 12, "ros_index")
)
)
),
),
fluidRow(
box(title = "1. Sort (Descending) and Cumulate the Values", solidHeader = T, width = 4, plotOutput("ros_out1")),
box(title = "2. Connect the Points", solidHeader = T, width = 4, plotOutput("ros_out2")),
box(title = "3. Fill the Area above the Line", solidHeader = T, width = 4, plotOutput("ros_out3")),
),
tags$footer(foot)
) | /Rosenbluth ui.R | no_license | guitaric/Visualizing-Inequality | R | false | false | 4,794 | r |
#--------------------------------rosenbluth
# Shiny UI for the Rosenbluth index page.
# Layout: explanatory text (left column), five unit sliders plus the computed
# index value box (right column), and a three-step diagram row underneath.
# Fixes: logical literals spelled out (TRUE, not T) and trailing commas
# removed -- a trailing comma inside a call creates an empty argument.
rosenbluthpage <-
  fluidPage(
    fluidRow(
      column(width = 8,
        wellPanel(class = "bw sc texts",
          style = "height: 760px;",
          div(class = "metric_header", "Rosenbluth Index"),
          div(class = "metric_text",
            p("The Rosenbluth index (Rosenbluth, 1955) draws on the rank order of components and compares each component\u0027s unit share to the probability that two units belong to the same component (Coulter, 1989). It is a measure of absolute inequality (i.e., the number of components, k, defines its lower bound)."),
            p("Because of its rank-order definition, the Rosenbluth index can be plotted in a diagram similar to the Lorenz curve. However, the components are sorted in descending order (not, as otherwise customary, in ascending order), so that the first entry in the diagram contains the highest (not the smallest) unit share. Also, the x-axis of the Rosenbluth index diagram does not cover the cumulative component shares in percent (1% to 100%), but rather the component numbers (1 to k) instead. For five components, the x-axis would range from 1 to 5, and the total area of the diagram would equal 5\u002A1 = 5. The diagonal in this rectangle again indicates the line of equality."),
            p("Like the Gini index, the Rosenbluth index is an area measure. It is the reciprocal value of twice the area (A; red area in the rightmost diagram) above the concentration curve, R = 1/(2A). Thus, the Rosenbluth index can also be expressed in terms of the uncorrected Gini index (Marfels, 1971)."),
            p("In the case of maximum equality, the curve coincides with the line of equality, and the area above the curve is identical to the right-angled triangle above the line of equality. The lower bound of the Rosenbluth index is thus 1/[2\u002A(k/2)] = 1/k. In the case of maximum inequality, the curve rises immediately steeply from (0, 0) to (1, 1), and then runs horizontally to the rightmost point (k, 1). The resulting (minimum) area above the concentration curve is then defined by a right-angled triangle having the corner points (0, 0), (0, 1), and (1, 1), thus spanning a total area of (1\u002A1)/2 = 0.5, in which case the Rosenbluth index yields a value of 1/(2\u002A0.5) = 1.")
          ),
          br(),
          hr(),
          br(),
          # Interactive instructions shown below the metric description.
          div(class = "instructions",
            p("On the right side you can move five sliders to learn about the construction of the Rosenbluth diagram and the calculation of the Rosenbluth index. The sliders represent the units (e.g., scale or test scores, reaction times in ms, or counts of the variable values of a categorical variable) of five different components (e.g., individual persons or the variable values of a categorical variable)."),
            p("See what area is coloured in the rightmost diagram to compute the Rosenbluth index."),
            p("Create a situation, in which one component has all units or in which all five components have equal numbers of units."),
            br(),
            br()
          )
        )
      ),
      column(width = 1),
      column(width = 4,
        # Inputs: one slider per component; ids "ros1".."ros5" are read by the server.
        wellPanel(class = "bw",
          # div(class = "inter_header", "Set the revenue"),
          sliderInput("ros1", "First Component's Units", min = 0, max = 1000, step = 10, value = 400),
          sliderInput("ros2", "Second Component's Units", min = 0, max = 1000, step = 10, value = 200),
          sliderInput("ros3", "Third Component's Units", min = 0, max = 1000, step = 10, value = 100),
          sliderInput("ros4", "Fourth Component's Units", min = 0, max = 1000, step = 10, value = 50),
          sliderInput("ros5", "Fifth Component's Units", min = 0, max = 1000, step = 10, value = 0)
        ),
        # Output: the computed Rosenbluth index rendered by the server.
        wellPanel(class = "bw",
          fluidRow(
            valueBoxOutput(width = 12, "ros_index")
          )
        )
      )
    ),
    # Step-by-step construction of the Rosenbluth diagram.
    fluidRow(
      box(title = "1. Sort (Descending) and Cumulate the Values", solidHeader = TRUE, width = 4, plotOutput("ros_out1")),
      box(title = "2. Connect the Points", solidHeader = TRUE, width = 4, plotOutput("ros_out2")),
      box(title = "3. Fill the Area above the Line", solidHeader = TRUE, width = 4, plotOutput("ros_out3"))
    ),
    tags$footer(foot)
) |
################# Identify Regions under Selection from Sliding Window Analysis #########
#
# This script will filter the output file from sliding window analysis...
# to include only regions where the FST average goes above or below the confidence interval
# You can then compare overlapping regions between two analyses on plots per linkage group
# You can also identify which loci are within the selected regions
#
# MF 3/22/2018 for PCod Compare Project
#
#########################################################################################
# Install Packages --------------------------------------------------------
# Install only if missing, so the script does not hit CRAN on every run.
# readr is included because read_delim() is used below.
for (pkg in c("readr", "dplyr", "ggplot2")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(readr)
library(ggplot2)
library(dplyr)
# Load Data -------------------------------------------------------------
# NOTE(review): hard-coded absolute Windows path -- this script only runs on
# the author's machine; consider relative paths or here::here().
setwd("D:/Pacific cod/DataAnalysis/PCod-Compare-repo/analyses/SlidingWindow")
# Helper functions: locate markers inside windows, and plotting routines.
source("SlidingWindow_FindMarkers_Function.R")
source("Plot_SlidingWindowAnalysis_Functions.R")
# Tab-delimited sliding-window FST output for the eastern analysis.
east <- read_delim("East/batch_8_final_filtered_east_globalFST_kernel_smoothing_1e+05_bootstraps_sigma_250000_div150_FILTERED.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
head(east)
# Same for the western (2-region) analysis.
west <- read_delim("West/batch_8_final_filtered_west_2reg_kernel_smoothing_1e+05_bootstraps_sigma_250000_div150_FILTERED.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
head(west)
# Add columns for selection -------------------------------------------------------------
# Flag windows whose smoothed FST falls outside the bootstrap 95% CI.
# The east and west pipelines were identical, so the logic is factored into
# one helper. `positive`/`negative` are deliberately kept as character
# "1"/"0" (as in the original) so the ggplot colour scales below stay
# discrete, and `selection` reflects positive selection only, matching the
# downstream filters.
flag_selection <- function(dat) {
  dat %>%
    mutate(positive = ifelse(`Fst.Fct` > upper_95, "1", "0")) %>%
    mutate(negative = ifelse(`Fst.Fct` < lower_95, "1", "0")) %>%
    mutate(selection = ifelse(positive == 1, "Yes", "No"))
}
east_selection <- flag_selection(east)
head(east_selection)
west_selection <- flag_selection(west)
head(west_selection)
# Visualize Fst v. Bootstrap CI ----------------------------------------------
# Each plot compares the smoothed FST to one CI bound; points are coloured by
# the character "1"/"0" flag created above.
## positive selection: Fst above upper 95% confidence interval
ggplot(data=east_selection, aes(x = `Fst.Fct`, y = upper_95)) +
geom_point(aes(color=positive)) +
labs(title="East Sliding Window\nPositive Selection")
## negative selection: Fst below lower 95% confidence interval
ggplot(data=east_selection, aes(x = `Fst.Fct`, y = lower_95)) +
geom_point(aes(color=negative)) +
labs(title="East Sliding Window\nNegative Selection")
## positive selection: Fst above upper 95% confidence interval
# Axis limits are fixed so the east/west plots are visually comparable.
ggplot(data=west_selection, aes(x = `Fst.Fct`, y = upper_95)) +
geom_point(aes(color=positive)) +
ylim(-0.1,0.5) +
xlim(-0.1,0.5) +
labs(title="West Sliding Window\nPositive Selection")
## negative selection: Fst below lower 95% confidence interval
ggplot(data=west_selection, aes(x = `Fst.Fct`, y = lower_95)) +
geom_point(aes(color=negative)) +
ylim(-0.1,0.1) +
xlim(-0.1,0.5) +
labs(title="West Sliding Window\nNegative Selection")
# Visualize P values ------------------------------------------------------
# FST against the bootstrap p-value, coloured by the Yes/No selection flag.
ggplot(data=east_selection, aes(x = `Fst.Fct`, y = pvalue)) +
geom_point(aes(color=selection)) +
ylim(-0.1,1) +
xlim(-0.1,1) +
labs(title="East Sliding Window\nP values")
ggplot(data=west_selection, aes(x = `Fst.Fct`, y = pvalue)) +
geom_point(aes(color=selection)) +
ylim(-0.1,1) +
xlim(-0.1,0.5) +
labs(title="West Sliding Window\nP values")
# Match Data sets on Plot ---------------------------------------------------------
## plot all chromosomes using overlay function
# Standardize column names so both data sets match what the overlay function
# expects. NOTE(review): "Mean_boostrap" looks like a typo for "Mean_bootstrap";
# kept as-is in case just_plot_overlay_diverge() references it by that name --
# verify before renaming.
colnames(east_selection) <- c("chromosome","position","Fst/Fct","Mean_boostrap","lower_95","upper_95","pvalue","positive","negative","selection")
colnames(west_selection) <- c("chromosome","position","Fst/Fct","Mean_boostrap","lower_95","upper_95","pvalue","positive","negative","selection")
# Overlay east vs west divergence regions across all chromosomes; writes the
# plot files under plots/ (export = TRUE).
just_plot_overlay_diverge(data1 = east_selection, data2 = west_selection, Nb_divisions = 150, which.chromosome.analysis="all", which.chromosome.plot="all",export = TRUE, name="plots/SWA_East_West_Divergence_Overlay")
# Filter Data and Write to File --------------------------------------------
# Keep only the windows flagged as under (positive) selection.
east_filter <- east_selection %>%
filter(selection == "Yes")
dim(east_filter)
west_filter <- west_selection %>%
filter(selection == "Yes")
dim(west_filter)
# Find Loci Within Windows ------------------------------------------------
## read in SWA input file, sorted
east_marker_data <- read.table("batch_8_SWA_input_east_sorted.txt", header = TRUE, sep = "\t")
west_marker_data <- read.table("batch_8_SWA_input_west_sorted.txt", header = TRUE, sep = "\t")
## write, per window, the number and names of the markers it contains
find_markers_in_window(marker_data = east_marker_data, window_size= 250000, divisions = 150, output = "east_SWA_Num_Name_Loci_Per_Window.txt")
find_markers_in_window(marker_data = west_marker_data, window_size= 250000, divisions = 150, output = "West_SWA_Num_Name_Loci_Per_Window.txt")
# Filter Loci within Windows by Selection Region --------------------------
## east data
## BUGFIX: read back the file written just above ("east_SWA_..."); the old
## name "East_Num_Name_Loci_Per_Window.txt" is not produced by this script.
e_find_markers_output <- read.delim("east_SWA_Num_Name_Loci_Per_Window.txt", sep = "\t", header= FALSE, colClasses = c("character", "numeric", "numeric", "character"))
colnames(e_find_markers_output) <- c("chromosome", "position", "num_markers", "loci_names")
View(e_find_markers_output)
## keep only windows whose position was flagged as under selection
e_find_markers_filtered <- filter(e_find_markers_output, position %in% east_filter$position)
dim(e_find_markers_filtered)
View(e_find_markers_filtered)
dim(east_filter)
write.table(e_find_markers_filtered, "East_SWA_SelectionRegions_Markers.txt", sep = "\t",
row.names=FALSE, quote=FALSE)
## west data
w_find_markers_output <- read.delim("West_SWA_Num_Name_Loci_Per_Window.txt", sep = "\t", header= FALSE, colClasses = c("character", "numeric", "numeric", "character"))
colnames(w_find_markers_output) <- c("chromosome", "position", "num_markers", "loci_names")
View(w_find_markers_output)
w_find_markers_filtered <- filter(w_find_markers_output, position %in% west_filter$position)
dim(w_find_markers_filtered)
dim(west_filter)
write.table(w_find_markers_filtered, "West_SWA_SelectionRegions_Markers.txt", sep = "\t",
row.names=FALSE, quote=FALSE)
# Output for Genome Island Width ------------------------------------------
# NOTE(review): east_filter / west_filter are recomputed identically to the
# earlier "Filter Data" section -- presumably so this section can be run on
# its own; harmless either way.
east_filter <- east_selection %>%
filter(selection == "Yes")
dim(east_filter)
west_filter <- west_selection %>%
filter(selection == "Yes")
dim(west_filter)
# Write the selected windows for the island-width analysis.
# NOTE(review): unlike the marker tables above, these writes keep row.names;
# confirm whether the downstream width script expects that.
write.table(west_filter, "West_SWA_SelectionRegions_Width.txt", sep = "\t",
quote=FALSE)
write.table(east_filter, "East_SWA_SelectionRegions_Width.txt", sep = "\t",
quote=FALSE)
View(west_selection)
| /analyses/SlidingWindow/ID_Divergence_Regions_SlidingWindow_east_v_west_MF.R | no_license | mfisher5/PCod-Compare-repo | R | false | false | 6,621 | r | ################# Identify Regions under Selection from Sliding Window Analysis #########
#
# This script will filter the output file from sliding window analysis...
# to include only regions where the FST average goes above or below the confidence interval
# You can then compare overlapping regions between two analyses on plots per linkage group
# You can also identify which loci are within the selected regions
#
# MF 3/22/2018 for PCod Compare Project
#
#########################################################################################
# Install Packages --------------------------------------------------------
install.packages("dplyr")
install.packages("ggplot2")
library(readr)
library(ggplot2)
library(dplyr)
# Load Data -------------------------------------------------------------
setwd("D:/Pacific cod/DataAnalysis/PCod-Compare-repo/analyses/SlidingWindow")
source("SlidingWindow_FindMarkers_Function.R")
source("Plot_SlidingWindowAnalysis_Functions.R")
east <- read_delim("East/batch_8_final_filtered_east_globalFST_kernel_smoothing_1e+05_bootstraps_sigma_250000_div150_FILTERED.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
head(east)
west <- read_delim("West/batch_8_final_filtered_west_2reg_kernel_smoothing_1e+05_bootstraps_sigma_250000_div150_FILTERED.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
head(west)
# Add columns for selection -------------------------------------------------------------
east_selection <- east %>%
mutate(positive = ifelse(`Fst.Fct` > upper_95, "1", "0")) %>%
mutate(negative = ifelse(`Fst.Fct` < lower_95, "1", "0")) %>%
mutate(selection = ifelse(positive == 1, "Yes", "No"))
head(east_selection)
west_selection <- west %>%
mutate(positive = ifelse(`Fst.Fct` > upper_95, "1", "0")) %>%
mutate(negative = ifelse(`Fst.Fct` < lower_95, "1", "0")) %>%
mutate(selection = ifelse(positive == 1, "Yes", "No"))
head(west_selection)
# Visualize Fst v. Bootstrap CI ----------------------------------------------
## positive selection: Fst above upper 95% confidence interval
ggplot(data=east_selection, aes(x = `Fst.Fct`, y = upper_95)) +
geom_point(aes(color=positive)) +
labs(title="East Sliding Window\nPositive Selection")
## negative selection: Fst below lower 95% confidence interval
ggplot(data=east_selection, aes(x = `Fst.Fct`, y = lower_95)) +
geom_point(aes(color=negative)) +
labs(title="East Sliding Window\nNegative Selection")
## positive selection: Fst above upper 95% confidence interval
ggplot(data=west_selection, aes(x = `Fst.Fct`, y = upper_95)) +
geom_point(aes(color=positive)) +
ylim(-0.1,0.5) +
xlim(-0.1,0.5) +
labs(title="West Sliding Window\nPositive Selection")
## negative selection: Fst below lower 95% confidence interval
ggplot(data=west_selection, aes(x = `Fst.Fct`, y = lower_95)) +
geom_point(aes(color=negative)) +
ylim(-0.1,0.1) +
xlim(-0.1,0.5) +
labs(title="West Sliding Window\nNegative Selection")
# Visualize P values ------------------------------------------------------
ggplot(data=east_selection, aes(x = `Fst.Fct`, y = pvalue)) +
geom_point(aes(color=selection)) +
ylim(-0.1,1) +
xlim(-0.1,1) +
labs(title="East Sliding Window\nP values")
ggplot(data=west_selection, aes(x = `Fst.Fct`, y = pvalue)) +
geom_point(aes(color=selection)) +
ylim(-0.1,1) +
xlim(-0.1,0.5) +
labs(title="West Sliding Window\nP values")
# Match Data sets on Plot ---------------------------------------------------------
## plot all chromosomes using overlay function
colnames(east_selection) <- c("chromosome","position","Fst/Fct","Mean_boostrap","lower_95","upper_95","pvalue","positive","negative","selection")
colnames(west_selection) <- c("chromosome","position","Fst/Fct","Mean_boostrap","lower_95","upper_95","pvalue","positive","negative","selection")
just_plot_overlay_diverge(data1 = east_selection, data2 = west_selection, Nb_divisions = 150, which.chromosome.analysis="all", which.chromosome.plot="all",export = TRUE, name="plots/SWA_East_West_Divergence_Overlay")
# Filter Data and Write to File --------------------------------------------
east_filter <- east_selection %>%
filter(selection == "Yes")
dim(east_filter)
west_filter <- west_selection %>%
filter(selection == "Yes")
dim(west_filter)
# Find Loci Within Windows ------------------------------------------------
## read in SWA input file, sorted
east_marker_data = read.table("batch_8_SWA_input_east_sorted.txt", header = TRUE, sep = "\t")
west_marker_data = read.table("batch_8_SWA_input_west_sorted.txt", header = TRUE, sep = "\t")
## function
find_markers_in_window(marker_data = east_marker_data, window_size= 250000, divisions = 150, output = "east_SWA_Num_Name_Loci_Per_Window.txt")
find_markers_in_window(marker_data = west_marker_data, window_size= 250000, divisions = 150, output = "West_SWA_Num_Name_Loci_Per_Window.txt")
# Filter Loci within Windows by Selection Region --------------------------
## east data
e_find_markers_output <- read.delim("East_Num_Name_Loci_Per_Window.txt", sep = "\t", header= FALSE, colClasses = c("character", "numeric", "numeric", "character"))
colnames(e_find_markers_output) <- c("chromosome", "position", "num_markers", "loci_names")
View(e_find_markers_output)
e_find_markers_filtered <- filter(e_find_markers_output, position %in% east_filter$position)
dim(e_find_markers_filtered)
View(e_find_markers_filtered)
dim(east_filter)
write.table(e_find_markers_filtered, "East_SWA_SelectionRegions_Markers.txt", sep = "\t",
row.names=FALSE, quote=FALSE)
## west data
w_find_markers_output <- read.delim("West_SWA_Num_Name_Loci_Per_Window.txt", sep = "\t", header= FALSE, colClasses = c("character", "numeric", "numeric", "character"))
colnames(w_find_markers_output) <- c("chromosome", "position", "num_markers", "loci_names")
View(w_find_markers_output)
w_find_markers_filtered <- filter(w_find_markers_output, position %in% west_filter$position)
dim(w_find_markers_filtered)
dim(west_filter)
write.table(w_find_markers_filtered, "West_SWA_SelectionRegions_Markers.txt", sep = "\t",
row.names=FALSE, quote=FALSE)
# Output for Genome Island Width ------------------------------------------
east_filter <- east_selection %>%
filter(selection == "Yes")
dim(east_filter)
west_filter <- west_selection %>%
filter(selection == "Yes")
dim(west_filter)
write.table(west_filter, "West_SWA_SelectionRegions_Width.txt", sep = "\t",
quote=FALSE)
write.table(east_filter, "East_SWA_SelectionRegions_Width.txt", sep = "\t",
quote=FALSE)
View(west_selection)
|
# Randomized complete block design (RCBD) ANOVA: product response for three
# brands ("Marcas", treatment) measured across five blocks ("Mediciones").
# Response vector: 5 consecutive observations per brand.
Resp_Prod <- c(
1.04,1.05,1.06,1.07,1.05,
1.10,1.11,1.09,1.07,1.08,
1.21,1.19,1.18,1.17,1.21
)
# Treatment factor: each brand label repeated for its 5 measurements.
Trt_Marcas <- rep( c("Marca1", "Marca2", "Marca3"), each = 5)
# Block factor: measurement labels 1-5 cycled once per brand.
Blq_Mediciones <- rep(c("Medicion1", "Medicion2", "Medicion3", "Medicion4", "Medicion5"), times = 3)
Datos<- data.frame(Resp_Prod, Trt_Marcas, Blq_Mediciones)
head(Datos)
tail(Datos)
# Additive two-way ANOVA (treatment + block, no interaction: one obs per cell).
modelo <- aov(Resp_Prod~ Trt_Marcas+ Blq_Mediciones, data = Datos)
summary(modelo)
# Tukey HSD pairwise comparisons of the brand means.
TukeyHSD(modelo)
# Widen the left margin so the pairwise-comparison labels fit.
par(mar=c(6,11,3,1))
plot(TukeyHSD(modelo,'Trt_Marcas'), las=1, col="brown")
# Compact-letter grouping of the brands (agricolae).
library(agricolae)
Prueba <- HSD.test(modelo, "Trt_Marcas", group=TRUE)
Prueba$groups
# Residual normality diagnostics: QQ plot and Shapiro-Wilk test.
qqnorm(modelo$residuals)
qqline(modelo$residuals)
shapiro.test(modelo$residuals)
| /Tarea3_AOVRB/source/ejercicio2 (1).R | no_license | CarlosRDGZ/Metodos_Estadisticos_2018 | R | false | false | 740 | r | Resp_Prod <- c(
1.04,1.05,1.06,1.07,1.05,
1.10,1.11,1.09,1.07,1.08,
1.21,1.19,1.18,1.17,1.21
)
Trt_Marcas <- rep( c("Marca1", "Marca2", "Marca3"), each = 5)
Blq_Mediciones <- rep(c("Medicion1", "Medicion2", "Medicion3", "Medicion4", "Medicion5"), times = 3)
Datos<- data.frame(Resp_Prod, Trt_Marcas, Blq_Mediciones)
head(Datos)
tail(Datos)
modelo <- aov(Resp_Prod~ Trt_Marcas+ Blq_Mediciones, data = Datos)
summary(modelo)
TukeyHSD(modelo)
par(mar=c(6,11,3,1))
plot(TukeyHSD(modelo,'Trt_Marcas'), las=1, col="brown")
library(agricolae)
Prueba <- HSD.test(modelo, "Trt_Marcas", group=TRUE)
Prueba$groups
qqnorm(modelo$residuals)
qqline(modelo$residuals)
shapiro.test(modelo$residuals)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dairy_fin_charts.R
\docType{data}
\name{dairy_fin_charts}
\alias{dairy_fin_charts}
\title{Financial Charts of Top Chinese Dairy Industry Companies(2006-2017)}
\format{A data frame with 120 rows and 5 columns}
\usage{
data(dairy_fin_charts)
}
\description{
Income, profit, and ROE of ten Chinese dairy companies traded on the stock
market, from 2006 to 2017.
}
\references{
Wind database of Chinese and Hong Kong's stock market data
}
\keyword{datasets}
| /man/dairy_fin_charts.Rd | no_license | jkang37/dppbar | R | false | true | 530 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dairy_fin_charts.R
\docType{data}
\name{dairy_fin_charts}
\alias{dairy_fin_charts}
\title{Financial Charts of Top Chinese Dairy Industry Companies(2006-2017)}
\format{A dataframe containing 120 rows and 5 columns}
\usage{
data(dairy_fin_charts)
}
\description{
Income, profit and ROE of ten Chinese dairy companies trade on stock market,
from 2006 to 2017.
}
\references{
Wind database of Chinese and Hong Kong's stock market data
}
\keyword{datasets}
|
#---------------------------------------------------
# Main file to replicate Empirically-Driven Simulations
# Sant'Anna, Song and Xu (2021),
# Simulations based on R 4.1
# Updated: 02/06/2021
#---------------------------------------------------
#-----------------------------------------------------------------------------
# Startup - clear memory, load packages, and set parameters
# Clear memory
rm(list = ls())
#-----------------------------------------------------------------------------
# Basic parameters for the simulation - Doesn't change over setups
ncores <- 36 # Number of cores to use in parallel
# NOTE(review): R drops the leading zero, so seed1 is the number 7232021.
seed1 <- 07232021 # Set initial seed (guaranteed reproducibility)
nrep <- 1000 # Monte Carlo replications
n <- 1000 # Sample size drawn in each replication
set.seed(seed1)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# load the necessary libraries
library(here)
library(foreach)
library(doSNOW)
library(doRNG)
library(dplyr)
# MAKE SURE YOU HAVE INSTALLED THE IPS library
#devtools::install_github("pedrohcgs/IPS")
library(IPS)
library(CBPS)
library(estimatr)
library(foreign)
#devtools::install_github("echasnovski/pdqr")
library(pdqr)
#-----------------------------------------------------------------------------
# Set seed
# (Second call with the same seed; no random draws happened in between, so
# this simply resets the stream to the same state -- harmless.)
set.seed(seed1)
#-----------------------------------------------------------------------------
# Source Auxiliary functions
# Influence functions for CBPS and GLM-based Pscores
source(here("Codes", "Inflc_glm.R"))
source(here("Codes", "Inflc_CBPS.R"))
#----------------------------------------------------------------------------
###########################################################################
# Load data from Chernozhukov and Hansen (2004)
load(here("Applications/401k/data/data401k.RData"))
# Keep only strictly positive incomes (log(inc) is taken below).
data401k <- data401k[data401k$inc>0,]
#----------------------------------------------------------------------------
# Rescale some variables
# age is mapped from [25, 64] onto [0, 1]; inc, fsize and educ are rescaled
# by fixed constants (presumably the observed ranges -- TODO confirm).
data401k$age <- (data401k$age -25)/(64-25)
data401k$inc <- (data401k$inc+2652)/(242124+2652)
data401k$fsize <- data401k$fsize/13
data401k$educ <- data401k$educ/18
#----------------------------------------------------------------------------
# Generate some covariate transformations (squares and log-income terms used
# in the propensity-score / outcome-regression specifications below)
data401k$incsq <- data401k$inc^2
data401k$loginc <- log(data401k$inc)
data401k$logincsq <- data401k$loginc^2
data401k$agesq <- data401k$age^2
data401k$educsq <- data401k$educ^2
data401k$fsizesq <- data401k$fsize^2
#----------------------------------------------------------------------------
# Keep only variables we will use: outcomes (tw, net_tfa), the treatment
# indicators (e401, p401), and the continuous + binary covariates.
cts_variables_names <- c("inc", "loginc", "age", "fsize", "educ")
binary_variables_names <- c("hown","marr", "twoearn", "db", "pira")
covariates_names <- c(cts_variables_names,binary_variables_names )
outcomes_names <- c("tw", "net_tfa")
treat_names <- c("e401", "p401")
all_variables <- c(outcomes_names,
treat_names,
covariates_names)
df <- data401k[,colnames(data401k) %in% all_variables]
covariates <- df[,covariates_names]
#----------------------------------------------------------------------------
# Sample size of original data
n_total <- dim(df)[1]
#----------------------------------------------------------------------------
# Instrument propensity score Specifications
# Logit for the instrument (e401) on covariates plus squared terms; the
# simpler specification without squares is kept commented for reference.
# instrument_ps_formula <- as.formula("e401 ~ (inc + loginc + age +fsize + educ+ hown +
# marr+ twoearn+ db+ pira)")
instrument_ps_formula <- as.formula("e401 ~ (inc + loginc + age +fsize + educ+ hown +
marr+ twoearn+ db+ pira) +
I(inc^2) + I(loginc^2) + I(age^2) +
I(fsize^2) + I(educ^2)")
instrument_ps <- glm(formula = instrument_ps_formula,
family = binomial(link = "logit"),
data = df,
x = TRUE)
# Inverse logit of the linear predictor; should coincide (up to rounding)
# with instrument_ps$fitted.values.
X_prime_Beta_inst_ps <- instrument_ps$x %*% ( instrument_ps$coefficients)
df$instrument_ps <- 1/(1+exp(- X_prime_Beta_inst_ps))
#----------------------------------------------------------------------------
# Generate potential outcomes
# Outcome regressions for intention to treat, fit separately by e401 arm.
# The linear specification is kept commented for reference.
# OR_itt_formula <- as.formula("net_tfa ~ (inc + loginc + age +fsize + educ+ hown +
# marr+ twoearn+ db+ pira)")
OR_itt_formula <- as.formula("net_tfa ~ (inc + loginc + age +fsize + educ+ hown +
marr+ twoearn+ db+ pira) +
I(inc^2) + I(loginc^2) + I(age^2) +
I(fsize^2) + I(educ^2)")
out_reg_d0 <- lm(OR_itt_formula, data = df[df$e401==0,])
out_reg_d1 <- lm(OR_itt_formula, data = df[df$e401==1,])
X_or <- model.matrix(OR_itt_formula, data = df)
# get conditional means: predict both arms' regressions for ALL observations.
df$out_reg_d0 <- X_or %*% ( out_reg_d0$coefficients)
df$out_reg_d1 <- X_or %*% ( out_reg_d1$coefficients)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Generate potential outcomes
# Scaled conditional means (in $1000s) plus standard normal noise; `sorting`
# is the individual treatment gain Y1 - Y0.
Y0 <- df$out_reg_d0/1000 + rnorm(n_total)
Y1 <- df$out_reg_d1/1000 + rnorm(n_total)
df$sorting <- (Y1 - Y0)
#----------------------------------------------------------------------------
# propensity score Specifications
# Participation (p401) modeled on the individual gain among the encouraged
# (e401 == 1), i.e. selection on gains; only the fitted coefficients are kept.
ps_formula <- as.formula("p401 ~ sorting")
ps_treat <- glm(formula = ps_formula,
family = binomial(link = "logit"),
data = df[df$e401==1,],
x = TRUE)
ps_treat <- ps_treat$coefficients
#ps_treat <- c(0.811, 0.055)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Keep only necessary portions of data: conditional means, the baseline
# instrument pscore, and the covariates used to redraw samples in the loop.
df <- data.frame(
out_reg_d0 = df$out_reg_d0,
out_reg_d1 = df$out_reg_d1,
instrument_ps_baseline = df$instrument_ps,
covariates
)
# df <- df[order(df$inc),]
#----------------------------------------------------------------------------
# True ATE
# NOTE(review): hard-coded values; per the comments they come from earlier
# large-scale simulations of this DGP (nonlinear OR variant).
ate_true <- 0.8106185
# True QTE (based on simulations) - linear OR
# qte_true <- c(-474.4316 , 3420.1549 , 9453.3591, 9544.2945, 15063.9459 )
# NOnlinear OR
qte_true <- c( -0.1476849 , 0.2006515 , 0.7719787 , 1.3601936, 2.0244570 )
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Now, we have all parameters for the simulation
# Let's start
# Parallel backend: one SNOW cluster with `ncores` workers, registered for
# foreach; doRNG (used below) gives reproducible per-iteration RNG streams.
cl <- parallel::makeCluster(ncores)
registerDoSNOW(cl)
# Progress bar updated by doSNOW after each completed iteration.
pb <- txtProgressBar(max = nrep, style = 3)
progress <- function(n) setTxtProgressBar(pb,n)
opts <- list(progress=progress)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#Start the MONTE CARLO loop
# One Monte Carlo replication per iteration: bootstrap-resample covariates,
# simulate (Y0, Y1, Z, D), then compute GLM-based LATE and LQTE estimates.
# FIX: the original used `next` to skip degenerate draws; `next` is not valid
# inside a foreach body and only "worked" because the resulting error was
# swallowed by .errorhandling='remove'. Returning NULL is explicit, and NULL
# elements are dropped by the do.call(rbind, ...) below, so `mc` is unchanged.
MC_sims <- foreach(nn = 1:nrep,
                   .options.snow = opts,
                   .errorhandling='remove') %dorng%
  {
    # Draw covariates (which implies we draw instrument pscores, and OR, too)
    id_sim <- sample(1:n_total, n, replace = TRUE )
    df_sim <- df[(id_sim),]
    #xcov <- df_sim[,-(1:3)]
    xcov <- model.matrix(as.formula("~ (inc + loginc + age +fsize + educ+ hown +
marr+ twoearn+ db+ pira) +
I(inc^2) + I(loginc^2) + I(age^2) +
I(fsize^2) + I(educ^2)"), data = df_sim)
    # Potential outcomes: conditional mean (in $1000s) plus N(0,1) noise.
    Y0 <- df_sim$out_reg_d0/1000 + stats::rnorm(n)
    Y1 <- df_sim$out_reg_d1/1000 + stats::rnorm(n)
    # Instrument assignment from the baseline instrument pscore.
    Z <- stats::rbinom(n, 1, df_sim$instrument_ps)
    # Generate pscore and Treatment Status (selection on gains via Y1 - Y0;
    # one-sided noncompliance: D0 is identically zero).
    endog_index <- cbind(1, (Y1-Y0)) %*% ps_treat
    ps <- 1/(1+exp(- endog_index))
    D1 <- stats::rbinom(n, size=1, prob = ps)
    D0 <- rep(0,n)
    treat <- Z*D1 + (1-Z) * D0
    Y <- Y0 * (1 - treat) + Y1 * treat
    # Skip degenerate draws (too few treated or too few instrumented units).
    if(sum(treat) < 20) return(NULL)
    if(sum(Z) < 50) return(NULL)
    #----------------------------------------------------------------------------
    # fit different estimators for the propensity score
    #Logit GLM estimator
    fit.glm <- glm.fit(x = xcov, y = Z,
                       family = binomial(link = "logit"))
    # GLM influence function and LATE point estimate / std. error.
    glm.lin.rep <- inflc_glm(Z, xcov, fit.glm$fitted.values)
    late_glm <- IPS::LATE(Y,Z, treat, xcov, fit.glm$fitted.values, glm.lin.rep)
    #----------------------------------------------------------------------------
    #----------------------------------------------------------------------------
    # compute QTE using different estimators
    tau <- c(0.1, 0.25, 0.5, 0.75, 0.9)
    # Bandwidth
    bw <- "nrd0"
    # GLM
    lqte_glm <- IPS::LQTE(Y, Z, treat, xcov, fit.glm$fitted.values,
                          glm.lin.rep, tau, bw = bw)
    #----------------------------------------------------------------------------
    #Return point estimates and standard errors
    # Layout: 1 LATE + 5 LQTE point estimates, then the matching std. errors.
    out <- c(
      # Point estimates
      late_glm$late,
      lqte_glm$lqte,
      # std. errors
      late_glm$late.se,
      lqte_glm$lqte.se
    )
    return(out)
  }
#-----------------------------------------------------------------------------
#Stop the cluster
stopCluster(cl)
#-----------------------------------------------------------------------------
#Put the Monte Carlo Results in an matrix
# Each kept replication contributes one row: columns 1-6 are the point
# estimates (LATE + 5 LQTEs), columns 7-12 the matching std. errors.
mc <- do.call(rbind, MC_sims)
# NOTE(review): mc_all and the commented lines below appear to be leftovers;
# mc_all is not used afterwards.
mc_all <- as.matrix(mc)
# mode(mc) = "numeric"
# mc2 <- as.matrix(MC_sims, nrow = nrep, ncol = 72)
#-----------------------------------------------------------------------------
# Mean in the Monte Carlo
mean.mc <- base::colMeans(mc, na.rm = TRUE)
# True values replicated into a matrix conformable with mc[, 1:6]
# (one column per estimand, identical across rows).
true_effects <- c(rep(ate_true, 1),
rep(qte_true[1], 1),
rep(qte_true[2], 1),
rep(qte_true[3], 1),
rep(qte_true[4], 1),
rep(qte_true[5], 1)
)
true_effects <- matrix(true_effects, nrow = nrow(mc), ncol = 6, byrow = TRUE)
# Bias, RMSE and mean absolute bias of the point estimates.
bias_mc <- mean.mc[1:6] - colMeans(true_effects)
RMSE_mc <- sqrt(base::colMeans((mc[,1:6] - true_effects)^2, na.rm = TRUE ))
abs_bias_mc <- (base::colMeans(abs(mc[,1:6] - true_effects), na.rm = TRUE))
# Empirical coverage of the nominal 95% Wald intervals (estimate +/- 1.96 se).
coverage <- colMeans(((mc[,1:6] - 1.96*mc[,7:12]) <= true_effects) *
((mc[,1:6] + 1.96*mc[,7:12]) >= true_effects), na.rm = TRUE)
| /Simulations/401k/Old_not_share/lqte_MLE.R | permissive | lnsongxf/IPS_replication | R | false | false | 10,650 | r | #---------------------------------------------------
# Main file to replicate Empirically-Driven Simulations
# Sant'Anna, Song and Xu (2021),
# Simulations based on R 4.1
# Updated: 02/06/2021
#---------------------------------------------------
#-----------------------------------------------------------------------------
# Startup - clear memory, load packages, and set parameters
# Clear memory
# NOTE(review): rm(list = ls()) wipes the calling workspace; acceptable for a
# standalone replication script, but do not source() this from a live session.
rm(list = ls())
#-----------------------------------------------------------------------------
# Basic parameters for the simulation - Doesn't change over setups
ncores <- 36 # Number of cores to use in parallel
seed1 <- 07232021 # Set initial seed (guaranteed reproducibility)
nrep <- 1000 # Monte Carlo replications
n <- 1000 # Sample size drawn (with replacement) in each replication
set.seed(seed1)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# load the necessary libraries
library(here)
library(foreach)
library(doSNOW)
library(doRNG)
library(dplyr)
# MAKE SURE YOU HAVE INSTALLED THE IPS library
#devtools::install_github("pedrohcgs/IPS")
library(IPS)
library(CBPS)
library(estimatr)
library(foreign)
#devtools::install_github("echasnovski/pdqr")
library(pdqr)
#-----------------------------------------------------------------------------
# Set seed (again, so results do not depend on library load order)
set.seed(seed1)
#-----------------------------------------------------------------------------
# Source Auxiliary functions
# Influence functions for CBPS and GLM-based Pscores
source(here("Codes", "Inflc_glm.R"))
source(here("Codes", "Inflc_CBPS.R"))
#----------------------------------------------------------------------------
###########################################################################
# Load data from Chernozhukov and Hansen (2004)
load(here("Applications/401k/data/data401k.RData"))
# Drop observations with non-positive income (log(inc) is used below)
data401k <- data401k[data401k$inc>0,]
#----------------------------------------------------------------------------
# Rescale some variables (map each covariate into roughly [0, 1])
data401k$age <- (data401k$age -25)/(64-25)
data401k$inc <- (data401k$inc+2652)/(242124+2652)
data401k$fsize <- data401k$fsize/13
data401k$educ <- data401k$educ/18
#----------------------------------------------------------------------------
# Generate some covariate transformations
data401k$incsq <- data401k$inc^2
data401k$loginc <- log(data401k$inc)
data401k$logincsq <- data401k$loginc^2
data401k$agesq <- data401k$age^2
data401k$educsq <- data401k$educ^2
data401k$fsizesq <- data401k$fsize^2
#----------------------------------------------------------------------------
# Keep only variables we will use
cts_variables_names <- c("inc", "loginc", "age", "fsize", "educ")
binary_variables_names <- c("hown","marr", "twoearn", "db", "pira")
covariates_names <- c(cts_variables_names,binary_variables_names )
outcomes_names <- c("tw", "net_tfa")
treat_names <- c("e401", "p401")
all_variables <- c(outcomes_names,
                   treat_names,
                   covariates_names)
df <- data401k[,colnames(data401k) %in% all_variables]
covariates <- df[,covariates_names]
#----------------------------------------------------------------------------
# Sample size of original data
n_total <- dim(df)[1]
#----------------------------------------------------------------------------
# Instrument propensity score Specifications: logit of 401(k) eligibility
# (e401) on covariates plus squared terms.  The commented line is the
# linear-only alternative specification.
# instrument_ps_formula <- as.formula("e401 ~ (inc + loginc + age +fsize + educ+ hown +
#                                     marr+ twoearn+ db+ pira)")
instrument_ps_formula <- as.formula("e401 ~ (inc + loginc + age +fsize + educ+ hown +
                                    marr+ twoearn+ db+ pira) +
                                    I(inc^2) + I(loginc^2) + I(age^2) +
                                    I(fsize^2) + I(educ^2)")
instrument_ps <- glm(formula = instrument_ps_formula,
                     family = binomial(link = "logit"),
                     data = df,
                     x = TRUE)
# Fitted index and fitted eligibility probability (used as the "true"
# instrument propensity score in the simulated DGP).
X_prime_Beta_inst_ps <- instrument_ps$x %*% ( instrument_ps$coefficients)
df$instrument_ps <- 1/(1+exp(- X_prime_Beta_inst_ps))
#----------------------------------------------------------------------------
# Generate potential outcomes
# Outcome regressions for intention to treat, fit separately by eligibility
# status.  The commented formula is the linear-only alternative.
# OR_itt_formula <- as.formula("net_tfa ~ (inc + loginc + age +fsize + educ+ hown +
#                                    marr+ twoearn+ db+ pira)")
OR_itt_formula <- as.formula("net_tfa ~ (inc + loginc + age +fsize + educ+ hown +
                                    marr+ twoearn+ db+ pira) +
                                    I(inc^2) + I(loginc^2) + I(age^2) +
                                    I(fsize^2) + I(educ^2)")
out_reg_d0 <- lm(OR_itt_formula, data = df[df$e401==0,])
out_reg_d1 <- lm(OR_itt_formula, data = df[df$e401==1,])
X_or <- model.matrix(OR_itt_formula, data = df)
# get conditional means (evaluated on the full sample for both arms)
df$out_reg_d0 <- X_or %*% ( out_reg_d0$coefficients)
df$out_reg_d1 <- X_or %*% ( out_reg_d1$coefficients)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Generate potential outcomes (full-sample draw, in $1000s) used only to
# calibrate the selection-on-gains model below.
Y0 <- df$out_reg_d0/1000 + rnorm(n_total)
Y1 <- df$out_reg_d1/1000 + rnorm(n_total)
df$sorting <- (Y1 - Y0)
#----------------------------------------------------------------------------
# propensity score Specifications: participation (p401) among the eligible,
# driven by the individual gain Y1 - Y0 ("sorting").  Only the coefficients
# are retained and reused inside the Monte Carlo loop.
ps_formula <- as.formula("p401 ~ sorting")
ps_treat <- glm(formula = ps_formula,
                family = binomial(link = "logit"),
                data = df[df$e401==1,],
                x = TRUE)
ps_treat <- ps_treat$coefficients
#ps_treat <- c(0.811, 0.055)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Keep only necessary portions of data.
# NOTE(review): the fitted instrument pscore is renamed to
# 'instrument_ps_baseline' here; see the note inside the Monte Carlo loop.
df <- data.frame(
  out_reg_d0 = df$out_reg_d0,
  out_reg_d1 = df$out_reg_d1,
  instrument_ps_baseline = df$instrument_ps,
  covariates
)
# df <- df[order(df$inc),]
#----------------------------------------------------------------------------
# True ATE (pre-computed by large-scale simulation of this DGP)
ate_true <- 0.8106185
# True QTE (based on simulations) - linear OR
# qte_true <- c(-474.4316 , 3420.1549 , 9453.3591, 9544.2945, 15063.9459 )
# NOnlinear OR
qte_true <- c( -0.1476849 , 0.2006515 , 0.7719787 , 1.3601936, 2.0244570 )
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Now, we have all parameters for the simulation
# Let's start
cl <- parallel::makeCluster(ncores)
registerDoSNOW(cl)
# Progress bar
pb <- txtProgressBar(max = nrep, style = 3)
progress <- function(n) setTxtProgressBar(pb,n)
opts <- list(progress=progress)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#Start the MONTE CARLO loop (reproducible parallel RNG via %dorng%; errored
#replications are silently dropped by .errorhandling='remove')
MC_sims <- foreach(nn = 1:nrep,
                   .options.snow = opts,
                   .errorhandling='remove') %dorng%
  {
    # Draw covariates (which implies we draw instrument pscores, and OR, too)
    id_sim <- sample(1:n_total, n, replace = TRUE )
    df_sim <- df[(id_sim),]
    #xcov <- df_sim[,-(1:3)]
    xcov <- model.matrix(as.formula("~ (inc + loginc + age +fsize + educ+ hown +
                                    marr+ twoearn+ db+ pira) +
                                    I(inc^2) + I(loginc^2) + I(age^2) +
                                    I(fsize^2) + I(educ^2)"), data = df_sim)
    # Y0 <- df_sim$out_reg_d0 + stats::rnorm(n)
    # Y1 <- df_sim$out_reg_d1 + stats::rnorm(n)
    #
    # Potential outcomes (in $1000s) for this replication
    Y0 <- df_sim$out_reg_d0/1000 + stats::rnorm(n)
    Y1 <- df_sim$out_reg_d1/1000 + stats::rnorm(n)
    # NOTE(review): df has no column named 'instrument_ps' after the rebuild
    # above -- '$' partial matching resolves this to 'instrument_ps_baseline'.
    # It works, but is fragile; confirm this is intended.
    Z <- stats::rbinom(n, 1, df_sim$instrument_ps)
    # Generate pscore and Treatment Status (one-sided non-compliance:
    # D0 is identically zero, so only eligible units can participate)
    endog_index <- cbind(1, (Y1-Y0)) %*% ps_treat
    ps <- 1/(1+exp(- endog_index))
    D1 <- stats::rbinom(n, size=1, prob = ps)
    D0 <- rep(0,n)
    treat <- Z*D1 + (1-Z) * D0
    Y <- Y0 * (1 - treat) + Y1 * treat
    # NOTE(review): 'next' is not valid inside a foreach body; it raises an
    # error which .errorhandling='remove' then discards.  These guards thus
    # "work" by dropping the replication, but they also mask genuine errors.
    if(sum(treat) < 20) next
    if(sum(Z) < 50) next
    #----------------------------------------------------------------------------
    # fit different estimators for the propensity score
    #Logit GLM estimator
    fit.glm <- glm.fit(x = xcov, y = Z,
                       family = binomial(link = "logit"))
    # GLM influence function of the pscore estimates (sourced helper)
    glm.lin.rep <- inflc_glm(Z, xcov, fit.glm$fitted.values)
    late_glm <- IPS::LATE(Y,Z, treat, xcov, fit.glm$fitted.values, glm.lin.rep)
    #----------------------------------------------------------------------------
    #----------------------------------------------------------------------------
    # compute QTE using different estimators
    tau <- c(0.1, 0.25, 0.5, 0.75, 0.9)
    # Bandwidth
    bw = "nrd0"
    # GLM
    lqte_glm <- IPS::LQTE(Y, Z, treat, xcov, fit.glm$fitted.values,
                          glm.lin.rep, tau, bw = bw)
    #----------------------------------------------------------------------------
    #Return point estimates and standard errors (6 estimates + 6 std. errors)
    out <- c(
      # Point estimates
      late_glm$late,
      lqte_glm$lqte,
      # std. errors
      late_glm$late.se,
      lqte_glm$lqte.se
    )
    return(out)
  }
#-----------------------------------------------------------------------------
#Stop the cluster
stopCluster(cl)
# Stack the per-replication results: one row per Monte Carlo draw.
# Columns 1:6 hold the point estimates (ATE + five LQTEs), columns 7:12
# the matching standard errors.
mc <- do.call(rbind, MC_sims)
mc_all <- as.matrix(mc)
#-----------------------------------------------------------------------------
# Monte Carlo summaries
mean.mc <- colMeans(mc, na.rm = TRUE)
# One row of true effects per replication: the ATE followed by the five QTEs.
true_effects <- matrix(c(ate_true, qte_true),
                       nrow = nrow(mc), ncol = 6, byrow = TRUE)
# Bias, RMSE and mean absolute bias of the point estimates.
bias_mc <- mean.mc[1:6] - colMeans(true_effects)
RMSE_mc <- sqrt(colMeans((mc[, 1:6] - true_effects)^2, na.rm = TRUE))
abs_bias_mc <- colMeans(abs(mc[, 1:6] - true_effects), na.rm = TRUE)
# Empirical coverage of the pointwise 95% Wald confidence intervals.
coverage <- colMeans(((mc[, 1:6] - 1.96 * mc[, 7:12]) <= true_effects) *
                       ((mc[, 1:6] + 1.96 * mc[, 7:12]) >= true_effects),
                     na.rm = TRUE)
|
\name{Prediction with some naive Bayes classifiers}
\alias{weibullnb.pred}
\alias{normlognb.pred}
\alias{laplacenb.pred}
\title{
Prediction with some naive Bayes classifiers
}
\description{
Prediction with some naive Bayes classifiers.
}
\usage{
weibullnb.pred(xnew, shape, scale, ni)
normlognb.pred(xnew, expmu, sigma, ni)
laplacenb.pred(xnew, location, scale, ni)
}
\arguments{
\item{xnew}{
A numerical matrix with new predictor variables whose group is to be predicted.
For the Gaussian case this must contain positive numbers only.
}
\item{shape}{
A matrix with the group shape parameters. Each row corresponds to a group.
}
\item{scale}{
A matrix with the group scale parameters. Each row corresponds to a group.
}
\item{expmu}{
A matrix with the mean parameters.
}
\item{sigma}{
A matrix with the (MLE, hence biased) variance parameters.
}
\item{location}{
A matrix with the location parameters (medians).
}
\item{scale}{
A matrix with the scale parameters.
}
\item{ni}{
A vector with the frequencies of each group.
}
}
%\details{
%
%}
\value{
A numerical vector with 1, 2, ... denoting the predicted group.
}
%\references{
%
%}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris \email{mtsagris@yahoo.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{weibull.nb} }
}
\examples{
x <- matrix( rweibull( 100, 3, 4 ), ncol = 4 )
ina <- rbinom(100, 1, 0.5) + 1
a <- weibull.nb(x, x, ina)
est <- weibullnb.pred(x, a$shape, a$scale, a$ni)
table(ina, est)
}
| /man/weibullnb.pred.Rd | no_license | cgpu/Rfast2 | R | false | false | 1,507 | rd | \name{Prediction with some naive Bayes classifiers}
\alias{weibullnb.pred}
\alias{normlognb.pred}
\alias{laplacenb.pred}
\title{
Prediction with some naive Bayes classifiers
}
\description{
Prediction with some naive Bayes classifiers.
}
\usage{
weibullnb.pred(xnew, shape, scale, ni)
normlognb.pred(xnew, expmu, sigma, ni)
laplacenb.pred(xnew, location, scale, ni)
}
\arguments{
\item{xnew}{
A numerical matrix with new predictor variables whose group is to be predicted.
For the Gaussian case this contain positive numbers only.
}
\item{shape}{
A matrix with the group shape parameters. Each row corresponds to a group.
}
\item{scale}{
A matrix with the group scale parameters. Each row corresponds to a group.
}
\item{expmu}{
A matrix with the mean parameters.
}
\item{sigma}{
A matrix with the (MLE, hence biased) variance parameters.
}
\item{location}{
A matrix with the location parameters (medians).
}
\item{scale}{
A matrix with the scale parameters.
}
\item{ni}{
A vector with the frequencies of each group.
}
}
%\details{
%
%}
\value{
A numerical vector with 1, 2, ... denoting the predicted group.
}
%\references{
%
%}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris \email{mtsagris@yahoo.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{weibull.nb} }
}
\examples{
x <- matrix( rweibull( 100, 3, 4 ), ncol = 4 )
ina <- rbinom(100, 1, 0.5) + 1
a <- weibull.nb(x, x, ina)
est <- weibullnb.pred(x, a$shape, a$scale, a$ni)
table(ina, est)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a_class-methods_helpers.R
\name{fetch_data}
\alias{fetch_data}
\title{Get Aggregate Data Specified Geography}
\usage{
fetch_data(acs, geography, dataset = c("estimate", "st.err"), choice = NULL)
}
\arguments{
\item{acs}{An object of class \code{"macroACS"}.}
\item{geography}{A character vector allowing string matching via \code{\link[base]{grep}} to
a set of specified geographies. All values may be specified by \code{"*"}.}
\item{dataset}{Either \code{"estimate"} or \code{"st.err"}. Do you want data on estimated
population counts or estimated standard errors?}
\item{choice}{A character vector specifying the name of one of the datasets in \code{acs}}
}
\description{
Gets aggregate, macro, data, either estimate or standard error, for a specified geography
and specified dataset.
}
| /fuzzedpackages/synthACS/man/fetch_data.Rd | no_license | akhikolla/testpackages | R | false | true | 872 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a_class-methods_helpers.R
\name{fetch_data}
\alias{fetch_data}
\title{Get Aggregate Data Specified Geography}
\usage{
fetch_data(acs, geography, dataset = c("estimate", "st.err"), choice = NULL)
}
\arguments{
\item{acs}{An object of class \code{"macroACS"}.}
\item{geography}{A character vector allowing string matching via \code{\link[base]{grep}} to
a set of specified geographies. All values may be specified by \code{"*"}.}
\item{dataset}{Either \code{"estimate"} or \code{"st.err"}. Do you want data on estimated
population counts or estimated standard errors?}
\item{choice}{A character vector specifying the name of one of the datasets in \code{acs}}
}
\description{
Gets aggregate, macro, data, either estimate or standard error, for a specified geography
and specified dataset.
}
|
# DIF TRANSFORMED ITEM DIFFICULTIES (ANGOFF's DELTA METHOD)
require(deltaPlotR)
# difTID: DIF detection via Angoff's Delta (Transformed Item Difficulties)
# method.  Thin wrapper around deltaPlotR::deltaPlot() that (i) splits the
# 'group' column out of 'Data', (ii) recodes membership to 0/1 with
# 1 = focal group, (iii) runs the delta plot, and (iv) returns the result
# augmented with item names/numbering as an object of class "TID".
# Argument names map onto deltaPlot's (thrTID -> thr, nrIter -> maxIter).
# If save.output = TRUE the printed result is captured into <output[1]>.txt
# in getwd() (when output[2] == "default") or in the directory output[2].
difTID<-function (Data, group, focal.name, thrTID = 1.5,
 purify = FALSE, purType = "IPP1", nrIter = 10, alpha = 0.05,
 extreme = "constraint", const.range = c(0.001, 0.999), nrAdd = 1,
 save.output=FALSE, output=c("out","default"))
{
internalTID<-function(){
     # 'group' may be a single column index/name in Data, or a full vector
     # of group membership supplied alongside the item responses.
     if (length(group) == 1) {
        if (is.numeric(group)) {
            gr <- Data[, group]
            DATA <- Data[, (1:ncol(Data)) != group]
            colnames(DATA) <- colnames(Data)[(1:ncol(Data)) !=
                group]
        }
        else {
            gr <- Data[, colnames(Data) == group]
            DATA <- Data[, colnames(Data) != group]
            colnames(DATA) <- colnames(Data)[colnames(Data) !=
                group]
        }
    }
    else {
        gr <- group
        DATA <- Data
    }
    # Recode to 0/1 with 1 = focal group, as deltaPlot expects
    Group <- rep(0, nrow(DATA))
    Group[gr == focal.name] <- 1
RES<-deltaPlotR::deltaPlot(data=cbind(Group,DATA),type="response",group=1,focal.name=focal.name,
thr = thrTID, purify = purify, purType = purType, maxIter = nrIter,
alpha =alpha, extreme = extreme, const.range = const.range, nrAdd = nrAdd,
save.output = save.output,output = output)
# Carry item labels along: use column names when available, otherwise
# fall back to item numbers ('number' flags which was used for printing).
if (is.null(colnames(DATA))) {
itNames<-1:ncol(DATA)
number<-TRUE
}
else {
itNames<-colnames(DATA)
number<-FALSE
}
RES<-c(RES,list(names=itNames,number=number))
class(RES)<-"TID"
return(RES)
}
resToReturn<-internalTID()
if (save.output){
if (output[2]=="default") wd<-paste(getwd(),"/",sep="")
else wd<-output[2]
fileName<-paste(wd,output[1],".txt",sep="")
capture.output(resToReturn,file=fileName)
}
return(resToReturn)
}
## PLOT METHOD
# plot.TID: plot method for "TID" objects.
#   plot = "delta" delegates to deltaPlotR::diagPlot (the classical delta
#   plot); plot = "dist" draws each item's perpendicular distance from the
#   major axis, labels points by item name or number, colours DIF items with
#   'col', and draws the +/- detection threshold lines.
#   With save.plot = TRUE the "dist" plot is re-drawn into a pdf/jpeg file
#   named from save.options; a status message is printed either way.
plot.TID<-function (x, plot="dist",pch = 2, pch.mult = 17, axis.draw = TRUE, thr.draw = FALSE,
    dif.draw = c(1, 3), print.corr = FALSE, xlim = NULL, ylim = NULL,
    xlab = NULL, ylab = NULL, main = NULL, col="red", number=TRUE,save.plot = FALSE,
    save.options = c("plot", "default", "pdf"),...)
{
PLOT<-switch(plot,dist=1,delta=2)
if(is.null(PLOT)) stop("'plot' must be either 'dist' or 'delta'",call.=FALSE)
if (PLOT==2) deltaPlotR::diagPlot(x,pch = pch, pch.mult = pch.mult,
    axis.draw = axis.draw, thr.draw = thr.draw, dif.draw = dif.draw,
    print.corr = print.corr, xlim = xlim , ylim = ylim, xlab = xlab,
    ylab = ylab, main = main, save.plot = save.plot,
    save.options = save.options)
if (PLOT==1) {
# Drawing routine kept as a closure so it can be replayed into a file device
internalTID <- function() {
        res <- x
# y-range wide enough for both the distances and the threshold lines
yl<-c(min(c(res$Dist,-abs(res$thr)),na.rm=TRUE)-0.1,max(c(res$Dist,abs(res$thr)),na.rm=TRUE)+0.1)
plot(res$Dist,xlab = "Item", ylab = "Perpendicular distance",ylim = yl, col="white", main = "Transformed Item Difficulties")
if (!number){
text(1:length(res$Dist), res$Dist, res$names)
if (!is.character(res$DIFitems)) text(res$DIFitems, res$Dist[res$DIFitems], res$names[res$DIFitems],col = col)
}
else{
text(1:length(res$Dist), res$Dist, 1:length(res$Dist))
if (!is.character(res$DIFitems)) text(res$DIFitems, res$Dist[res$DIFitems], res$DIFitems,col = col)
}
abline(h = -abs(res$thr))
abline(h = abs(res$thr))
    }
internalTID()
if (save.plot){
plotype<-NULL
if (save.options[3]=="pdf") plotype<-1
if (save.options[3]=="jpeg") plotype<-2
if (is.null(plotype)) cat("Invalid plot type (should be either 'pdf' or 'jpeg').","\n","The plot was not captured!","\n")
else {
if (save.options[2]=="default") wd<-paste(getwd(),"/",sep="")
else wd<-save.options[2]
fileName<-paste(wd,save.options[1],switch(plotype,'1'=".pdf",'2'=".jpg"),sep="")
if (plotype==1){
{
pdf(file=fileName)
internalTID()
}
dev.off()
}
if (plotype==2){
{
jpeg(filename=fileName)
internalTID()
}
dev.off()
}
cat("The plot was captured and saved into","\n"," '",fileName,"'","\n","\n",sep="")
}
}
else cat("The plot was not captured!","\n",sep="")
}
}
## PRINT METHOD
# print.TID: print method for "TID" objects.  Echoes the delta-plot set-up
# (purification scheme and convergence, extreme-proportion adjustment), the
# per-item statistics table with '***' DIF flags, the major-axis parameters
# and detection threshold(s), the list of DIF items, and where (if anywhere)
# the output was saved.  only.final = TRUE suppresses the
# iteration-by-iteration perpendicular-distance table.
print.TID<-function (x, only.final = TRUE, ...)
{
    res <- x
    cat("\n")
    cat("Detection of Differential Item Functioning using Angoff's Delta method",
        "\n")
    if (res$purify)
        cat(" with item purification", "\n", "\n")
    else cat(" without item purification", "\n", "\n")
    # Purification summary: convergence status and which threshold-update
    # scheme (IPP1/IPP2/IPP3) was used across iterations.
    if (res$purify) {
        if (res$convergence) {
            if (res$nrIter == 1)
                cat("Convergence reached after", res$nrIter,
                  "iteration", "\n", "\n")
            else cat("Convergence reached after", res$nrIter,
                "iterations", "\n", "\n")
        }
        else {
            cat("WARNING: convergence was not reached after",
                res$maxIter, "iterations!", "\n", "\n")
        }
        if (res$nrIter > 1) {
            if (res$purType == "IPP1") {
                cat("Threshold kept fixed to", res$thr[1], "\n")
                if (res$rule == "fixed")
                  cat(" (as fixed by the user [IPP1])", "\n",
                    "\n")
                else cat(" (as computed from normal approximation [IPP1])",
                  "\n", "\n")
            }
            else {
                cat("Threshold adjusted iteratively using normal approximation",
                  "\n")
                cat(" and ", round(res$alpha * 100), "% significance level",
                  "\n", sep = "")
                if (res$purType == "IPP2")
                  cat(" (only slope parameter updated [IPP2])",
                    "\n", "\n")
                else cat(" (full update of the threshold [IPP3])",
                  "\n", "\n")
            }
        }
    }
    if (res$adjust.extreme == "constraint")
        cat("Extreme proportions adjusted by constraining to [",
            round(res$const.range[1], 3), "; ", res$const.range[2],
            "]", "\n", "\n", sep = "")
    else {
        if (res$nrAdd == 1)
            cat("Extreme proportions adjusted by adding one success and one failure",
                "\n", "\n")
        else cat("Extreme proportions adjusted by adding ", res$nrAdd,
            " successes and ", res$nrAdd, " failures", "\n",
            "\n", sep = "")
    }
    # Per-item statistics after the first iteration; items whose absolute
    # distance exceeds the final threshold are flagged with '***'.
    if (res$purify)
        cat("Statistics (after the first iteration):", "\n",
            "\n")
    else cat("Statistics:", "\n", "\n")
    m1 <- round(cbind(res$Props, res$Deltas, res$Dist[, 1]),
        4)
    symb <- symnum(abs(as.numeric(res$Dist[, 1])), c(0, abs(res$thr[length(res$thr)]),
        Inf), symbols = c("", "***"))
    m1 <- noquote(cbind(format(m1, justify = "right"), symb))
    colnames(m1) <- c("Prop.Ref", "Prop.Foc", "Delta.Ref",
        "Delta.Foc", "Dist.", "")
    if (res$number){
     rn <- NULL
     for (i in 1:nrow(m1)) rn[i] <- paste("Item", i, sep = "")
     }
     else rn<-res$names
    rownames(m1) <- rn
    print(m1)
    cat("\n")
    cat("Code: '***' if item is flagged as DIF", "\n", "\n")
    # With purification, repeat the table for the last iteration
    if (res$purify) {
        cat("Statistics (after the last iteration):", "\n", "\n")
        m1 <- round(cbind(res$Props, res$Deltas, res$Dist[,
            ncol(res$Dist)]), 4)
        symb <- symnum(abs(as.numeric(res$Dist[, ncol(res$Dist)])),
            c(0, abs(res$thr[length(res$thr)]), Inf), symbols = c("",
                "***"))
        m1 <- noquote(cbind(format(m1, justify = "right"),
            symb))
        colnames(m1) <- c("Prop.Ref", "Prop.Foc", "Delta.Ref",
            "Delta.Foc", "Dist.", "")
    if (res$number){
     rn <- NULL
     for (i in 1:nrow(m1)) rn[i] <- paste("Item", i, sep = "")
     }
     else rn<-res$names
        rownames(m1) <- rn
        print(m1)
        cat("\n")
        cat("Code: '***' if item is flagged as DIF", "\n", "\n")
    }
    # Optional full table of perpendicular distances per iteration
    if (!only.final) {
        cat("Perpendicular distances:", "\n", "\n")
        m1 <- round(res$Dist, 4)
        rc <- NULL
        for (t in 1:ncol(res$Dist)) rc[t] <- paste("Iter", t,
            sep = "")
        colnames(m1) <- rc
        rn <- NULL
        for (i in 1:nrow(m1)) rn[i] <- paste("Item", i, sep = "")
        rownames(m1) <- rn
        print(m1)
        cat("\n")
    }
    # Major-axis parameters and detection threshold(s): single set when no
    # purification (or one iteration), otherwise first/last or all iterations
    myBool <- ifelse(!res$purify, TRUE, ifelse(res$nrIter ==
        1, TRUE, FALSE))
    if (myBool) {
        cat("Parameters of the major axis:", "\n", "\n")
        np <- round(rbind(res$axis.par), 4)
        rownames(np) <- ""
        colnames(np) <- c("a", "b")
        print(np)
        cat("\n")
        if (res$rule == "norm")
            cat("Detection threshold: ", round(res$thr, 4), " (significance level: ",
                round(res$alpha * 100, 0), "%)", sep = "", "\n",
                "\n")
        else cat("Detection threshold: ", round(res$thr, 4),
            sep = "", "\n", "\n")
    }
    else {
        if (only.final) {
            cat("Parameters of the major axis (first and last iterations only):",
                "\n", "\n")
            if (is.null(dim(res$axis.par)))
                np <- round(rbind(res$axis.par, res$axis.par),
                  4)
            else np <- round(rbind(res$axis.par[c(1, nrow(res$axis.par)),
                ]), 4)
            rownames(np) <- c("First", "Last")
            colnames(np) <- c("a", "b")
            print(np)
            cat("\n")
            if (res$rule == "norm") {
                cat("First and last detection thresholds: ",
                  round(res$thr[1], 4), " and ", round(res$thr[length(res$thr)],
                    4), sep = "", "\n")
                cat(" (significance level: ", round(res$alpha *
                  100, 0), "%)", sep = "", "\n", "\n")
            }
            else cat("First and last detection thresholds: ",
                round(res$thr[1], 4), " and ", round(res$thr[length(res$thr)],
                  4), sep = "", "\n")
        }
        else {
            cat("Parameters of the major axis:", "\n", "\n")
            np <- round(rbind(res$axis.par), 4)
            npr <- NULL
            for (i in 1:nrow(res$axis.par)) npr[i] <- paste("Iter",
                i, sep = "")
            rownames(np) <- npr
            colnames(np) <- c("a", "b")
            print(np)
            cat("\n")
            cat("Detection thresholds:", "\n", "\n")
            mm <- rbind(res$thr)
            rownames(mm) <- ""
            cn <- NULL
            for (i in 1:length(res$thr)) cn[i] <- paste("Iter",
                i, sep = "")
            colnames(mm) <- cn
            print(mm)
            cat("\n")
            if (res$rule == "norm")
                cat("(significance level: ", round(res$alpha *
                  100, 0), "%)", sep = "", "\n", "\n")
            else cat("\n")
        }
    }
    # DIF item list: res$DIFitems is a character message when none detected
    if (is.character(res$DIFitems))
        cat("Items detected as DIF items:", res$DIFitems, "\n",
            "\n")
    else {
        cat("Items detected as DIF items:", "\n")
        namedif <- NULL
        for (i in 1:length(res$DIFitems)) {
        if (res$number) namedif[i] <- paste("Item", res$DIFitems[i], sep = "")
        else namedif[i] <- res$names[res$DIFitems[i]]
        }
        m3 <- cbind(namedif)
        rownames(m3) <- rep("", length(res$DIFitems))
        colnames(m3) <- ""
        print(m3, quote = FALSE)
        cat("\n")
    }
    if (!res$save.output)
        cat("Output was not captured!", "\n")
    else {
        if (res$output[2] == "default")
            wd <- file.path(getwd())
        else wd <- res$output[2]
        nameF <- paste(res$output[1], ".txt", sep = "")
        fileName <- file.path(wd, nameF)
        cat("Output was captured and saved into file", "\n",
            " '", fileName, "'", "\n", "\n", sep = "")
    }
}
| /R/difTID.r | no_license | cran/difR | R | false | false | 11,737 | r | # DIF TRANSFORMED ITEM DIFFICULTIES (ANGOFF's DELTA METHOD)
require(deltaPlotR)
# NOTE(review): verbatim duplicate of the difTID definition earlier in this
# dump (the dataset stores the file content twice).
# difTID: DIF detection via Angoff's Delta (Transformed Item Difficulties)
# method.  Thin wrapper around deltaPlotR::deltaPlot() that splits the
# 'group' column out of 'Data', recodes membership to 0/1 (1 = focal group),
# runs the delta plot, and returns the result augmented with item
# names/numbering as an object of class "TID".  If save.output = TRUE the
# printed result is captured into <output[1]>.txt.
difTID<-function (Data, group, focal.name, thrTID = 1.5,
 purify = FALSE, purType = "IPP1", nrIter = 10, alpha = 0.05,
 extreme = "constraint", const.range = c(0.001, 0.999), nrAdd = 1,
 save.output=FALSE, output=c("out","default"))
{
internalTID<-function(){
     # 'group' may be a single column index/name in Data, or a full vector
     # of group membership supplied alongside the item responses.
     if (length(group) == 1) {
        if (is.numeric(group)) {
            gr <- Data[, group]
            DATA <- Data[, (1:ncol(Data)) != group]
            colnames(DATA) <- colnames(Data)[(1:ncol(Data)) !=
                group]
        }
        else {
            gr <- Data[, colnames(Data) == group]
            DATA <- Data[, colnames(Data) != group]
            colnames(DATA) <- colnames(Data)[colnames(Data) !=
                group]
        }
    }
    else {
        gr <- group
        DATA <- Data
    }
    # Recode to 0/1 with 1 = focal group, as deltaPlot expects
    Group <- rep(0, nrow(DATA))
    Group[gr == focal.name] <- 1
RES<-deltaPlotR::deltaPlot(data=cbind(Group,DATA),type="response",group=1,focal.name=focal.name,
thr = thrTID, purify = purify, purType = purType, maxIter = nrIter,
alpha =alpha, extreme = extreme, const.range = const.range, nrAdd = nrAdd,
save.output = save.output,output = output)
# Carry item labels along: use column names when available, otherwise
# fall back to item numbers ('number' flags which was used for printing).
if (is.null(colnames(DATA))) {
itNames<-1:ncol(DATA)
number<-TRUE
}
else {
itNames<-colnames(DATA)
number<-FALSE
}
RES<-c(RES,list(names=itNames,number=number))
class(RES)<-"TID"
return(RES)
}
resToReturn<-internalTID()
if (save.output){
if (output[2]=="default") wd<-paste(getwd(),"/",sep="")
else wd<-output[2]
fileName<-paste(wd,output[1],".txt",sep="")
capture.output(resToReturn,file=fileName)
}
return(resToReturn)
}
## PLOT METHOD
# NOTE(review): verbatim duplicate of the plot.TID definition earlier in
# this dump (the dataset stores the file content twice).
# plot.TID: plot method for "TID" objects.  plot = "delta" delegates to
# deltaPlotR::diagPlot; plot = "dist" draws each item's perpendicular
# distance from the major axis, colours DIF items with 'col', and draws the
# +/- detection threshold lines.  With save.plot = TRUE the "dist" plot is
# re-drawn into a pdf/jpeg file named from save.options.
plot.TID<-function (x, plot="dist",pch = 2, pch.mult = 17, axis.draw = TRUE, thr.draw = FALSE,
    dif.draw = c(1, 3), print.corr = FALSE, xlim = NULL, ylim = NULL,
    xlab = NULL, ylab = NULL, main = NULL, col="red", number=TRUE,save.plot = FALSE,
    save.options = c("plot", "default", "pdf"),...)
{
PLOT<-switch(plot,dist=1,delta=2)
if(is.null(PLOT)) stop("'plot' must be either 'dist' or 'delta'",call.=FALSE)
if (PLOT==2) deltaPlotR::diagPlot(x,pch = pch, pch.mult = pch.mult,
    axis.draw = axis.draw, thr.draw = thr.draw, dif.draw = dif.draw,
    print.corr = print.corr, xlim = xlim , ylim = ylim, xlab = xlab,
    ylab = ylab, main = main, save.plot = save.plot,
    save.options = save.options)
if (PLOT==1) {
# Drawing routine kept as a closure so it can be replayed into a file device
internalTID <- function() {
        res <- x
# y-range wide enough for both the distances and the threshold lines
yl<-c(min(c(res$Dist,-abs(res$thr)),na.rm=TRUE)-0.1,max(c(res$Dist,abs(res$thr)),na.rm=TRUE)+0.1)
plot(res$Dist,xlab = "Item", ylab = "Perpendicular distance",ylim = yl, col="white", main = "Transformed Item Difficulties")
if (!number){
text(1:length(res$Dist), res$Dist, res$names)
if (!is.character(res$DIFitems)) text(res$DIFitems, res$Dist[res$DIFitems], res$names[res$DIFitems],col = col)
}
else{
text(1:length(res$Dist), res$Dist, 1:length(res$Dist))
if (!is.character(res$DIFitems)) text(res$DIFitems, res$Dist[res$DIFitems], res$DIFitems,col = col)
}
abline(h = -abs(res$thr))
abline(h = abs(res$thr))
    }
internalTID()
if (save.plot){
plotype<-NULL
if (save.options[3]=="pdf") plotype<-1
if (save.options[3]=="jpeg") plotype<-2
if (is.null(plotype)) cat("Invalid plot type (should be either 'pdf' or 'jpeg').","\n","The plot was not captured!","\n")
else {
if (save.options[2]=="default") wd<-paste(getwd(),"/",sep="")
else wd<-save.options[2]
fileName<-paste(wd,save.options[1],switch(plotype,'1'=".pdf",'2'=".jpg"),sep="")
if (plotype==1){
{
pdf(file=fileName)
internalTID()
}
dev.off()
}
if (plotype==2){
{
jpeg(filename=fileName)
internalTID()
}
dev.off()
}
cat("The plot was captured and saved into","\n"," '",fileName,"'","\n","\n",sep="")
}
}
else cat("The plot was not captured!","\n",sep="")
}
}
## PRINT METHOD
# NOTE(review): verbatim duplicate of the print.TID definition earlier in
# this dump (the dataset stores the file content twice).
# print.TID: print method for "TID" objects.  Echoes the delta-plot set-up,
# the per-item statistics table with '***' DIF flags, the major-axis
# parameters and detection threshold(s), the list of DIF items, and where
# (if anywhere) the output was saved.  only.final = TRUE suppresses the
# iteration-by-iteration perpendicular-distance table.
print.TID<-function (x, only.final = TRUE, ...)
{
    res <- x
    cat("\n")
    cat("Detection of Differential Item Functioning using Angoff's Delta method",
        "\n")
    if (res$purify)
        cat(" with item purification", "\n", "\n")
    else cat(" without item purification", "\n", "\n")
    # Purification summary: convergence status and which threshold-update
    # scheme (IPP1/IPP2/IPP3) was used across iterations.
    if (res$purify) {
        if (res$convergence) {
            if (res$nrIter == 1)
                cat("Convergence reached after", res$nrIter,
                  "iteration", "\n", "\n")
            else cat("Convergence reached after", res$nrIter,
                "iterations", "\n", "\n")
        }
        else {
            cat("WARNING: convergence was not reached after",
                res$maxIter, "iterations!", "\n", "\n")
        }
        if (res$nrIter > 1) {
            if (res$purType == "IPP1") {
                cat("Threshold kept fixed to", res$thr[1], "\n")
                if (res$rule == "fixed")
                  cat(" (as fixed by the user [IPP1])", "\n",
                    "\n")
                else cat(" (as computed from normal approximation [IPP1])",
                  "\n", "\n")
            }
            else {
                cat("Threshold adjusted iteratively using normal approximation",
                  "\n")
                cat(" and ", round(res$alpha * 100), "% significance level",
                  "\n", sep = "")
                if (res$purType == "IPP2")
                  cat(" (only slope parameter updated [IPP2])",
                    "\n", "\n")
                else cat(" (full update of the threshold [IPP3])",
                  "\n", "\n")
            }
        }
    }
    if (res$adjust.extreme == "constraint")
        cat("Extreme proportions adjusted by constraining to [",
            round(res$const.range[1], 3), "; ", res$const.range[2],
            "]", "\n", "\n", sep = "")
    else {
        if (res$nrAdd == 1)
            cat("Extreme proportions adjusted by adding one success and one failure",
                "\n", "\n")
        else cat("Extreme proportions adjusted by adding ", res$nrAdd,
            " successes and ", res$nrAdd, " failures", "\n",
            "\n", sep = "")
    }
    # Per-item statistics after the first iteration; items whose absolute
    # distance exceeds the final threshold are flagged with '***'.
    if (res$purify)
        cat("Statistics (after the first iteration):", "\n",
            "\n")
    else cat("Statistics:", "\n", "\n")
    m1 <- round(cbind(res$Props, res$Deltas, res$Dist[, 1]),
        4)
    symb <- symnum(abs(as.numeric(res$Dist[, 1])), c(0, abs(res$thr[length(res$thr)]),
        Inf), symbols = c("", "***"))
    m1 <- noquote(cbind(format(m1, justify = "right"), symb))
    colnames(m1) <- c("Prop.Ref", "Prop.Foc", "Delta.Ref",
        "Delta.Foc", "Dist.", "")
    if (res$number){
     rn <- NULL
     for (i in 1:nrow(m1)) rn[i] <- paste("Item", i, sep = "")
     }
     else rn<-res$names
    rownames(m1) <- rn
    print(m1)
    cat("\n")
    cat("Code: '***' if item is flagged as DIF", "\n", "\n")
    # With purification, repeat the table for the last iteration
    if (res$purify) {
        cat("Statistics (after the last iteration):", "\n", "\n")
        m1 <- round(cbind(res$Props, res$Deltas, res$Dist[,
            ncol(res$Dist)]), 4)
        symb <- symnum(abs(as.numeric(res$Dist[, ncol(res$Dist)])),
            c(0, abs(res$thr[length(res$thr)]), Inf), symbols = c("",
                "***"))
        m1 <- noquote(cbind(format(m1, justify = "right"),
            symb))
        colnames(m1) <- c("Prop.Ref", "Prop.Foc", "Delta.Ref",
            "Delta.Foc", "Dist.", "")
    if (res$number){
     rn <- NULL
     for (i in 1:nrow(m1)) rn[i] <- paste("Item", i, sep = "")
     }
     else rn<-res$names
        rownames(m1) <- rn
        print(m1)
        cat("\n")
        cat("Code: '***' if item is flagged as DIF", "\n", "\n")
    }
    # Optional full table of perpendicular distances per iteration
    if (!only.final) {
        cat("Perpendicular distances:", "\n", "\n")
        m1 <- round(res$Dist, 4)
        rc <- NULL
        for (t in 1:ncol(res$Dist)) rc[t] <- paste("Iter", t,
            sep = "")
        colnames(m1) <- rc
        rn <- NULL
        for (i in 1:nrow(m1)) rn[i] <- paste("Item", i, sep = "")
        rownames(m1) <- rn
        print(m1)
        cat("\n")
    }
    # Major-axis parameters and detection threshold(s): single set when no
    # purification (or one iteration), otherwise first/last or all iterations
    myBool <- ifelse(!res$purify, TRUE, ifelse(res$nrIter ==
        1, TRUE, FALSE))
    if (myBool) {
        cat("Parameters of the major axis:", "\n", "\n")
        np <- round(rbind(res$axis.par), 4)
        rownames(np) <- ""
        colnames(np) <- c("a", "b")
        print(np)
        cat("\n")
        if (res$rule == "norm")
            cat("Detection threshold: ", round(res$thr, 4), " (significance level: ",
                round(res$alpha * 100, 0), "%)", sep = "", "\n",
                "\n")
        else cat("Detection threshold: ", round(res$thr, 4),
            sep = "", "\n", "\n")
    }
    else {
        if (only.final) {
            cat("Parameters of the major axis (first and last iterations only):",
                "\n", "\n")
            if (is.null(dim(res$axis.par)))
                np <- round(rbind(res$axis.par, res$axis.par),
                  4)
            else np <- round(rbind(res$axis.par[c(1, nrow(res$axis.par)),
                ]), 4)
            rownames(np) <- c("First", "Last")
            colnames(np) <- c("a", "b")
            print(np)
            cat("\n")
            if (res$rule == "norm") {
                cat("First and last detection thresholds: ",
                  round(res$thr[1], 4), " and ", round(res$thr[length(res$thr)],
                    4), sep = "", "\n")
                cat(" (significance level: ", round(res$alpha *
                  100, 0), "%)", sep = "", "\n", "\n")
            }
            else cat("First and last detection thresholds: ",
                round(res$thr[1], 4), " and ", round(res$thr[length(res$thr)],
                  4), sep = "", "\n")
        }
        else {
            cat("Parameters of the major axis:", "\n", "\n")
            np <- round(rbind(res$axis.par), 4)
            npr <- NULL
            for (i in 1:nrow(res$axis.par)) npr[i] <- paste("Iter",
                i, sep = "")
            rownames(np) <- npr
            colnames(np) <- c("a", "b")
            print(np)
            cat("\n")
            cat("Detection thresholds:", "\n", "\n")
            mm <- rbind(res$thr)
            rownames(mm) <- ""
            cn <- NULL
            for (i in 1:length(res$thr)) cn[i] <- paste("Iter",
                i, sep = "")
            colnames(mm) <- cn
            print(mm)
            cat("\n")
            if (res$rule == "norm")
                cat("(significance level: ", round(res$alpha *
                  100, 0), "%)", sep = "", "\n", "\n")
            else cat("\n")
        }
    }
    # DIF item list: res$DIFitems is a character message when none detected
    if (is.character(res$DIFitems))
        cat("Items detected as DIF items:", res$DIFitems, "\n",
            "\n")
    else {
        cat("Items detected as DIF items:", "\n")
        namedif <- NULL
        for (i in 1:length(res$DIFitems)) {
        if (res$number) namedif[i] <- paste("Item", res$DIFitems[i], sep = "")
        else namedif[i] <- res$names[res$DIFitems[i]]
        }
        m3 <- cbind(namedif)
        rownames(m3) <- rep("", length(res$DIFitems))
        colnames(m3) <- ""
        print(m3, quote = FALSE)
        cat("\n")
    }
    if (!res$save.output)
        cat("Output was not captured!", "\n")
    else {
        if (res$output[2] == "default")
            wd <- file.path(getwd())
        else wd <- res$output[2]
        nameF <- paste(res$output[1], ".txt", sep = "")
        fileName <- file.path(wd, nameF)
        cat("Output was captured and saved into file", "\n",
            " '", fileName, "'", "\n", "\n", sep = "")
    }
}
|
# Specify the location of the module from the data system root directory
# NOTE(review): AGLUPROC_DIR is assumed to be defined by the data system's
# configuration headers before this file is sourced -- confirm against build.
MODULE_PROC_ROOT <- AGLUPROC_DIR
# -----------------------------------------------------------------------------
# rename_biocrops: a function for changing the names of "biomass" in selected region/AEZs.
# Rows of 'data' are matched to rows of 'lookup' on a region + match-variable
# key (data_matchvar vs lookup_matchvar); for every matched row, data_var1
# (always) and data_var2/data_var3 (when supplied) are overwritten with the
# corresponding lookup values.  Returns the modified table.
rename_biocrops <- function( data, lookup, data_matchvar, lookup_matchvar, data_var1, data_var2=NA, data_var3 = NA ){
	data_new <- data
	# Build the region + match-variable key on both tables
	data_new$ID <- paste( data_new$region, data_new[[data_matchvar]] )
	lookup$ID <- paste( lookup$region, lookup[[lookup_matchvar]] )
	matched <- data_new$ID %in% lookup$ID
	lookup_rows <- match( data_new$ID[ matched ], lookup$ID )
	# data_var1 is always replaced; the optional columns only when given
	replace_vars <- c( data_var1,
	                   if( !is.na( data_var2 ) ) data_var2,
	                   if( !is.na( data_var3 ) ) data_var3 )
	for( var in replace_vars ){
		data_new[ matched, var ] <- lookup[ lookup_rows, var ]
	}
	# Drop the temporary key column before returning
	data_new <- data_new[ names( data_new ) != "ID" ]
	return( data_new )
}
# -----------------------------------------------------------------------------
#downscale_FAO_country: downscale countries that later separated into multiple
# modern countries (e.g. USSR). For the years before `dissolution_year`, each
# successor country's values are estimated by scaling its dissolution-year
# value by the pre-dissolution/dissolution-year ratio of the combined total.
# Args:
#   data:             FAO data frame with a `countries` column and "X"-prefixed
#                     year columns
#   country_name:     name of the dissolved country to remove (e.g. "USSR")
#   dissolution_year: first year in which successor countries report data
#   item_name, element_name: ID columns used to match ratio rows
#   years:            historical years to fill (global default)
# Returns: `data` without the dissolved country, with pre-dissolution years
#          filled in for the successor countries (NA ratios set to 0).
downscale_FAO_country <- function( data, country_name, dissolution_year, item_name = "item",
                                   element_name = "element", years = AGLU_historical_years ){
	X_dissolution_year <- paste( "X", dissolution_year, sep = "" )
	ctry_years <- years[ years < dissolution_year ]
	X_ctry_years <- paste( "X", ctry_years, sep = "" )
	# Ratio of each pre-dissolution year to the dissolution year, computed on
	# the sum over all countries for each item/element combination
	data_ratio <- aggregate( data[ c( X_ctry_years, X_dissolution_year ) ], by=as.list( data[ c( item_name, element_name ) ] ), sum )
	data_ratio[ c( X_ctry_years, X_dissolution_year ) ] <- data_ratio[ c( X_ctry_years, X_dissolution_year ) ] / data_ratio[[ X_dissolution_year ]]
	# Drop the dissolved country; backfill successor countries' early years
	data_new <- subset( data, countries != country_name )
	data_new[ X_ctry_years ] <- data_new[[ X_dissolution_year ]] * data_ratio[
	  match( vecpaste( data_new[ c( item_name, element_name ) ] ), vecpaste( data_ratio[ c( item_name, element_name ) ] ) ), X_ctry_years ]
	# Any unmatched item/element combinations get 0 rather than NA
	data_new[ X_ctry_years ][ is.na( data_new[ X_ctry_years ] ) ] <- 0
	return( data_new )
}
#interpolate_IMAGE_years: reshape an IMAGE table (one column per region, e.g.
# "X12") to one column per year, then interpolate to all historical years.
# Args:
#   data:   IMAGE table whose non-id columns are region columns named "X<ID>"
#   idvars: id columns passed to melt(); one of them must be `year`
#   years:  target historical years for interpolation (global default)
# Returns: data frame keyed by IMAGE_region_ID with "X"-prefixed year columns.
interpolate_IMAGE_years <- function( data, idvars, years = AGLU_historical_years ){
	data.melt <- melt( data, id.vars = idvars )
	# Region columns are named "X<ID>"; strip the leading "X" to recover the ID
	data.melt$IMAGE_region_ID <- as.numeric( substr( as.character( data.melt$variable ), 2, nchar( as.character( data.melt$variable ) ) ) )
	data.melt$year <- paste( "X", data.melt$year, sep = "" )
	data_new <- dcast( data.melt, IMAGE_region_ID + ... ~ year )
	data_new <- data_new[ names( data_new) != "variable" ]
	# Backfill 1960 with the 1970 value (presumably IMAGE data starts in 1970
	# — TODO confirm) so interpolation has a starting point
	data_new$X1960 <- data_new$X1970
	data_new <- gcam_interp( data_new, years )
	return( data_new )
}
#downscale_IMAGE_regions: downscale IMAGE-region data to all countries (iso).
# Each country inherits the values of its IMAGE region (AGLU_ctry supplies the
# iso -> IMAGE_region_ID mapping); countries whose region has no data are
# dropped by na.omit().
# Args:
#   data:   data frame with an IMAGE_region_ID column plus `years` columns
#   idvars: ID columns used to match country rows back to source region rows
#           (must include IMAGE_region_ID)
#   years:  names of the year value columns to carry over (global default)
# Returns: data frame with one row per iso, columns c("iso", idvars, years).
downscale_IMAGE_regions <- function( data, idvars, years = X_AGLU_historical_years ){
	# Use region 1's rows as a template for the non-year columns
	data_new <- data[ data$IMAGE_region_ID==1, !names( data ) %in% years ]
	data_new <- repeat_and_add_vector( data_new, "iso", sort( unique( AGLU_ctry$iso ) ) )
	data_new$IMAGE_region_ID <- AGLU_ctry$IMAGE_region_ID[ match( data_new$iso, AGLU_ctry$iso ) ]
	# Pull each country's values from its own IMAGE region's row
	data_new[ years ] <- data[
	  match( vecpaste( data_new[ idvars ] ),
	         vecpaste( data[ idvars ] ) ),
	  years ]
	data_new <- na.omit( data_new )
	data_new <- data_new[ c("iso", idvars, years ) ]
	# BUG FIX: the original ended on an assignment, so the result was returned
	# only invisibly; return it explicitly.
	return( data_new )
}
# TODO: figure out why there are two, can we move these to GCAM_header.R? I just need the basic functionality to use in get_logit_fn_tables
# write_to_all_regions: replicate a table across all GCAM regions, filling in
# fill-out years and (optionally) traded-market naming, then return only the
# requested columns.
write_to_all_regions <- function( data, names, has.traded=F, apply.to = "selected", set.market = F ){
	out <- data
	# Pre-populate fill-out year columns when the caller requests them
	for( fillout.col in c( "logit.year.fillout", "price.exp.year.fillout" ) ) {
		if( fillout.col %in% names ) out[[fillout.col]] <- "start-year"
	}
	out <- set_years( out )
	out <- repeat_and_add_vector( out, "GCAM_region_ID", GCAM_region_names$GCAM_region_ID )
	out <- add_region_name( out )
	if( has.traded ){
		# For traded goods the market is named after the producing region
		if( set.market ) out$market.name <- out$region
		out <- set_traded_names( out, apply.to )
	}
	return( out[ names ] )
}
# -----------------------------------------------------------------------------
# write_to_all_regions_ag: write out an ag table to all regions. A simplified
# variant of write_to_all_regions() that only handles logit.year.fillout and
# market.name; returns only the requested columns.
write_to_all_regions_ag <- function( data, names ){
	out <- data
	if ( "logit.year.fillout" %in% names ) out$logit.year.fillout <- "start-year"
	out <- set_years( out )
	out <- repeat_and_add_vector( out, "GCAM_region_ID", GCAM_region_names$GCAM_region_ID )
	out <- add_region_name( out )
	# Market is named after the region itself when requested
	if ( "market.name" %in% names ) out$market.name <- out$region
	return( out[ names ] )
}
#remove_AEZ_nonexist: drop rows for region/AEZ combinations that do not exist.
# Extracts the trailing 5-character AEZ name (e.g. "AEZ01") from `AEZcol`,
# filters out rows matching the nonexistent region/AEZ table, and drops the
# temporary AEZ column again. `reg` and `AEZ` are global column-name constants.
# Args:
#   data:        data frame containing the `AEZcol` and region columns
#   AEZcol:      column whose values end in the AEZ identifier
#   AEZnonexist: table of region x AEZ combinations to remove
# Returns: the filtered data frame.
remove_AEZ_nonexist <- function( data, AEZcol = "AgSupplySubsector", AEZnonexist = L125.R_AEZ_nonexist ){
	AEZnonexist <- add_region_name( AEZnonexist )
	# Last 5 characters of AEZcol hold the AEZ name (e.g. "Corn_AEZ07" -> "AEZ07")
	data[[AEZ]] <- substr( as.character( data[[AEZcol]] ), nchar( as.character( data[[AEZcol]] ) ) - 4, nchar( as.character( data[[AEZcol]] ) ) )
	data <- data[ !vecpaste( data[ c( reg, AEZ ) ] ) %in% vecpaste( AEZnonexist[ c( reg, AEZ ) ] ), ]
	data <- data[ names( data ) != AEZ ]
	# BUG FIX: the original ended on an assignment, so the result was returned
	# only invisibly; return it explicitly.
	return( data )
}
#add_agtech_names: derive ag supply sector / subsector / technology names from
# the commodity and AEZ columns (sector = commodity; subsector and technology
# = commodity + delimiter + AEZ). Column-name constants come from globals.
add_agtech_names <- function( data ){
	# Subsector and technology share the same "commodity_AEZ" name
	commodity_aez <- paste( data[[C]], data[[AEZ]], sep = AEZ_delimiter )
	data[[agsupp]] <- data[[C]]
	data[[agsubs]] <- commodity_aez
	data[[agtech]] <- commodity_aez
	return( data )
}
#append_AEZ: append the row's AEZ name to up to five specified columns.
# Each non-NA varN argument names a column whose values are suffixed with
# AEZ_delimiter + the AEZ value (globals `AEZ` and `AEZ_delimiter`).
# Args:
#   data:       data frame containing the AEZ column
#   var1..var5: column names to modify; var2-var5 are optional (NA = skip)
# Returns: `data` with the named columns suffixed by the AEZ name.
append_AEZ <- function( data, var1 = "LandNode1", var2 = NA, var3 = NA, var4 = NA, var5 = NA ){
	# Loop over the requested columns instead of repeating the paste() call
	# five times (original was copy-pasted per variable)
	for( var in c( var1, var2, var3, var4, var5 ) ){
		if( !is.na( var ) ){
			data[[var]] <- paste( data[[var]], data[[AEZ]], sep = AEZ_delimiter )
		}
	}
	return( data )
}
#add_node_leaf_names: match in the node and leaf names from a land nesting
# table, and optionally append AEZ names to all of them.
# Args:
#   data:          data frame with the land-type column `LT_name`
#   nesting_table: lookup table mapping leaf names to LandNode1..4 columns
#   leaf_name:     name of the leaf column to create (and match on in
#                  nesting_table)
#   LT_name:       land-type column in `data` used for matching
#   LN1..LN4:      node column names to fill; LN2-LN4 optional (NA = skip)
#   append_AEZ:    if TRUE, append AEZ names to leaf and node columns.
#                  NOTE(review): this parameter shadows the append_AEZ()
#                  function name; it still works but is confusing.
# Returns: `data` with LandAllocatorRoot, node, and leaf columns added.
add_node_leaf_names <- function( data, nesting_table, leaf_name, LT_name = LT, LN1 = "LandNode1", LN2 = NA, LN3 = NA, LN4 = NA, append_AEZ = T ){
	data$LandAllocatorRoot <- "root"
	# Look up each land type's position in the nesting hierarchy
	data[[LN1]] <- nesting_table[[LN1]][ match( data[[LT_name]], nesting_table[[leaf_name]] ) ]
	if( !is.na( LN2 ) ){ data[[LN2]] <- nesting_table[[LN2]][ match( data[[LT_name]], nesting_table[[leaf_name]] ) ] }
	if( !is.na( LN3 ) ){ data[[LN3]] <- nesting_table[[LN3]][ match( data[[LT_name]], nesting_table[[leaf_name]] ) ] }
	if( !is.na( LN4 ) ){ data[[LN4]] <- nesting_table[[LN4]][ match( data[[LT_name]], nesting_table[[leaf_name]] ) ] }
	# The leaf is named after the land type itself
	data[[leaf_name]] <- data[[LT_name]]
	if( append_AEZ == T ){ data <- append_AEZ( data, var1 = leaf_name, var2 = LN1, var3 = LN2, var4 = LN3, var5 = LN4 ) }
	return( data )
}
#add_carbon_info: add carbon densities and mature ages from the specified
# tables, matching on region / land type / AEZ. `LT` and `AEZ` are global
# column-name constants; min.*.carbon.density are global floor values.
# Args:
#   data:      data frame with region, `LT_name`, and AEZ columns
#   veg_data:  table of vegetation carbon densities (column `value`)
#   soil_data: table of soil carbon densities (column `value`)
#   age_data:  table of mature ages (column `value`)
#   LT_name:   column in `data` holding the land type used for the match
# Returns: `data` with historical/current carbon densities, mature age, and
#          minimum carbon density columns added.
add_carbon_info <- function( data, veg_data, soil_data, age_data, LT_name = "Cdensity_LT" ){
	# Match each row's region/land-type/AEZ against the source tables
	data$hist.veg.carbon.density <- veg_data$value[
      match( vecpaste( data[ c( "region", LT_name, AEZ ) ] ),
             vecpaste( veg_data[ c( "region", LT, AEZ ) ] ) ) ]
	data$hist.soil.carbon.density <- soil_data$value[
      match( vecpaste( data[ c( "region", LT_name, AEZ ) ] ),
             vecpaste( soil_data[ c( "region", LT, AEZ ) ] ) ) ]
	# Mature age applies from the first model base year onward
	data$mature.age.year.fillout <- min( model_base_years )
	data$mature.age <- age_data$value[
      match( vecpaste( data[ c( "region", LT_name, AEZ ) ] ),
             vecpaste( age_data[ c( "region", LT, AEZ ) ] ) ) ]
	# Current densities start out equal to the historical ones
	data$veg.carbon.density <- data$hist.veg.carbon.density
	data$soil.carbon.density <- data$hist.soil.carbon.density
	data$min.veg.carbon.density <- min.veg.carbon.density
	data$min.soil.carbon.density <- min.soil.carbon.density
	return( data )
}
# reduce_mgd_carbon: scale managed Pasture and Forest carbon densities down
# from their unmanaged equivalents, using the global multipliers
# Cveg_Mult_* and Csoil_Mult_*. Both the historical and current density
# columns are scaled.
# Args:
#   data:   data frame with a land-type column (global `LT`) and the four
#           *.carbon.density columns
#   LTfor:  land-type label for managed forest rows
#   LTpast: land-type label for managed pasture rows
# Returns: `data` with the density columns scaled for the matching rows.
reduce_mgd_carbon <- function( data, LTfor = "Forest", LTpast = "Pasture" ){
	# Pasture: vegetation and soil multipliers applied separately
	data[ data[[LT]] == LTpast, c( "hist.veg.carbon.density", "veg.carbon.density" ) ] <-
      data[ data[[LT]] == LTpast, c( "hist.veg.carbon.density", "veg.carbon.density" ) ] * Cveg_Mult_UnmgdPast_MgdPast
	data[ data[[LT]] == LTpast, c( "hist.soil.carbon.density", "soil.carbon.density" ) ] <-
      data[ data[[LT]] == LTpast, c( "hist.soil.carbon.density", "soil.carbon.density" ) ] * Csoil_Mult_UnmgdPast_MgdPast
	# Forest: same pattern with the forest multipliers
	data[ data[[LT]] == LTfor, c( "hist.veg.carbon.density", "veg.carbon.density" ) ] <-
      data[ data[[LT]] == LTfor, c( "hist.veg.carbon.density", "veg.carbon.density" ) ] * Cveg_Mult_UnmgdFor_MgdFor
	data[ data[[LT]] == LTfor, c( "hist.soil.carbon.density", "soil.carbon.density" ) ] <-
      data[ data[[LT]] == LTfor, c( "hist.soil.carbon.density", "soil.carbon.density" ) ] * Csoil_Mult_UnmgdFor_MgdFor
	return( data )
}
| /input/gcam-data-system/_common/headers/AGLU_header.R | permissive | Randynat/gcam-core | R | false | false | 9,274 | r | # Specify the location of the module from the data system root directory
# Location of this module's processing directory, relative to the data system
# root. AGLUPROC_DIR is defined by the data-system-wide header sourced earlier.
MODULE_PROC_ROOT <- AGLUPROC_DIR
# -----------------------------------------------------------------------------
# rename_biocrops: overwrite crop-name columns for selected region/AEZ rows
# (used to rename "biomass" in specific region/AEZs).
# Args:
#   data:            data frame with a `region` column and `data_matchvar`
#   lookup:          data frame with `region`, `lookup_matchvar`, and the
#                    replacement values in the data_var* columns
#   data_matchvar:   column in `data` combined with region to form the key
#   lookup_matchvar: corresponding key column in `lookup`
#   data_var1..3:    names of columns to overwrite; var2/var3 optional (NA)
# Returns: `data` with matched rows' data_var columns replaced from `lookup`.
rename_biocrops <- function( data, lookup, data_matchvar, lookup_matchvar, data_var1, data_var2=NA, data_var3 = NA ){
	data_new <- data
	# Composite "region + matchvar" key on both tables, used for row matching
	data_new$ID <- paste( data_new$region, data_new[[data_matchvar]] )
	lookup$ID <- paste( lookup$region, lookup[[lookup_matchvar]] )
	# For rows whose key appears in the lookup, copy in the replacement value
	data_new[ data_new$ID %in% lookup$ID, data_var1 ] <- lookup[
      match( data_new$ID[ data_new$ID %in% lookup$ID ], lookup$ID ),
      data_var1 ]
	# Optional second and third columns, replaced the same way when supplied
	if( !is.na( data_var2 ) ) {
		data_new[ data_new$ID %in% lookup$ID, data_var2 ] <- lookup[
	      match( data_new$ID[ data_new$ID %in% lookup$ID ], lookup$ID ),
	      data_var2 ] }
	if( !is.na( data_var3 ) ) {
		data_new[ data_new$ID %in% lookup$ID, data_var3 ] <- lookup[
	      match( data_new$ID[ data_new$ID %in% lookup$ID ], lookup$ID ),
	      data_var3 ] }
	# Drop the temporary key column before returning
	data_new <- data_new[ names( data_new ) != "ID" ]
	return (data_new )
}
# -----------------------------------------------------------------------------
#downscale_FAO_country: downscale countries that later separated into multiple
# modern countries (e.g. USSR). For the years before `dissolution_year`, each
# successor country's values are estimated by scaling its dissolution-year
# value by the pre-dissolution/dissolution-year ratio of the combined total.
# Args:
#   data:             FAO data frame with a `countries` column and "X"-prefixed
#                     year columns
#   country_name:     name of the dissolved country to remove (e.g. "USSR")
#   dissolution_year: first year in which successor countries report data
#   item_name, element_name: ID columns used to match ratio rows
#   years:            historical years to fill (global default)
# Returns: `data` without the dissolved country, with pre-dissolution years
#          filled in for the successor countries (NA ratios set to 0).
downscale_FAO_country <- function( data, country_name, dissolution_year, item_name = "item",
                                   element_name = "element", years = AGLU_historical_years ){
	X_dissolution_year <- paste( "X", dissolution_year, sep = "" )
	ctry_years <- years[ years < dissolution_year ]
	X_ctry_years <- paste( "X", ctry_years, sep = "" )
	# Ratio of each pre-dissolution year to the dissolution year, computed on
	# the sum over all countries for each item/element combination
	data_ratio <- aggregate( data[ c( X_ctry_years, X_dissolution_year ) ], by=as.list( data[ c( item_name, element_name ) ] ), sum )
	data_ratio[ c( X_ctry_years, X_dissolution_year ) ] <- data_ratio[ c( X_ctry_years, X_dissolution_year ) ] / data_ratio[[ X_dissolution_year ]]
	# Drop the dissolved country; backfill successor countries' early years
	data_new <- subset( data, countries != country_name )
	data_new[ X_ctry_years ] <- data_new[[ X_dissolution_year ]] * data_ratio[
	  match( vecpaste( data_new[ c( item_name, element_name ) ] ), vecpaste( data_ratio[ c( item_name, element_name ) ] ) ), X_ctry_years ]
	# Any unmatched item/element combinations get 0 rather than NA
	data_new[ X_ctry_years ][ is.na( data_new[ X_ctry_years ] ) ] <- 0
	return( data_new )
}
#interpolate_IMAGE_years: reshape an IMAGE table (one column per region, e.g.
# "X12") to one column per year, then interpolate to all historical years.
# Args:
#   data:   IMAGE table whose non-id columns are region columns named "X<ID>"
#   idvars: id columns passed to melt(); one of them must be `year`
#   years:  target historical years for interpolation (global default)
# Returns: data frame keyed by IMAGE_region_ID with "X"-prefixed year columns.
interpolate_IMAGE_years <- function( data, idvars, years = AGLU_historical_years ){
	data.melt <- melt( data, id.vars = idvars )
	# Region columns are named "X<ID>"; strip the leading "X" to recover the ID
	data.melt$IMAGE_region_ID <- as.numeric( substr( as.character( data.melt$variable ), 2, nchar( as.character( data.melt$variable ) ) ) )
	data.melt$year <- paste( "X", data.melt$year, sep = "" )
	data_new <- dcast( data.melt, IMAGE_region_ID + ... ~ year )
	data_new <- data_new[ names( data_new) != "variable" ]
	# Backfill 1960 with the 1970 value (presumably IMAGE data starts in 1970
	# — TODO confirm) so interpolation has a starting point
	data_new$X1960 <- data_new$X1970
	data_new <- gcam_interp( data_new, years )
	return( data_new )
}
#downscale_IMAGE_regions: downscale IMAGE-region data to all countries (iso).
# Each country inherits the values of its IMAGE region (AGLU_ctry supplies the
# iso -> IMAGE_region_ID mapping); countries whose region has no data are
# dropped by na.omit().
# Args:
#   data:   data frame with an IMAGE_region_ID column plus `years` columns
#   idvars: ID columns used to match country rows back to source region rows
#           (must include IMAGE_region_ID)
#   years:  names of the year value columns to carry over (global default)
# Returns: data frame with one row per iso, columns c("iso", idvars, years).
downscale_IMAGE_regions <- function( data, idvars, years = X_AGLU_historical_years ){
	# Use region 1's rows as a template for the non-year columns
	data_new <- data[ data$IMAGE_region_ID==1, !names( data ) %in% years ]
	data_new <- repeat_and_add_vector( data_new, "iso", sort( unique( AGLU_ctry$iso ) ) )
	data_new$IMAGE_region_ID <- AGLU_ctry$IMAGE_region_ID[ match( data_new$iso, AGLU_ctry$iso ) ]
	# Pull each country's values from its own IMAGE region's row
	data_new[ years ] <- data[
	  match( vecpaste( data_new[ idvars ] ),
	         vecpaste( data[ idvars ] ) ),
	  years ]
	data_new <- na.omit( data_new )
	data_new <- data_new[ c("iso", idvars, years ) ]
	# BUG FIX: the original ended on an assignment, so the result was returned
	# only invisibly; return it explicitly.
	return( data_new )
}
# TODO: figure out why there are two, can we move these to GCAM_header.R? I just need the basic functionality to use in get_logit_fn_tables
# write_to_all_regions: replicate a table across all GCAM regions, filling in
# fill-out years and (optionally) traded-market naming, then return only the
# requested columns.
write_to_all_regions <- function( data, names, has.traded=F, apply.to = "selected", set.market = F ){
	out <- data
	# Pre-populate fill-out year columns when the caller requests them
	for( fillout.col in c( "logit.year.fillout", "price.exp.year.fillout" ) ) {
		if( fillout.col %in% names ) out[[fillout.col]] <- "start-year"
	}
	out <- set_years( out )
	out <- repeat_and_add_vector( out, "GCAM_region_ID", GCAM_region_names$GCAM_region_ID )
	out <- add_region_name( out )
	if( has.traded ){
		# For traded goods the market is named after the producing region
		if( set.market ) out$market.name <- out$region
		out <- set_traded_names( out, apply.to )
	}
	return( out[ names ] )
}
# -----------------------------------------------------------------------------
# write_to_all_regions_ag: write out an ag table to all regions. A simplified
# variant of write_to_all_regions() that only handles logit.year.fillout and
# market.name; returns only the requested columns.
write_to_all_regions_ag <- function( data, names ){
	out <- data
	if ( "logit.year.fillout" %in% names ) out$logit.year.fillout <- "start-year"
	out <- set_years( out )
	out <- repeat_and_add_vector( out, "GCAM_region_ID", GCAM_region_names$GCAM_region_ID )
	out <- add_region_name( out )
	# Market is named after the region itself when requested
	if ( "market.name" %in% names ) out$market.name <- out$region
	return( out[ names ] )
}
#remove_AEZ_nonexist: drop rows for region/AEZ combinations that do not exist.
# Extracts the trailing 5-character AEZ name (e.g. "AEZ01") from `AEZcol`,
# filters out rows matching the nonexistent region/AEZ table, and drops the
# temporary AEZ column again. `reg` and `AEZ` are global column-name constants.
# Args:
#   data:        data frame containing the `AEZcol` and region columns
#   AEZcol:      column whose values end in the AEZ identifier
#   AEZnonexist: table of region x AEZ combinations to remove
# Returns: the filtered data frame.
remove_AEZ_nonexist <- function( data, AEZcol = "AgSupplySubsector", AEZnonexist = L125.R_AEZ_nonexist ){
	AEZnonexist <- add_region_name( AEZnonexist )
	# Last 5 characters of AEZcol hold the AEZ name (e.g. "Corn_AEZ07" -> "AEZ07")
	data[[AEZ]] <- substr( as.character( data[[AEZcol]] ), nchar( as.character( data[[AEZcol]] ) ) - 4, nchar( as.character( data[[AEZcol]] ) ) )
	data <- data[ !vecpaste( data[ c( reg, AEZ ) ] ) %in% vecpaste( AEZnonexist[ c( reg, AEZ ) ] ), ]
	data <- data[ names( data ) != AEZ ]
	# BUG FIX: the original ended on an assignment, so the result was returned
	# only invisibly; return it explicitly.
	return( data )
}
#add_agtech_names: derive ag supply sector / subsector / technology names from
# the commodity and AEZ columns (sector = commodity; subsector and technology
# = commodity + delimiter + AEZ). Column-name constants come from globals.
add_agtech_names <- function( data ){
	# Subsector and technology share the same "commodity_AEZ" name
	commodity_aez <- paste( data[[C]], data[[AEZ]], sep = AEZ_delimiter )
	data[[agsupp]] <- data[[C]]
	data[[agsubs]] <- commodity_aez
	data[[agtech]] <- commodity_aez
	return( data )
}
#append_AEZ: append the row's AEZ name to up to five specified columns.
# Each non-NA varN argument names a column whose values are suffixed with
# AEZ_delimiter + the AEZ value (globals `AEZ` and `AEZ_delimiter`).
# Args:
#   data:       data frame containing the AEZ column
#   var1..var5: column names to modify; var2-var5 are optional (NA = skip)
# Returns: `data` with the named columns suffixed by the AEZ name.
append_AEZ <- function( data, var1 = "LandNode1", var2 = NA, var3 = NA, var4 = NA, var5 = NA ){
	# Loop over the requested columns instead of repeating the paste() call
	# five times (original was copy-pasted per variable)
	for( var in c( var1, var2, var3, var4, var5 ) ){
		if( !is.na( var ) ){
			data[[var]] <- paste( data[[var]], data[[AEZ]], sep = AEZ_delimiter )
		}
	}
	return( data )
}
#add_node_leaf_names: match in the node and leaf names from a land nesting
# table, and optionally append AEZ names to all of them.
# Args:
#   data:          data frame with the land-type column `LT_name`
#   nesting_table: lookup table mapping leaf names to LandNode1..4 columns
#   leaf_name:     name of the leaf column to create (and match on in
#                  nesting_table)
#   LT_name:       land-type column in `data` used for matching
#   LN1..LN4:      node column names to fill; LN2-LN4 optional (NA = skip)
#   append_AEZ:    if TRUE, append AEZ names to leaf and node columns.
#                  NOTE(review): this parameter shadows the append_AEZ()
#                  function name; it still works but is confusing.
# Returns: `data` with LandAllocatorRoot, node, and leaf columns added.
add_node_leaf_names <- function( data, nesting_table, leaf_name, LT_name = LT, LN1 = "LandNode1", LN2 = NA, LN3 = NA, LN4 = NA, append_AEZ = T ){
	data$LandAllocatorRoot <- "root"
	# Look up each land type's position in the nesting hierarchy
	data[[LN1]] <- nesting_table[[LN1]][ match( data[[LT_name]], nesting_table[[leaf_name]] ) ]
	if( !is.na( LN2 ) ){ data[[LN2]] <- nesting_table[[LN2]][ match( data[[LT_name]], nesting_table[[leaf_name]] ) ] }
	if( !is.na( LN3 ) ){ data[[LN3]] <- nesting_table[[LN3]][ match( data[[LT_name]], nesting_table[[leaf_name]] ) ] }
	if( !is.na( LN4 ) ){ data[[LN4]] <- nesting_table[[LN4]][ match( data[[LT_name]], nesting_table[[leaf_name]] ) ] }
	# The leaf is named after the land type itself
	data[[leaf_name]] <- data[[LT_name]]
	if( append_AEZ == T ){ data <- append_AEZ( data, var1 = leaf_name, var2 = LN1, var3 = LN2, var4 = LN3, var5 = LN4 ) }
	return( data )
}
#add_carbon_info: add carbon densities and mature ages from the specified
# tables, matching on region / land type / AEZ. `LT` and `AEZ` are global
# column-name constants; min.*.carbon.density are global floor values.
# Args:
#   data:      data frame with region, `LT_name`, and AEZ columns
#   veg_data:  table of vegetation carbon densities (column `value`)
#   soil_data: table of soil carbon densities (column `value`)
#   age_data:  table of mature ages (column `value`)
#   LT_name:   column in `data` holding the land type used for the match
# Returns: `data` with historical/current carbon densities, mature age, and
#          minimum carbon density columns added.
add_carbon_info <- function( data, veg_data, soil_data, age_data, LT_name = "Cdensity_LT" ){
	# Match each row's region/land-type/AEZ against the source tables
	data$hist.veg.carbon.density <- veg_data$value[
      match( vecpaste( data[ c( "region", LT_name, AEZ ) ] ),
             vecpaste( veg_data[ c( "region", LT, AEZ ) ] ) ) ]
	data$hist.soil.carbon.density <- soil_data$value[
      match( vecpaste( data[ c( "region", LT_name, AEZ ) ] ),
             vecpaste( soil_data[ c( "region", LT, AEZ ) ] ) ) ]
	# Mature age applies from the first model base year onward
	data$mature.age.year.fillout <- min( model_base_years )
	data$mature.age <- age_data$value[
      match( vecpaste( data[ c( "region", LT_name, AEZ ) ] ),
             vecpaste( age_data[ c( "region", LT, AEZ ) ] ) ) ]
	# Current densities start out equal to the historical ones
	data$veg.carbon.density <- data$hist.veg.carbon.density
	data$soil.carbon.density <- data$hist.soil.carbon.density
	data$min.veg.carbon.density <- min.veg.carbon.density
	data$min.soil.carbon.density <- min.soil.carbon.density
	return( data )
}
# reduce_mgd_carbon: scale managed Pasture and Forest carbon densities down
# from their unmanaged equivalents, using the global multipliers
# Cveg_Mult_* and Csoil_Mult_*. Both the historical and current density
# columns are scaled.
# Args:
#   data:   data frame with a land-type column (global `LT`) and the four
#           *.carbon.density columns
#   LTfor:  land-type label for managed forest rows
#   LTpast: land-type label for managed pasture rows
# Returns: `data` with the density columns scaled for the matching rows.
reduce_mgd_carbon <- function( data, LTfor = "Forest", LTpast = "Pasture" ){
	# Pasture: vegetation and soil multipliers applied separately
	data[ data[[LT]] == LTpast, c( "hist.veg.carbon.density", "veg.carbon.density" ) ] <-
      data[ data[[LT]] == LTpast, c( "hist.veg.carbon.density", "veg.carbon.density" ) ] * Cveg_Mult_UnmgdPast_MgdPast
	data[ data[[LT]] == LTpast, c( "hist.soil.carbon.density", "soil.carbon.density" ) ] <-
      data[ data[[LT]] == LTpast, c( "hist.soil.carbon.density", "soil.carbon.density" ) ] * Csoil_Mult_UnmgdPast_MgdPast
	# Forest: same pattern with the forest multipliers
	data[ data[[LT]] == LTfor, c( "hist.veg.carbon.density", "veg.carbon.density" ) ] <-
      data[ data[[LT]] == LTfor, c( "hist.veg.carbon.density", "veg.carbon.density" ) ] * Cveg_Mult_UnmgdFor_MgdFor
	data[ data[[LT]] == LTfor, c( "hist.soil.carbon.density", "soil.carbon.density" ) ] <-
      data[ data[[LT]] == LTfor, c( "hist.soil.carbon.density", "soil.carbon.density" ) ] * Csoil_Mult_UnmgdFor_MgdFor
	return( data )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plusDomains_objects.R
\name{Comment.plusoners}
\alias{Comment.plusoners}
\title{Comment.plusoners Object}
\usage{
Comment.plusoners(totalItems = NULL)
}
\arguments{
\item{totalItems}{Total number of people who +1'd this comment}
}
\value{
Comment.plusoners object
}
\description{
Comment.plusoners Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
People who +1'd this comment.
}
\seealso{
Other Comment functions: \code{\link{Comment.actor.clientSpecificActorInfo.youtubeActorInfo}},
\code{\link{Comment.actor.clientSpecificActorInfo}},
\code{\link{Comment.actor.image}},
\code{\link{Comment.actor.verification}},
\code{\link{Comment.actor}},
\code{\link{Comment.inReplyTo}},
\code{\link{Comment.object}}, \code{\link{Comment}},
\code{\link{comments.insert}}
}
| /googleplusDomainsv1.auto/man/Comment.plusoners.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 892 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plusDomains_objects.R
\name{Comment.plusoners}
\alias{Comment.plusoners}
\title{Comment.plusoners Object}
\usage{
Comment.plusoners(totalItems = NULL)
}
\arguments{
\item{totalItems}{Total number of people who +1'd this comment}
}
\value{
Comment.plusoners object
}
\description{
Comment.plusoners Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
People who +1'd this comment.
}
\seealso{
Other Comment functions: \code{\link{Comment.actor.clientSpecificActorInfo.youtubeActorInfo}},
\code{\link{Comment.actor.clientSpecificActorInfo}},
\code{\link{Comment.actor.image}},
\code{\link{Comment.actor.verification}},
\code{\link{Comment.actor}},
\code{\link{Comment.inReplyTo}},
\code{\link{Comment.object}}, \code{\link{Comment}},
\code{\link{comments.insert}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raceTab1.R
\name{raceTab1}
\alias{raceTab1}
\title{raceTab1 Table showing the percentage values by ethnic/race categories}
\usage{
raceTab1(fips, ctyname, ACS, oType)
}
\arguments{
\item{fips}{the short FIPS code}
\item{ctyname}{Place Name}
\item{ACS}{data depository from the American Community Survey API}
\item{oType}{Type of Output html, latex}
}
\value{
kable formatted table and data file
}
\description{
Pulls data from the API. This table shows a set of historical comparisons
between the 2000 Census, the 2010 Census and the latest ACS API.
}
\details{
This table does not report MOEs for ACS series, because of the lack of census MOEs.
}
| /man/raceTab1.Rd | no_license | ColoradoDemography/ProfileDashboard | R | false | true | 755 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raceTab1.R
\name{raceTab1}
\alias{raceTab1}
\title{raceTab1 Table showing the percentage values by ethnic/race categories}
\usage{
raceTab1(fips, ctyname, ACS, oType)
}
\arguments{
\item{fips}{the short FIPS code}
\item{ctyname}{Place Name}
\item{ACS}{data depository from the American Community Survey API}
\item{oType}{Type of Output html, latex}
}
\value{
kable formatted table and data file
}
\description{
Pulls data from the API. This table shows a set of historical comparisons
between the 2000 Census, the 2010 Census and the latest ACS API.
}
\details{
This table does not report MOEs for ACS series, because of the lack of census MOEs.
}
|
#' @title Reshape data from long to wide format
#' @description \code{\link[reshape2]{dcast}} and \code{\link[tidyr]{spread}}
#'   are inconvenient in that they require data in "canonically long" form.
#'   It is often handy to reshape to a "truly wide" form data that is in an
#'   "intermediate" form, e.g. to turn `period` (values 1, 2, 3), `v1`, `v2`
#'   into `v1_1`, `v1_2`, `v1_3`, `v2_1`, `v2_2`, `v2_3`.
#'
#'   This function provides a shorthand syntax for exactly that operation.
#'
#'   Internally it applies \code{\link[tidyr]{spread}} to each of the selected
#'   variables and then joins the results with
#'   \code{\link[dplyr]{full_join}}.
#' @details If the \code{id} (or \code{zmienne}) parameter is omitted, it is
#'   derived as "all columns of the input data except those indicated by the
#'   \code{klucz} and \code{zmienne} (\code{id}) parameters". This of course
#'   means that at least one of the \code{id} and \code{zmienne} parameters
#'   must be provided.
#' @param dane a data frame with data in the "intermediate" form
#' @param klucz column containing the suffixes that will form the new columns
#' @param id character vector of key variable names (the output will have as
#'   many rows as there are unique combinations of these columns in the input)
#' @param zmienne character vector of names of the variables to be reshaped
#'   to the "wide" form
#' @import dplyr
#' @export
dl2sz = function(dane, klucz, id = character(), zmienne = character()) {
  stopifnot(
    is.data.frame(dane),
    is.vector(klucz), is.character(klucz), length(klucz) == 1, all(klucz %in% names(dane)),
    is.vector(id), is.character(id), all(id %in% names(dane)),
    is.vector(zmienne), is.character(zmienne), all(zmienne %in% names(dane)),
    length(zmienne) + length(id) > 0
  )
  # Derive whichever of id/zmienne was not supplied from the remaining columns
  if (length(id) == 0) {
    id = setdiff(names(dane), c(zmienne, klucz))
  }
  if (length(zmienne) == 0) {
    zmienne = setdiff(names(dane), c(id, klucz))
  }
  # Start from the unique key combinations, then spread each variable in turn
  # and join the spread results together
  wynik = dane %>%
    select_(.dots = id) %>%
    distinct()
  for (zm in zmienne) {
    tmp = dane %>%
      select_(.dots = c(id, klucz, zm)) %>%
      rename_(.klucz = klucz) %>%
      mutate_(.klucz = ~paste0(zm, .klucz)) %>%
      tidyr::spread_('.klucz', zm)
    wynik = full_join(wynik, tmp)
  }
  return(wynik)
} | /R/dl2sz.R | permissive | zozlak/MLAK | R | false | false | 2,426 | r | #' @title przekształca dane z postaci długiej do szerokiej
#' @description \code{\link[reshape2]{dcast}} oraz \code{\link[tidyr]{spread}}
#' są o tyle niewygodne, że wymagają danych w postaci "kanonicznie długiej".
#' Tymczasem wygodnie jest móc przekształcić do postaci "prawdziwie szerokiej"
#' dane w formie "pośredniej", np. `okres` (wartości 1, 2, 3), `zm1`, `zm2`
#' przekształcić na `zm1_1`, `zm1_2`, `zm1_3`, `zm2_1`, `zm2_2`, `zm2_3`.
#'
#' Funkcja dostarcza skrótowej składni do wykonania takiej właśnie operacji.
#'
#' Wewnętrznie wykonuje \code{\link[tidyr]{spread}} na każdej z wyznaczonych
#' zmiennych, a następnie łączy wyniki za pomocą
#' \code{\link[dplyr]{full_join}}.
#' @details Jeśli parametr \code{id} (lub \code{zmienne}) nie zostanie podany,
#' zostanie on wywiedziony jako "wszystkie kolumny danych wejściowych z
#' wyłączeniem tych, na które wskazuje parametr \code{klucz} oraz \code{zmienne}
#' (\code{id})". Oznacza to oczywiście, że przynajmnie jeden z parametrów
#' \code{id} i \code{zmienne} musi zostać podany.
#' @param dane ramka danych z danymi w postaci "pośredniej"
#' @param klucz kolumna zawierająca sufiksy, które stworzą nowe kolumny
#' @param id wektor nazw zmiennych będących kluczami (dane wyjściowe będą miały
#' tyle wierszy, ile jest unikalnych kombinacji wartości tych kolumn w danych
#' wejściowych)
#' @param zmienne wektor nazw zmiennych, które mają zostać przetworzone do
#' postaci "szerokiej"
#' @import dplyr
#' @export
dl2sz = function(dane, klucz, id = character(), zmienne = character()) {
stopifnot(
is.data.frame(dane),
is.vector(klucz), is.character(klucz), length(klucz) == 1, all(klucz %in% names(dane)),
is.vector(id), is.character(id), all(id %in% names(dane)),
is.vector(zmienne), is.character(zmienne), all(zmienne %in% names(dane)),
length(zmienne) + length(id) > 0
)
if (length(id) == 0) {
id = setdiff(names(dane), c(zmienne, klucz))
}
if (length(zmienne) == 0) {
zmienne = setdiff(names(dane), c(id, klucz))
}
wynik = dane %>%
select_(.dots = id) %>%
distinct()
for (zm in zmienne) {
tmp = dane %>%
select_(.dots = c(id, klucz, zm)) %>%
rename_(.klucz = klucz) %>%
mutate_(.klucz = ~paste0(zm, .klucz)) %>%
tidyr::spread_('.klucz', zm)
wynik = full_join(wynik, tmp)
}
return(wynik)
} |
library(tidyverse)
library(lubridate)
library(DBI)
library(odbc)
library(dbplyr)
library(httr)
library(jsonlite)
library(rvest)
library(furrr)
library(slider)
library(pdftools)
library(fs)
source("00_script/db_function.R")
#
# test <- fnlist %>% slice(1)
# BULLET_KIND_OLD <- test$BULLET_KIND_OLD
# OFFER_INST_CD <- test$OFFER_INST_CD
# BULLET_DT <- test$BULLET_DT
# BULLET_NO_OLD <- test$BULLET_NO_OLD
# tmp <- tempfile()
# fn_login: log in to fnguide.com and keep the authenticated rvest session in
# the global `pgsession` (via <<-) so later downloads can reuse the cookies.
# NOTE(review): credentials are hard-coded here; consider moving them to
# environment variables or a config file.
fn_login <- function() {
  login <- "https://www.fnguide.com/home/login"
  pgsession <<- session(login)  # <<- : save_pdf() below reads this global
  pgform <- html_form(pgsession)[[1]]
  filled_form <- html_form_set(pgform, MemberID = "research1", PassWord = "research2")
  session_submit(pgsession, filled_form)
}
# BUG FIX: the original line `filter_new_report_no <- function()` had no body,
# so R parsed the entire following `save_pdf <- function(...) {...}` assignment
# as the body of filter_new_report_no. As a result save_pdf was never defined
# at the top level and pwalk(save_pdf) below failed. Give the unfinished
# function an explicit stub body so save_pdf is defined normally.
filter_new_report_no <- function() {
  stop("filter_new_report_no() is not implemented yet")
}

# save_pdf: download one research-report PDF from fnguide, extract its text,
# and append it to the FN_PDF table.
# Relies on the authenticated global `pgsession` created by fn_login().
# Extra list-columns supplied by pwalk() are absorbed by `...`.
save_pdf <- function(FILTER_BULLET_DT,
                     BULLET_NO_OLD,
                     ITEM_NM,
                     BULLET_KIND_OLD,
                     OFFER_INST_CD,
                     BULLET_DT,
                     row_number,
                     ...) {

  # Download the PDF to a temp file using the logged-in session; clean the
  # temp file up on exit so repeated calls do not accumulate files
  tmp <- tempfile()
  on.exit(unlink(tmp), add = TRUE)
  res <- session_jump_to(
    pgsession,
    url = 'https://file.fnguide.com/upload1/SVR_pdfDownload.asp?',
    query = list(
      bukd = BULLET_KIND_OLD,
      ofincd = OFFER_INST_CD,
      btym = str_sub(BULLET_DT, 1, 6),  # report year-month (YYYYMM)
      buno = BULLET_NO_OLD
    ),
    write_disk(tmp, overwrite = TRUE)
  )

  # Collapse all pages into one text string keyed by the report number
  res <- tibble(BULLET_NO_OLD = BULLET_NO_OLD,
                PDF_RAW = pdf_text(tmp) %>% str_c(collapse = " "))

  write_table_to_database(res, "FN_PDF", append = TRUE)

  print(str_glue("{FILTER_BULLET_DT} {BULLET_NO_OLD} done."))
}
# Authenticate once; the session is stored globally for save_pdf() to reuse
fn_login()
# Open the local SQLite store of report metadata and extracted PDF text
con <- dbConnect(RSQLite::SQLite(), "TextDat.sqlite")
fn_list <- tbl(con, "FN_COMP_LIST") %>% collect()
# Report numbers already downloaded, so they are not re-fetched
bullet_vec <- tbl(con, "FN_PDF") %>% pull(BULLET_NO_OLD)
fn_list_new <- fn_list %>%
  filter(!BULLET_NO_OLD %in% bullet_vec)
# Download and store every report not yet in FN_PDF
fn_list_new %>%
  pwalk(save_pdf)
dbDisconnect(con)
#
# # pdf로 다운받은거 DB에 저장하는 부분
# file_path <- dir_ls("./01_pdf") %>% `[`(1)
#
# save_pdf <- function(file_path) {
# bullet_no_old <- file_path %>% str_split("_", simplify = TRUE) %>% `[`(1, 3)
#
# pdf_text_raw <- pdf_text(file_path) %>%
# str_c(collapse = " ")
#
# res <- tibble(BULLET_NO_OLD = bullet_no_old,
# PDF_RAW = pdf_text_raw)
#
# dbWriteTable(con, "FN_PDF", res, append = TRUE)
#
# print(str_glue("{bullet_no_old} Done ... !"))
# }
#
# dir_ls("./01_pdf") %>%
# walk(save_pdf)
| /00_script/pdf_download2.R | no_license | ruddnr/text_dat2 | R | false | false | 2,610 | r | library(tidyverse)
library(lubridate)
library(DBI)
library(odbc)
library(dbplyr)
library(httr)
library(jsonlite)
library(rvest)
library(furrr)
library(slider)
library(pdftools)
library(fs)
source("00_script/db_function.R")
#
# test <- fnlist %>% slice(1)
# BULLET_KIND_OLD <- test$BULLET_KIND_OLD
# OFFER_INST_CD <- test$OFFER_INST_CD
# BULLET_DT <- test$BULLET_DT
# BULLET_NO_OLD <- test$BULLET_NO_OLD
# tmp <- tempfile()
# fn_login: log in to fnguide.com and keep the authenticated rvest session in
# the global `pgsession` (via <<-) so later downloads can reuse the cookies.
# NOTE(review): credentials are hard-coded here; consider moving them to
# environment variables or a config file.
fn_login <- function() {
  login <- "https://www.fnguide.com/home/login"
  pgsession <<- session(login)  # <<- : save_pdf() below reads this global
  pgform <- html_form(pgsession)[[1]]
  filled_form <- html_form_set(pgform, MemberID = "research1", PassWord = "research2")
  session_submit(pgsession, filled_form)
}
# BUG FIX: the original line `filter_new_report_no <- function()` had no body,
# so R parsed the entire following `save_pdf <- function(...) {...}` assignment
# as the body of filter_new_report_no. As a result save_pdf was never defined
# at the top level and pwalk(save_pdf) below failed. Give the unfinished
# function an explicit stub body so save_pdf is defined normally.
filter_new_report_no <- function() {
  stop("filter_new_report_no() is not implemented yet")
}

# save_pdf: download one research-report PDF from fnguide, extract its text,
# and append it to the FN_PDF table.
# Relies on the authenticated global `pgsession` created by fn_login().
# Extra list-columns supplied by pwalk() are absorbed by `...`.
save_pdf <- function(FILTER_BULLET_DT,
                     BULLET_NO_OLD,
                     ITEM_NM,
                     BULLET_KIND_OLD,
                     OFFER_INST_CD,
                     BULLET_DT,
                     row_number,
                     ...) {

  # Download the PDF to a temp file using the logged-in session; clean the
  # temp file up on exit so repeated calls do not accumulate files
  tmp <- tempfile()
  on.exit(unlink(tmp), add = TRUE)
  res <- session_jump_to(
    pgsession,
    url = 'https://file.fnguide.com/upload1/SVR_pdfDownload.asp?',
    query = list(
      bukd = BULLET_KIND_OLD,
      ofincd = OFFER_INST_CD,
      btym = str_sub(BULLET_DT, 1, 6),  # report year-month (YYYYMM)
      buno = BULLET_NO_OLD
    ),
    write_disk(tmp, overwrite = TRUE)
  )

  # Collapse all pages into one text string keyed by the report number
  res <- tibble(BULLET_NO_OLD = BULLET_NO_OLD,
                PDF_RAW = pdf_text(tmp) %>% str_c(collapse = " "))

  write_table_to_database(res, "FN_PDF", append = TRUE)

  print(str_glue("{FILTER_BULLET_DT} {BULLET_NO_OLD} done."))
}
# Authenticate once; the session is stored globally for save_pdf() to reuse
fn_login()
# Open the local SQLite store of report metadata and extracted PDF text
con <- dbConnect(RSQLite::SQLite(), "TextDat.sqlite")
fn_list <- tbl(con, "FN_COMP_LIST") %>% collect()
# Report numbers already downloaded, so they are not re-fetched
bullet_vec <- tbl(con, "FN_PDF") %>% pull(BULLET_NO_OLD)
fn_list_new <- fn_list %>%
  filter(!BULLET_NO_OLD %in% bullet_vec)
# Download and store every report not yet in FN_PDF
fn_list_new %>%
  pwalk(save_pdf)
dbDisconnect(con)
#
# # pdf로 다운받은거 DB에 저장하는 부분
# file_path <- dir_ls("./01_pdf") %>% `[`(1)
#
# save_pdf <- function(file_path) {
# bullet_no_old <- file_path %>% str_split("_", simplify = TRUE) %>% `[`(1, 3)
#
# pdf_text_raw <- pdf_text(file_path) %>%
# str_c(collapse = " ")
#
# res <- tibble(BULLET_NO_OLD = bullet_no_old,
# PDF_RAW = pdf_text_raw)
#
# dbWriteTable(con, "FN_PDF", res, append = TRUE)
#
# print(str_glue("{bullet_no_old} Done ... !"))
# }
#
# dir_ls("./01_pdf") %>%
# walk(save_pdf)
|
# Program Summary ---------------------------------------------------------
# Script to run Power Analysis to determine ability to detect trends
# in seabird colonies using data from Acoustic monitors and nest counts,
# based on results from existing analyses
# NOTE: this script must be called up from "set-up" script that sets params
#
#
# Load necessary libraries (may need to install some of these) ------------
library(ggplot2)
library(dplyr)
library(gdata)
library(mvtnorm)
library(stats)
library(boot)
library(stargazer)
library(lme4)
library(car)
# Load Data ---------------------------------------------------------------
# Load results from Acoustic analysis (param values).
# NOTE(review): attach()/detach() on a saved workspace is fragile (it can mask
# globals while attached); load(loadfile, envir = ...) would be safer.
attach(loadfile);
s_statsA <- s_stats;   # posterior summary stats from the acoustic model
Stratalist <- Stratalist   # re-assign so the object survives the detach below
vnA <- vn   # parameter names from the acoustic model
detach(paste0('file:',loadfile),character.only = TRUE)
# rm(vn)
if(Countfreq==0){
ii = which(Stratalist$StratName==Strata_trends)
CR0 = s_statsA$Mean[which(startsWith(vnA,paste0("C[",ii)))]
minCsite = min(s_statsA$Mean[which(startsWith(vnA,paste0("Csite[",ii)))] )
Dispers = s_statsA$Mean[which(startsWith(vnA,"Dispers"))]
sigS = s_statsA$Mean[which(startsWith(vnA,"sigS"))] # variance CR across sites
Vs = sigS^2
NSite = NSiteA
Nyrs = NyrsP
Years = 1:Nyrs
# Initialize Variables
# Stats for calls
r_est = numeric(length=simreps)
Pval = numeric(length=simreps)
Psig = numeric(length=simreps)
r_CI = matrix(nrow=simreps,ncol=2)
#
C = matrix(0,nrow = simreps,ncol = Nyrs)
Ahat = matrix(0,nrow = simreps,ncol = Nyrs)
Ahatsd = matrix(0,nrow = simreps,ncol = Nyrs)
# Some matrices that get over-written
EstA = matrix(0,nrow = NSite, ncol = Nyrs)
# Run Sims
for (r in 1:simreps){
C[r,1] = CR0
for (t in 2:Nyrs){
C[r,t] = C[r,t-1]*exp(rnorm(1,TRUE_r,Sigma_r))
}
cntr = 0;
for (t in 1:Nyrs){
cntr = cntr + 1
muS = log(C[r,t]/sqrt(1+Vs/C[r,t]^2))
sgS = sqrt(log(1+ Vs/C[r,t]^2))
Csite = numeric(length=NSite)
for (s in 1:NSite){
Csite[s] = rlnorm(1,muS,sgS)
mu = Csite[s]
vr = ifelse(mu<0,0.0000001, mu + (mu^2)/Dispers)
p = max(0.00000001,1-(vr-mu)/vr)
Calls = rnbinom(RecPsite,Dispers,p)
EstA[s,t] = max(minCsite/20,mean(Calls))
}
Ahat[r,t] = mean(EstA[,t])
Ahatsd[r,t] = sd(EstA[,t])
# tmp=C[r,t]-Ahat[r,t] # Not sure if this is necessary
}
# Stats for Call rate trend est
lgN = log(Ahat[r,])
fit = lm(lgN ~ Years)
# summary(fit)
r_est[r] = as.numeric(fit$coefficients[2])
# Pval[r] = lmp(fit)
Pval[r] = as.numeric(summary(fit)$coefficients[2,4])
r_CI[r,] = as.numeric(confint(fit,level=P_signif)[2,])
if (sign(r_CI[r,1])==sign(TRUE_r) & sign(r_CI[r,2])==sign(TRUE_r)){
Psig[r] = 1
}else{
Psig[r] = 0
}
}
# Summarize sim stats ---------------------------------------------------
# C_true = colMeans(C, na.rm = "T")
C_true = apply(C,2,quantile,.5)
C_true_CIL = apply(C,2,quantile,.025)
C_true_CIH = apply(C,2,quantile,.975)
# CR_estA = colMeans(Ahat, na.rm = "T")
# CR_estA_CIL = colMeans(Ahat-1.96*(Ahatsd/sqrt(NSite)), na.rm = "T")
# CR_estA_CIH = colMeans(Ahat+1.96*(Ahatsd/sqrt(NSite)), na.rm = "T")
tmp = apply(Ahat,2,quantile,.5)
cor = C_true[1]/tmp[1]
CR_estA = tmp*cor
# CR_estA_CIL = apply(Ahat,2,quantile,.05)
# CR_estA_CIH = apply(Ahat,2,quantile,.95)
CR_estA_CIL = CR_estA -(apply(Ahat,2,sd))/sqrt(NSite)
CR_estA_CIH = CR_estA +(apply(Ahat,2,sd))/sqrt(NSite)
x = seq(1,Nyrs)
# Trend plots:
# Call rate estimates
dat = data.frame(Year=x,Esimate=CR_estA,lower=CR_estA_CIL,upper=CR_estA_CIH,True=C_true)
pltR1 = ggplot() +
geom_errorbar(data=dat, mapping=aes(x=Year, ymax=upper, ymin=lower), width=0.2, size=1, color="blue") +
geom_point(data=dat, mapping=aes(x=Year, y=Esimate), size=4, shape=21, fill="white") +
geom_line(data=dat, mapping=aes(x=Year, y=True),linetype = "dashed")+
labs(x="Year", y = "Estimated Call Rate") +
labs(title = paste0("Estimated Call Rate Over Time, r = ",TRUE_r, ", process error = ", Sigma_r,
", "),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",RecPsite,
" Acoustic Records per site (Dashed line = Actual Trend)"))
print(pltR1)
# Power Stats Summaries -------------------------------------------------------------
# 1) Power to Detect Trends in Call rates
# Bootstrap statistic: one draw from the sampling distribution of the mean
# of the resampled values x[d].  Uses the global NSite to form the
# standard error of the mean.
sample_Rmn <- function(x, d) {
  xs <- x[d]
  se <- sd(xs) / sqrt(NSite)
  rnorm(1, mean(xs), se)
}
# Bootstrap statistic: percentage of resampled p-values x[d] at or below
# the significance threshold (1 - P_signif, a global).  NA p-values are
# excluded from the numerator, matching the original subset() behavior.
Power_mn <- function(x, d) {
  pv <- x[d]
  hits <- sum(pv <= (1 - P_signif), na.rm = TRUE)
  100 * (hits / length(pv))
}
# Empirical power: percent of sims with p-value below (1 - P_signif)
PowerCR = 100*(length(subset(Pval,Pval<=(1-P_signif)))/length(Pval))
# Bootstrap the power estimate to show its sampling variability
PowerCR_samp = boot(Pval, Power_mn, R=1000)
dfPower = data.frame(Method = "Acoustic Data, Call rate",Power=PowerCR_samp$t)
P_sig = mean(Psig) # proportion of sims with CI-based detection
r_CIs = colMeans(r_CI) # average CI bounds on the trend estimate
mean_r_est = mean(r_est)
# Bootstrap distribution of the mean trend estimate (for the density plot)
r_est_bt = boot(r_est, sample_Rmn, R=10000)
r_est_bt = r_est_bt$t
# One-row summary table for display via stargazer
Powersum = data.frame(N_Years = Nyrs, True_r = TRUE_r, Sigma_r = Sigma_r,
N_Sites = NSite, N_CPM15_st = RecPsite,
Est_r = format(mean_r_est, digits=3),
CI_r_Lo = format(r_CIs[1], digits = 3),
CI_r_Hi = format(r_CIs[2], digits = 3),
Power = format(PowerCR, digits=3))
print("Power Summary, Trend in Call Rate Estimated from Acoustic Data")
stargazer(Powersum, type = 'text', out = 'out.txt', summary=FALSE, rownames=FALSE)
print(" ")
print(" ")
# Density plot of bootstrapped trend estimates with true r and CI markers
df = data.frame(N_Sites = NSite, Estimate = r_est_bt)
plt = ggplot(df, aes(x=Estimate, fill = NSite)) + geom_density(alpha=.3) +
labs(title = paste0("Probability of Detecting trend in Call Rate"),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",
RecPsite, " Acoustic Records per site" ),
x="Estimated Trend in Call Rate", y = "Probability of Estimate") +
geom_vline(xintercept = TRUE_r, colour = 'red') +
annotate("text", x = TRUE_r, y = 5, label = "True r", colour = 'red', hjust = -.3) +
geom_vline(xintercept = r_CIs[1], colour = 'magenta', linetype="dashed") +
geom_vline(xintercept = r_CIs[2], colour = 'magenta', linetype="dashed") +
theme(legend.position="none")
print(plt)
}else{
# -------------------------------------------------------------------------
# Branch 2: acoustic monitoring plus periodic nest-count surveys (every
# Countfreq years).  Compares power from three estimators: call rate,
# acoustic-converted density, and count-based density.
# -------------------------------------------------------------------------
# Load Call-Count conversion function
# NOTE(review): attach()/detach() on a saved workspace is fragile; a
# local load into an environment would be safer.
attach(loadfile2);
Convfxn = Convertfxn
Convplot = Convertplot
s_statsC <- s_stats;
vnC = vn
detach(paste0('file:',loadfile2),character.only = TRUE)
# Draw rep-specific conversion parameters from their joint (MVN) posterior
AB = rmvnorm(n=simreps, mean=Convfxn$means, sigma=Convfxn$covmat)
alphR = AB[,1]; BetaR = AB[,2]; rm(AB)
# Load count data
dfArea = read.csv(file = Areasdatfile, header = TRUE, sep = ",")
dfC = read.csv(file = loadCdat, header = TRUE, sep = ",")
#
# Get params for simulations ----------------------------------------------
#
# Fit linear mixed model to nest counts to get estimates of variance
# for sites within year and sequential counts within site
ii = which(dfC$StrataName==Strata_trends)
dfNC = dfC[ii,]
# NOTE(review): logNC is computed but the model below is fit on Density -
# confirm whether the log-transformed response was intended.
dfNC$logNC = log(dfNC$Density+1.0001)
lmm <- lmer(Density ~ contract_year + (1 | SPID), data = dfNC,
REML = FALSE)
# summary(lmm)
# Test = Anova(lmm) # get P value for linear mixed model
tmp = as.data.frame(VarCorr(lmm)) # extract variance components
sigN = tmp$sdcor[1] # among-site SD (random intercept)
sigC = tmp$sdcor[2] # residual SD (repeat counts within site)
Yrcounts = unique(dfNC$contract_year)
# Dens0 = mean(dfNC$Density[dfNC$contract_year==min(Yrcounts)]) # Initial nest density
ii = which(Stratalist$StratName==Strata_trends)
CR0 = s_statsA$Mean[which(startsWith(vnA,paste0("C[",ii)))]
minCsite = min(s_statsA$Mean[which(startsWith(vnA,paste0("Csite[",ii)))] )
Dispers = s_statsA$Mean[which(startsWith(vnA,"Dispers"))]
sigS = s_statsA$Mean[which(startsWith(vnA,"sigS"))] # variance CR across sites
sigD = s_statsC$Mean[which(startsWith(vnC,"sigD"))] # Variance in dens est from calls
alph = s_statsC$Mean[which(startsWith(vnC,"alpha"))] # Param 1 for call-count convers
Beta= s_statsC$Mean[which(startsWith(vnC,"Beta"))] # Param 2 for call-count convers
# Initial density implied by the conversion function D = alph * C^Beta
Dens0 = alph*CR0^Beta
#rm(s_statsC)
# rm(s_statsA)
Vn = sigN^2
Vc = sigC^2
Vs = sigS^2
Vd = sigD^2
NSite = NSiteA
Nyrs = NyrsP
Years = 1:Nyrs
Ncnts = NSiteC
# Initialize Variables
# Stats for calls
r_est = numeric(length=simreps)
Pval = numeric(length=simreps)
Psig = numeric(length=simreps)
r_CI = matrix(nrow=simreps,ncol=2)
# Stats for density est by acoustic data
rA_est = numeric(length=simreps)
PvalA = numeric(length=simreps)
PsigA = numeric(length=simreps)
rA_CI = matrix(nrow=simreps,ncol=2)
# Stats for density est by nest counts
rC_est = numeric(length=simreps)
PvalC = numeric(length=simreps)
PsigC = numeric(length=simreps)
rC_CI = matrix(nrow=simreps,ncol=2)
C = matrix(0,nrow = simreps,ncol = Nyrs) # true call rate
D = matrix(0,nrow = simreps,ncol = Nyrs) # true density
DhatA = matrix(0,nrow = simreps,ncol = Nyrs) # density est., acoustic
DhatAsd = matrix(0,nrow = simreps,ncol = Nyrs)
DhatC = matrix(0,nrow = simreps,ncol = Nyrs) # density est., nest counts
DhatCsd = matrix(0,nrow = simreps,ncol = Nyrs)
Ahat = matrix(0,nrow = simreps,ncol = Nyrs) # call rate estimate
Ahatsd = matrix(0,nrow = simreps,ncol = Nyrs)
# Some matrices that get over-written
EstD = matrix(0,nrow = NSite, ncol = Nyrs)
EstA = matrix(0,nrow = NSite, ncol = Nyrs)
EstNC = matrix(0,nrow = Ncnts, ncol = Nyrs)
NCT = numeric(length=NcountsPSite) # scratch vector of replicate counts
# Nste = numeric(length=Ncnts)
# Run Sims
# Monte-Carlo simulation loop.  Each rep r projects true density (D) and
# call rate (C) forward Nyrs years, simulates (a) nest-count surveys every
# Countfreq years and (b) annual acoustic monitoring, then fits a
# log-linear trend model to each estimated series and records the slope,
# its p-value, and its confidence interval.
for (r in 1:simreps) {
  D[r, 1] = Dens0
  C[r, 1] = CR0
  for (t in 2:Nyrs) {
    # Stochastic exponential growth of density; call rate follows density
    # through the inverse of the conversion function D = alph * C^Beta
    D[r, t] = D[r, t - 1] * exp(rnorm(1, TRUE_r, Sigma_r))
    C[r, t] = (D[r, t] / alph)^(1 / Beta)
  }
  cntr = 0
  for (t in 1:Nyrs) {
    cntr = cntr + 1
    # Lognormal moment matching (natural-scale mean/variance -> log scale)
    muS = log(C[r, t] / sqrt(1 + Vs / C[r, t]^2))
    sgS = sqrt(log(1 + Vs / C[r, t]^2))
    muN = log(D[r, t] / sqrt(1 + Vn / D[r, t]^2))
    sgN = sqrt(log(1 + Vn / D[r, t]^2))
    Csite = numeric(length = NSite)
    DensN = numeric(length = Ncnts)
    if (cntr == Countfreq) {
      # Nest-count survey year: Ncnts sites, NcountsPSite replicates each
      for (s in 1:Ncnts) {
        DensN[s] = rlnorm(1, muN, sgN)
        muC = log(DensN[s] / sqrt(1 + Vc / DensN[s]^2))
        sgC = sqrt(log(1 + Vc / DensN[s]^2))
        for (c in 1:NcountsPSite) {
          NCT[c] = rlnorm(1, muC, sgC)
        }
        # BUG FIX: average over all replicate counts; the original code
        # used mean(NCT[c]), i.e. only the last replicate drawn above.
        EstNC[s, t] = mean(NCT)
      }
      DhatC[r, t] = mean(EstNC[, t])
      DhatCsd[r, t] = sd(EstNC[, t])
      cntr = 0
    } else {
      # No nest-count survey this year
      DhatC[r, t] = NA
      DhatCsd[r, t] = NA
    }
    # Acoustic monitoring: every site, every year
    for (s in 1:NSite) {
      Csite[s] = rlnorm(1, muS, sgS)
      mu = Csite[s]
      # Negative-binomial call counts; tiny positive floors guard mu <= 0
      vr = ifelse(mu < 0, 0.0000001, mu + (mu^2) / Dispers)
      p = max(0.00000001, 1 - (vr - mu) / vr)
      Calls = rnbinom(RecPsite, Dispers, p)
      # Site call-rate estimate, floored at minCsite/20
      EstA[s, t] = max(minCsite / 20, mean(Calls))
      # Site density via the conversion fxn with lognormal error (Vd)
      expD = alph * EstA[s, t]^Beta
      muD = log(expD / sqrt(1 + Vd / expD^2))
      sgD = sqrt(log(1 + Vd / expD^2))
      EstD[s, t] = rlnorm(1, muD, sgD)
    }
    Ahat[r, t] = mean(EstA[, t])
    Ahatsd[r, t] = sd(EstA[, t])
    # Density estimate converts the mean call rate using rep-specific
    # (posterior-sampled) conversion parameters; its SD comes from the
    # site-level EstD draws.  NOTE(review): mean and SD thus come from two
    # different estimators - confirm this is intended.
    DhatA[r, t] = alphR[r] * Ahat[r, t]^BetaR[r]
    DhatAsd[r, t] = sd(EstD[, t])
    # (removed unused scratch assignment tmp = C[r,t] - Ahat[r,t])
  }
  # Trend stats, call-rate series: log-linear regression on Years
  lgN = log(Ahat[r, ])
  fit = lm(lgN ~ Years)
  r_est[r] = as.numeric(fit$coefficients[2])
  Pval[r] = as.numeric(summary(fit)$coefficients[2, 4])
  r_CI[r, ] = as.numeric(confint(fit, level = P_signif)[2, ])
  # "Significant" when both CI bounds share the sign of the true trend
  if (sign(r_CI[r, 1]) == sign(TRUE_r) && sign(r_CI[r, 2]) == sign(TRUE_r)) {
    Psig[r] = 1
  } else {
    Psig[r] = 0
  }
  # Trend stats, acoustic-based density series
  lgNA = log(DhatA[r, ])
  fitA = lm(lgNA ~ Years, na.action = na.omit)
  rA_est[r] = as.numeric(fitA$coefficients[2])
  PvalA[r] = as.numeric(summary(fitA)$coefficients[2, 4])
  rA_CI[r, ] = as.numeric(confint(fitA, level = P_signif)[2, ])
  if (sign(rA_CI[r, 1]) == sign(TRUE_r) && sign(rA_CI[r, 2]) == sign(TRUE_r)) {
    PsigA[r] = 1
  } else {
    PsigA[r] = 0
  }
  # Trend stats, nest-count density series (needs > 2 survey years)
  npts = sum(!is.na(DhatC[r, ]))
  if (npts > 2) {
    lgNC = log(DhatC[r, ])
    fitC = lm(lgNC ~ Years, na.action = na.omit)
    rC_est[r] = as.numeric(fitC$coefficients[2])
    PvalC[r] = as.numeric(summary(fitC)$coefficients[2, 4])
    rC_CI[r, ] = as.numeric(confint(fitC, level = P_signif)[2, ])
    if (sign(rC_CI[r, 1]) == sign(TRUE_r) && sign(rC_CI[r, 2]) == sign(TRUE_r)) {
      PsigC[r] = 1
    } else {
      PsigC[r] = 0
    }
  }
}
# Summarize sim stats ---------------------------------------------------
# Point summaries are medians across simulation reps.  The "true"
# trajectories get 95% quantile envelopes; the estimated series get
# +/- 1 standard-error bands.
D_true = apply(D, 2, quantile, .5)
D_true_CIL = apply(D, 2, quantile, .025)
D_true_CIH = apply(D, 2, quantile, .975)
C_true = apply(C, 2, quantile, .5)
C_true_CIL = apply(C, 2, quantile, .025)
C_true_CIH = apply(C, 2, quantile, .975)
# Acoustic call-rate estimate
CR_estA = apply(Ahat, 2, quantile, .5)
CR_estA_CIL = CR_estA - (apply(Ahat, 2, sd))/sqrt(NSite)
CR_estA_CIH = CR_estA + (apply(Ahat, 2, sd))/sqrt(NSite)
# Acoustic-based density estimate
D_estA = apply(DhatA, 2, quantile, .5)
D_estA_CIL = D_estA - (apply(DhatA, 2, sd))/sqrt(NSite)
D_estA_CIH = D_estA + (apply(DhatA, 2, sd))/sqrt(NSite)
# Nest-count density estimate (NA in non-survey years, hence na.rm).
# FIX: na.rm takes a logical; the original passed the string "T", which
# only worked through accidental coercion inside if().
D_estC = apply(DhatC, 2, quantile, .5, na.rm = TRUE)
D_estC_CIL = D_estC - (apply(DhatC, 2, sd, na.rm = TRUE))/sqrt(Ncnts)
D_estC_CIH = D_estC + (apply(DhatC, 2, sd, na.rm = TRUE))/sqrt(Ncnts)
x = seq(1, Nyrs)
# Trend plots:
# Call rate estimates
# NOTE(review): "Esimate" is a typo for "Estimate" but is used consistently
# as the column name in the aes() calls, so the plots are unaffected.
dat = data.frame(Year=x,Esimate=CR_estA,lower=CR_estA_CIL,upper=CR_estA_CIH,True=C_true)
pltR1 = ggplot() +
geom_errorbar(data=dat, mapping=aes(x=Year, ymax=upper, ymin=lower), width=0.2, size=1, color="blue") +
geom_point(data=dat, mapping=aes(x=Year, y=Esimate), size=4, shape=21, fill="white") +
geom_line(data=dat, mapping=aes(x=Year, y=True),linetype = "dashed")+
labs(x="Year", y = "Estimated Call Rate") +
labs(title = paste0("Estimated Call Rate Over Time, r = ",TRUE_r, ", process error = ", Sigma_r,
", "),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",RecPsite,
" Acoustic Records per site (Dashed line = Actual Trend)"))
print(pltR1)
#
# Density Estimates, Acoustic and Counts
dat = data.frame(Year=x,Esimate=D_estA,lower=D_estA_CIL,upper=D_estA_CIH,True=D_true)
pltR2 = ggplot() +
geom_errorbar(data=dat, mapping=aes(x=Year, ymax=upper, ymin=lower), width=0.2, size=1, color="blue") +
geom_point(data=dat, mapping=aes(x=Year, y=Esimate), size=4, shape=21, fill="white") +
geom_line(data=dat, mapping=aes(x=Year, y=True),linetype = "dashed")+
labs(x="Year", y = "Estimated Density based on Call-Count Conversion") +
labs(title = paste0("Estimated Density Over Time, Acoustic Data, r = ",TRUE_r, ", process error = ", Sigma_r,
", "),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",RecPsite,
" Acoustic Records per site (Dashed line = Actual Trend)"))
print(pltR2)
#
# Density Estimates, Counts only (if possible)
# Only drawn when at least one survey year produced a non-NA estimate
npts = sum(!is.na(D_estC))
if(npts>0){
dat = data.frame(Year=x,Esimate=D_estC,lower=D_estC_CIL,upper=D_estC_CIH,True=D_true)
pltR3 = ggplot() +
geom_errorbar(data=dat, mapping=aes(x=Year, ymax=upper, ymin=lower), width=0.2, size=1, color="blue") +
geom_point(data=dat, mapping=aes(x=Year, y=Esimate), size=4, shape=21, fill="white") +
geom_line(data=dat, mapping=aes(x=Year, y=True),linetype = "dashed")+
labs(x="Year", y = "Estimated Density from Nest Counts") +
labs(title = paste0("Estimated Density Over Time, Nest Counts, r = ",TRUE_r, ", process error = ", Sigma_r,
", "),
subtitle = paste0("Monitor ", Nyrs, " Years, Nest counts at ", Ncnts,
" sites, ", NcountsPSite, " reps per site, every ",
Countfreq, " Years (Dashed line = Actual Trend)"))
print(pltR3)
}
# Power Stats Summaries -------------------------------------------------------------
# 1) Power to Detect Trends in Call rates
# (These helpers duplicate the Countfreq==0 branch definitions.)
# Bootstrap statistic: one draw from the sampling distribution of the mean
# of the resampled values x[d]; global NSite supplies the SE denominator.
sample_Rmn <- function(x, d) {
  xs <- x[d]
  se <- sd(xs) / sqrt(NSite)
  rnorm(1, mean(xs), se)
}
# Bootstrap statistic: percentage of resampled p-values x[d] at or below
# the threshold (1 - P_signif, a global); NAs excluded from the numerator,
# matching the original subset() behavior.
Power_mn <- function(x, d) {
  pv <- x[d]
  hits <- sum(pv <= (1 - P_signif), na.rm = TRUE)
  100 * (hits / length(pv))
}
# Empirical power: percent of sims with p-value below (1 - P_signif)
PowerCR = 100*(length(subset(Pval,Pval<=(1-P_signif)))/length(Pval))
# Bootstrap the power estimate to show its sampling variability
PowerCR_samp = boot(Pval, Power_mn, R=1000)
dfPower = data.frame(Method = "Acoustic Data, Call rate",Power=PowerCR_samp$t)
P_sig = mean(Psig) # proportion of sims with CI-based detection
r_CIs = colMeans(r_CI)
mean_r_est = mean(r_est)
# Bootstrap distribution of the mean trend estimate (for the density plot)
r_est_bt = boot(r_est, sample_Rmn, R=10000)
r_est_bt = r_est_bt$t
# Summary table of the call-rate trend power analysis.
# FIX: the original wrapped this in `if (npts > 0) ... else ...` with two
# byte-identical branches, so the branch (which also keyed off npts, a
# leftover from the last simulation rep) is collapsed to one assignment.
Powersum = data.frame(N_Years = Nyrs, True_r = TRUE_r, Sigma_r = Sigma_r,
                      N_Sites = NSite, N_CPM15_st = RecPsite,
                      Est_r = format(mean_r_est, digits = 3),
                      CI_r_Lo = format(r_CIs[1], digits = 3),
                      CI_r_Hi = format(r_CIs[2], digits = 3),
                      Power = format(PowerCR, digits = 3))
print("Power Summary, Trend in Call Rate Estimated from Acoustic Data")
stargazer(Powersum, type = 'text', out = 'out.txt', summary=FALSE, rownames=FALSE)
print(" ")
print(" ")
# Density plot of bootstrapped call-rate trend estimates
df = data.frame(N_Sites = NSite, Estimate = r_est_bt)
plt = ggplot(df, aes(x=Estimate, fill = NSite)) + geom_density(alpha=.3) +
labs(title = paste0("Probability of Detecting trend in Call Rate"),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",
RecPsite, " Acoustic Records per site" ),
x="Estimated Trend in Call Rate", y = "Probability of Estimate") +
geom_vline(xintercept = TRUE_r, colour = 'red') +
annotate("text", x = TRUE_r, y = 5, label = "True r", colour = 'red', hjust = -.3) +
geom_vline(xintercept = r_CIs[1], colour = 'magenta', linetype="dashed") +
geom_vline(xintercept = r_CIs[2], colour = 'magenta', linetype="dashed") +
theme(legend.position="none")
print(plt)
# ggsave(plt,filename=paste0('PowerAnalysis_',Species,'_NT_31May17.jpg'))
#
# 2) Power estimating Density Trends, Acoustic Data plus Calls to Counts Conversion Fxn
PowerA = 100*(length(subset(PvalA,PvalA<=(1-P_signif)))/length(PvalA))
PowerA_samp = boot(PvalA, Power_mn, R=1000)
dfPower = rbind(dfPower, data.frame(Method = "Acoustic-based Density Estimate",Power=PowerA_samp$t))
Psig_A = mean(PsigA)
rA_CIs = colMeans(rA_CI)
mean_r_estA = mean(rA_est)
# NOTE(review): r_est_densA is computed but never used below
r_est_densA = density(rA_est)
r_estA_bt = boot(rA_est, sample_Rmn, R=10000)
r_estA_bt = r_estA_bt$t
# Summary table of the acoustic-density trend power analysis.
# FIX: as with Powersum above, the original `if (npts > 0) ... else ...`
# had two identical branches; collapsed to a single assignment.
PowersumA = data.frame(N_Years = Nyrs, True_r = TRUE_r, Sigma_r = Sigma_r,
                       N_Sites = NSite, N_CPM15_st = RecPsite,
                       Est_r = format(mean_r_estA, digits = 3),
                       CI_r_Lo = format(rA_CIs[1], digits = 3),
                       CI_r_Hi = format(rA_CIs[2], digits = 3),
                       Power = format(PowerA, digits = 3))
print("Power Summary, Density Estimated from Acoustic Data")
stargazer(PowersumA, type = 'text', out = 'out.txt', summary=FALSE, rownames=FALSE)
print(" ")
print(" ")
#
# Density plot of bootstrapped acoustic-density trend estimates.
# NOTE(review): fill = NSite refers to the global NSite (dfA's column is
# named N_Sites); harmless here since the legend is suppressed.
dfA = data.frame(N_Sites = Ncnts, Estimate = r_estA_bt)
pltA = ggplot(dfA, aes(x=Estimate, fill = NSite)) + geom_density(alpha=.3) +
labs(x="Estimated Population Trend", y = "Probability of Estimate") +
labs(title = paste0("Probability of Detecting Trend in Acoustic-Estimated Density"),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites, Density estimated from Conversion Fxn" )) +
geom_vline(xintercept = TRUE_r, colour = 'red') +
annotate("text", x = TRUE_r, y = 5, label = "True r", colour = 'red', hjust = -.3) +
geom_vline(xintercept = rA_CIs[1], colour = 'magenta', linetype="dashed") +
geom_vline(xintercept = rA_CIs[2], colour = 'magenta', linetype="dashed") +
theme(legend.position="none")
print(pltA)
#
# 3) Power estimating Density Trends, Counts only (Possible only if 3 or more sets of counts)
if(npts>=3){
# Redefined so the SE denominator reflects the number of survey years
# (npts) rather than the number of acoustic sites
sample_Rmn <- function(x, d) {
return(rnorm(1,mean(x[d]), sd(x[d])/sqrt(npts)))
}
# Pval_C = median(PvalC)
PowerC = 100*(length(subset(PvalC,PvalC<=(1-P_signif)))/length(PvalC))
PowerC_samp = boot(PvalC, Power_mn, R=1000)
dfPower = rbind(dfPower, data.frame(Method = "Nest Count Density Estimate",Power=PowerC_samp$t))
# PowerC = 100*(1-Pval_C)
Psig_C = mean(PsigC)
rC_CIs = colMeans(rC_CI)
mean_r_estC = mean(rC_est)
r_estC_bt = boot(rC_est, sample_Rmn, R=10000)
r_estC_bt = r_estC_bt$t
PowersumC = data.frame(N_Years = Nyrs, True_r = TRUE_r, Sigma_r = Sigma_r,
N_Sites = Ncnts, Yr_bt_Cnts = Countfreq,
RepNC_st = NcountsPSite,
Est_r = format(mean_r_estC, digits=3),
CI_r_Lo = format(rC_CIs[1], digits = 3),
CI_r_Hi = format(rC_CIs[2], digits = 3),
Power = format(PowerC, digits=3))
#
print("Power Summary, Density estimated from Count Data Only")
stargazer(PowersumC, type = 'text', out = 'out.txt', summary=FALSE, rownames=FALSE)
#
# NOTE(review): this reassignment clobbers dfC, which earlier held the
# nest-count data read from loadCdat
dfC = data.frame(N_Sites = Ncnts, Estimate = r_estC_bt)
pltC = ggplot(dfC, aes(x=Estimate, fill = NSite)) + geom_density(alpha=.3) +
labs(x="Estimated Population Trend", y = "Probability of Estimate") +
labs(title = paste0("Probability of Detecting trend, Counts Only, "),
subtitle = paste0("Monitor ", Nyrs, " Years, nest counts at ", Ncnts,
" sites, ", NcountsPSite, " reps per site, every ", Countfreq, " Years" )) +
geom_vline(xintercept = TRUE_r, colour = 'red') +
annotate("text", x = TRUE_r, y = 5, label = "True r", colour = 'red', hjust = -.3) +
geom_vline(xintercept = rC_CIs[1], colour = 'magenta', linetype="dashed") +
geom_vline(xintercept = rC_CIs[2], colour = 'magenta', linetype="dashed") +
theme(legend.position="none")
print(pltC)
}
# Side-by-side boxplot of bootstrapped power for every method accumulated
# in dfPower (call rate, acoustic density, and - if run - nest counts)
pltP = ggplot(dfPower, aes(x=Method, y=Power)) +
geom_boxplot(fill = "light blue", colour = "black",
alpha = 0.7) +
scale_y_continuous(name = "Power to Detect Trend",labels = scales::comma) +
scale_x_discrete(name = "Method of Analysis") +
labs(title = paste0("Power Analysis, Comparison of Methods"),
subtitle = paste0("Acoustic ", Nyrs," Years with ", NSite, " Sites vs. ",
"Nest Counts at ", Ncnts, " sites, ", NcountsPSite, " reps per site, every ", Countfreq, " Years" )) +
theme(axis.text.x=element_text(angle=45,hjust=1))
print(pltP)
} # end of Countfreq branch (acoustic-only vs acoustic + nest counts)
| /BayesCallsPower2_10.R | no_license | mttinker/Acoustic | R | false | false | 24,682 | r | # Program Summary ---------------------------------------------------------
# Script to run Power Analysis to determine ability to detect trends
# in seabird colonies using data from Acoustic monitors and nest counts,
# based on results from existing analyses
# NOTE: this script must be called up from "set-up" script that sets params
#
#
# Load necessary libraries (may need to install some of these) ------------
library(ggplot2)
library(dplyr)
library(gdata)
library(mvtnorm)
library(stats)
library(boot)
library(stargazer)
library(lme4)
library(car)
# Load Data ---------------------------------------------------------------
# Load results from Acoustic analysis (param values)
# NOTE(review): attach()/detach() on a saved workspace is fragile; loading
# into a dedicated environment would be safer.
attach(loadfile);
s_statsA <- s_stats;
Stratalist <- Stratalist
vnA <- vn
detach(paste0('file:',loadfile),character.only = TRUE)
# rm(vn)
if(Countfreq==0){
ii = which(Stratalist$StratName==Strata_trends)
CR0 = s_statsA$Mean[which(startsWith(vnA,paste0("C[",ii)))]
minCsite = min(s_statsA$Mean[which(startsWith(vnA,paste0("Csite[",ii)))] )
Dispers = s_statsA$Mean[which(startsWith(vnA,"Dispers"))]
sigS = s_statsA$Mean[which(startsWith(vnA,"sigS"))] # variance CR across sites
Vs = sigS^2
NSite = NSiteA
Nyrs = NyrsP
Years = 1:Nyrs
# Initialize Variables
# Stats for calls
r_est = numeric(length=simreps)
Pval = numeric(length=simreps)
Psig = numeric(length=simreps)
r_CI = matrix(nrow=simreps,ncol=2)
#
C = matrix(0,nrow = simreps,ncol = Nyrs)
Ahat = matrix(0,nrow = simreps,ncol = Nyrs)
Ahatsd = matrix(0,nrow = simreps,ncol = Nyrs)
# Some matrices that get over-written
EstA = matrix(0,nrow = NSite, ncol = Nyrs)
# Run Sims
for (r in 1:simreps){
C[r,1] = CR0
for (t in 2:Nyrs){
C[r,t] = C[r,t-1]*exp(rnorm(1,TRUE_r,Sigma_r))
}
cntr = 0;
for (t in 1:Nyrs){
cntr = cntr + 1
muS = log(C[r,t]/sqrt(1+Vs/C[r,t]^2))
sgS = sqrt(log(1+ Vs/C[r,t]^2))
Csite = numeric(length=NSite)
for (s in 1:NSite){
Csite[s] = rlnorm(1,muS,sgS)
mu = Csite[s]
vr = ifelse(mu<0,0.0000001, mu + (mu^2)/Dispers)
p = max(0.00000001,1-(vr-mu)/vr)
Calls = rnbinom(RecPsite,Dispers,p)
EstA[s,t] = max(minCsite/20,mean(Calls))
}
Ahat[r,t] = mean(EstA[,t])
Ahatsd[r,t] = sd(EstA[,t])
# tmp=C[r,t]-Ahat[r,t] # Not sure if this is necessary
}
# Stats for Call rate trend est
lgN = log(Ahat[r,])
fit = lm(lgN ~ Years)
# summary(fit)
r_est[r] = as.numeric(fit$coefficients[2])
# Pval[r] = lmp(fit)
Pval[r] = as.numeric(summary(fit)$coefficients[2,4])
r_CI[r,] = as.numeric(confint(fit,level=P_signif)[2,])
if (sign(r_CI[r,1])==sign(TRUE_r) & sign(r_CI[r,2])==sign(TRUE_r)){
Psig[r] = 1
}else{
Psig[r] = 0
}
}
# Summarize sim stats ---------------------------------------------------
# C_true = colMeans(C, na.rm = "T")
C_true = apply(C,2,quantile,.5)
C_true_CIL = apply(C,2,quantile,.025)
C_true_CIH = apply(C,2,quantile,.975)
# CR_estA = colMeans(Ahat, na.rm = "T")
# CR_estA_CIL = colMeans(Ahat-1.96*(Ahatsd/sqrt(NSite)), na.rm = "T")
# CR_estA_CIH = colMeans(Ahat+1.96*(Ahatsd/sqrt(NSite)), na.rm = "T")
tmp = apply(Ahat,2,quantile,.5)
cor = C_true[1]/tmp[1]
CR_estA = tmp*cor
# CR_estA_CIL = apply(Ahat,2,quantile,.05)
# CR_estA_CIH = apply(Ahat,2,quantile,.95)
CR_estA_CIL = CR_estA -(apply(Ahat,2,sd))/sqrt(NSite)
CR_estA_CIH = CR_estA +(apply(Ahat,2,sd))/sqrt(NSite)
x = seq(1,Nyrs)
# Trend plots:
# Call rate estimates
dat = data.frame(Year=x,Esimate=CR_estA,lower=CR_estA_CIL,upper=CR_estA_CIH,True=C_true)
pltR1 = ggplot() +
geom_errorbar(data=dat, mapping=aes(x=Year, ymax=upper, ymin=lower), width=0.2, size=1, color="blue") +
geom_point(data=dat, mapping=aes(x=Year, y=Esimate), size=4, shape=21, fill="white") +
geom_line(data=dat, mapping=aes(x=Year, y=True),linetype = "dashed")+
labs(x="Year", y = "Estimated Call Rate") +
labs(title = paste0("Estimated Call Rate Over Time, r = ",TRUE_r, ", process error = ", Sigma_r,
", "),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",RecPsite,
" Acoustic Records per site (Dashed line = Actual Trend)"))
print(pltR1)
# Power Stats Summaries -------------------------------------------------------------
# 1) Power to Detect Trends in Call rates
sample_Rmn <- function(x, d) {
return(rnorm(1,mean(x[d]), sd(x[d])/sqrt(NSite)))
}
Power_mn <- function(x, d) {
return(100*(length(subset(x[d],x[d]<=(1-P_signif)))/length(x[d])))
}
PowerCR = 100*(length(subset(Pval,Pval<=(1-P_signif)))/length(Pval))
PowerCR_samp = boot(Pval, Power_mn, R=1000)
dfPower = data.frame(Method = "Acoustic Data, Call rate",Power=PowerCR_samp$t)
P_sig = mean(Psig)
r_CIs = colMeans(r_CI)
mean_r_est = mean(r_est)
r_est_bt = boot(r_est, sample_Rmn, R=10000)
r_est_bt = r_est_bt$t
Powersum = data.frame(N_Years = Nyrs, True_r = TRUE_r, Sigma_r = Sigma_r,
N_Sites = NSite, N_CPM15_st = RecPsite,
Est_r = format(mean_r_est, digits=3),
CI_r_Lo = format(r_CIs[1], digits = 3),
CI_r_Hi = format(r_CIs[2], digits = 3),
Power = format(PowerCR, digits=3))
print("Power Summary, Trend in Call Rate Estimated from Acoustic Data")
stargazer(Powersum, type = 'text', out = 'out.txt', summary=FALSE, rownames=FALSE)
print(" ")
print(" ")
df = data.frame(N_Sites = NSite, Estimate = r_est_bt)
plt = ggplot(df, aes(x=Estimate, fill = NSite)) + geom_density(alpha=.3) +
labs(title = paste0("Probability of Detecting trend in Call Rate"),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",
RecPsite, " Acoustic Records per site" ),
x="Estimated Trend in Call Rate", y = "Probability of Estimate") +
geom_vline(xintercept = TRUE_r, colour = 'red') +
annotate("text", x = TRUE_r, y = 5, label = "True r", colour = 'red', hjust = -.3) +
geom_vline(xintercept = r_CIs[1], colour = 'magenta', linetype="dashed") +
geom_vline(xintercept = r_CIs[2], colour = 'magenta', linetype="dashed") +
theme(legend.position="none")
print(plt)
}else{
# Load Call-Count conversion function
attach(loadfile2);
Convfxn = Convertfxn
Convplot = Convertplot
s_statsC <- s_stats;
vnC = vn
detach(paste0('file:',loadfile2),character.only = TRUE)
AB = rmvnorm(n=simreps, mean=Convfxn$means, sigma=Convfxn$covmat)
alphR = AB[,1]; BetaR = AB[,2]; rm(AB)
# Load count data
dfArea = read.csv(file = Areasdatfile, header = TRUE, sep = ",")
dfC = read.csv(file = loadCdat, header = TRUE, sep = ",")
#
# Get params for simulations ----------------------------------------------
#
# Fit linear mixed model to nest counts to get estimates of variance
# for sites within year and sequential counts within site
ii = which(dfC$StrataName==Strata_trends)
dfNC = dfC[ii,]
dfNC$logNC = log(dfNC$Density+1.0001)
lmm <- lmer(Density ~ contract_year + (1 | SPID), data = dfNC,
REML = FALSE)
# summary(lmm)
# Test = Anova(lmm) # get P value for linear mixed model
tmp = as.data.frame(VarCorr(lmm)) # extract variance components
sigN = tmp$sdcor[1]
sigC = tmp$sdcor[2]
Yrcounts = unique(dfNC$contract_year)
# Dens0 = mean(dfNC$Density[dfNC$contract_year==min(Yrcounts)]) # Initial nest density
ii = which(Stratalist$StratName==Strata_trends)
CR0 = s_statsA$Mean[which(startsWith(vnA,paste0("C[",ii)))]
minCsite = min(s_statsA$Mean[which(startsWith(vnA,paste0("Csite[",ii)))] )
Dispers = s_statsA$Mean[which(startsWith(vnA,"Dispers"))]
sigS = s_statsA$Mean[which(startsWith(vnA,"sigS"))] # variance CR across sites
sigD = s_statsC$Mean[which(startsWith(vnC,"sigD"))] # Variance in dens est from calls
alph = s_statsC$Mean[which(startsWith(vnC,"alpha"))] # Param 1 for call-count convers
Beta= s_statsC$Mean[which(startsWith(vnC,"Beta"))] # Param 2 for call-count convers
Dens0 = alph*CR0^Beta
#rm(s_statsC)
# rm(s_statsA)
Vn = sigN^2
Vc = sigC^2
Vs = sigS^2
Vd = sigD^2
NSite = NSiteA
Nyrs = NyrsP
Years = 1:Nyrs
Ncnts = NSiteC
# Initialize Variables
# Stats for calls
r_est = numeric(length=simreps)
Pval = numeric(length=simreps)
Psig = numeric(length=simreps)
r_CI = matrix(nrow=simreps,ncol=2)
# Stats for density est by acoustic data
rA_est = numeric(length=simreps)
PvalA = numeric(length=simreps)
PsigA = numeric(length=simreps)
rA_CI = matrix(nrow=simreps,ncol=2)
# Stats for density est by nest counts
rC_est = numeric(length=simreps)
PvalC = numeric(length=simreps)
PsigC = numeric(length=simreps)
rC_CI = matrix(nrow=simreps,ncol=2)
C = matrix(0,nrow = simreps,ncol = Nyrs)
D = matrix(0,nrow = simreps,ncol = Nyrs)
DhatA = matrix(0,nrow = simreps,ncol = Nyrs)
DhatAsd = matrix(0,nrow = simreps,ncol = Nyrs)
DhatC = matrix(0,nrow = simreps,ncol = Nyrs)
DhatCsd = matrix(0,nrow = simreps,ncol = Nyrs)
Ahat = matrix(0,nrow = simreps,ncol = Nyrs)
Ahatsd = matrix(0,nrow = simreps,ncol = Nyrs)
# Some matrices that get over-written
EstD = matrix(0,nrow = NSite, ncol = Nyrs)
EstA = matrix(0,nrow = NSite, ncol = Nyrs)
EstNC = matrix(0,nrow = Ncnts, ncol = Nyrs)
NCT = numeric(length=NcountsPSite)
# Nste = numeric(length=Ncnts)
# Run Sims
for (r in 1:simreps){
D[r,1] = Dens0
C[r,1] = CR0
for (t in 2:Nyrs){
D[r,t] = D[r,t-1]*exp(rnorm(1,TRUE_r,Sigma_r))
# C[r,t] = C[r,t-1]*exp(rnorm(1,TRUE_r,Sigma_r))
C[r,t] = (D[r,t]/alph)^(1/Beta)
# D[r,t] = alph*C[r,t]^Beta
}
cntr = 0;
for (t in 1:Nyrs){
cntr = cntr + 1
muS = log(C[r,t]/sqrt(1+Vs/C[r,t]^2))
sgS = sqrt(log(1+ Vs/C[r,t]^2))
muN = log(D[r,t]/sqrt(1+Vn/D[r,t]^2))
sgN = sqrt(log(1+ Vn/D[r,t]^2))
Csite = numeric(length=NSite)
DensN = numeric(length=Ncnts)
if (cntr == Countfreq){
for (s in 1:Ncnts){
DensN[s] = rlnorm(1,muN,sgN)
muC = log(DensN[s]/sqrt(1+Vc/DensN[s]^2))
sgC = sqrt(log(1+ Vc/DensN[s]^2))
for (c in 1:NcountsPSite){
NCT[c] = rlnorm(1,muC,sgC)
}
EstNC[s,t] = mean(NCT[c])
}
DhatC[r,t] = mean(EstNC[,t])
DhatCsd[r,t] = sd(EstNC[,t])
cntr = 0;
}else{
DhatC[r,t] = NA
DhatCsd[r,t] = NA
}
for (s in 1:NSite){
Csite[s] = rlnorm(1,muS,sgS)
mu = Csite[s]
vr = ifelse(mu<0,0.0000001, mu + (mu^2)/Dispers)
p = max(0.00000001,1-(vr-mu)/vr)
Calls = rnbinom(RecPsite,Dispers,p)
EstA[s,t] = max(minCsite/20,mean(Calls))
expD = alph*EstA[s,t]^Beta
muD = log(expD/sqrt(1+Vd/expD^2))
sgD = sqrt(log(1+ Vd/expD^2))
EstD[s,t] = rlnorm(1,muD,sgD)
# EstD[s,t] = expD
}
Ahat[r,t] = mean(EstA[,t])
Ahatsd[r,t] = sd(EstA[,t])
# DhatA[r,t] = mean(EstD[,t])
DhatA[r,t] = alphR[r]*Ahat[r,t]^BetaR[r]
DhatAsd[r,t] = sd(EstD[,t])
tmp=C[r,t]-Ahat[r,t]
}
# Stats for Call rate trend est
lgN = log(Ahat[r,])
fit = lm(lgN ~ Years)
# summary(fit)
r_est[r] = as.numeric(fit$coefficients[2])
# Pval[r] = lmp(fit)
Pval[r] = as.numeric(summary(fit)$coefficients[2,4])
r_CI[r,] = as.numeric(confint(fit,level=P_signif)[2,])
if (sign(r_CI[r,1])==sign(TRUE_r) & sign(r_CI[r,2])==sign(TRUE_r)){
Psig[r] = 1
}else{
Psig[r] = 0
}
# Stats for acoustic data density est
lgNA = log(DhatA[r,])
fitA = lm(lgNA ~ Years, na.action=na.omit)
# summary(fitA)
# PvalC[r] = lmp(fitC)
rA_est[r] = as.numeric(fitA$coefficients[2])
PvalA[r] = as.numeric(summary(fitA)$coefficients[2,4])
rA_CI[r,] = as.numeric(confint(fitA,level=P_signif)[2,])
if (sign(rA_CI[r,1])==sign(TRUE_r) & sign(rA_CI[r,2])==sign(TRUE_r)){
PsigA[r] = 1
}else{
PsigA[r] = 0
}
# Stats for nest count density est
npts = sum(!is.na(DhatC[r,]))
if(npts>2){
lgNC = log(DhatC[r,])
fitC = lm(lgNC ~ Years, na.action=na.omit)
# summary(fit)
# PvalC[r] = lmp(fitC)
rC_est[r] = as.numeric(fitC$coefficients[2])
PvalC[r] = as.numeric(summary(fitC)$coefficients[2,4])
rC_CI[r,] = as.numeric(confint(fitC,level=P_signif)[2,])
if (sign(rC_CI[r,1])==sign(TRUE_r) & sign(rC_CI[r,2])==sign(TRUE_r)){
PsigC[r] = 1
}else{
PsigC[r] = 0
}
}
}
# Summarize sim stats ---------------------------------------------------
# Across-replicate, per-year summaries.  "True" simulated trajectories
# (D = density, C = call rate) are reported as the median with a 95%
# inter-quantile envelope; estimates are reported as the median +/- one
# standard error across sites.
# NOTE(review): the estimate bounds are +/- 1 SE (roughly a 68% band),
# not a 95% CI (1.96 * SE) — confirm this is intentional.  Commented-out
# code in the original also tried colMeans +/- 1.96*SE and 5%/95%
# quantiles as alternatives.
D_true     = apply(D, 2, quantile, probs = 0.5)
D_true_CIL = apply(D, 2, quantile, probs = 0.025)
D_true_CIH = apply(D, 2, quantile, probs = 0.975)
C_true     = apply(C, 2, quantile, probs = 0.5)
C_true_CIL = apply(C, 2, quantile, probs = 0.025)
C_true_CIH = apply(C, 2, quantile, probs = 0.975)
# Estimated call rate from acoustic data.
CR_estA     = apply(Ahat, 2, quantile, probs = 0.5)
CR_estA_CIL = CR_estA - (apply(Ahat, 2, sd)) / sqrt(NSite)
CR_estA_CIH = CR_estA + (apply(Ahat, 2, sd)) / sqrt(NSite)
# Estimated density from acoustic data via the call-to-count conversion.
D_estA     = apply(DhatA, 2, quantile, probs = 0.5)
D_estA_CIL = D_estA - (apply(DhatA, 2, sd)) / sqrt(NSite)
D_estA_CIH = D_estA + (apply(DhatA, 2, sd)) / sqrt(NSite)
# Estimated density from nest counts.  DhatC holds NA in years without
# counts, so NAs are dropped.  Fix: na.rm takes the logical TRUE — the
# original passed the string "T", which only worked through R's silent
# coercion of "T" to TRUE.
D_estC     = apply(DhatC, 2, quantile, probs = 0.5, na.rm = TRUE)
D_estC_CIL = D_estC - (apply(DhatC, 2, sd, na.rm = TRUE)) / sqrt(Ncnts)
D_estC_CIH = D_estC + (apply(DhatC, 2, sd, na.rm = TRUE)) / sqrt(Ncnts)
# Year index for the x axis of all trend plots.
x = seq(1,Nyrs)
# Trend plots:
# Call rate estimates
# NOTE(review): "Esimate" is a typo for "Estimate"; kept as-is because
# the aes() mappings below refer to the column by this exact name.
dat = data.frame(Year=x,Esimate=CR_estA,lower=CR_estA_CIL,upper=CR_estA_CIH,True=C_true)
# Estimated call rate per year (points + error bars) against the true
# simulated call-rate trajectory (dashed line).
pltR1 = ggplot() +
geom_errorbar(data=dat, mapping=aes(x=Year, ymax=upper, ymin=lower), width=0.2, size=1, color="blue") +
geom_point(data=dat, mapping=aes(x=Year, y=Esimate), size=4, shape=21, fill="white") +
geom_line(data=dat, mapping=aes(x=Year, y=True),linetype = "dashed")+
labs(x="Year", y = "Estimated Call Rate") +
labs(title = paste0("Estimated Call Rate Over Time, r = ",TRUE_r, ", process error = ", Sigma_r,
", "),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",RecPsite,
" Acoustic Records per site (Dashed line = Actual Trend)"))
print(pltR1)
#
# Density Estimates, Acoustic and Counts
# Acoustic-based density estimates per year against the true simulated
# density (dashed line).  ("Esimate" typo retained — see note above the
# call-rate plot; the aes() below depends on the exact column name.)
dat = data.frame(Year=x,Esimate=D_estA,lower=D_estA_CIL,upper=D_estA_CIH,True=D_true)
pltR2 = ggplot() +
geom_errorbar(data=dat, mapping=aes(x=Year, ymax=upper, ymin=lower), width=0.2, size=1, color="blue") +
geom_point(data=dat, mapping=aes(x=Year, y=Esimate), size=4, shape=21, fill="white") +
geom_line(data=dat, mapping=aes(x=Year, y=True),linetype = "dashed")+
labs(x="Year", y = "Estimated Density based on Call-Count Conversion") +
labs(title = paste0("Estimated Density Over Time, Acoustic Data, r = ",TRUE_r, ", process error = ", Sigma_r,
", "),
subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",RecPsite,
" Acoustic Records per site (Dashed line = Actual Trend)"))
print(pltR2)
#
# Density Estimates, Counts only (if possible)
# Count-based density exists only in count years; plot only when at
# least one year has a non-NA estimate.  (npts is reused further below
# by the count-only power section.)
npts = sum(!is.na(D_estC))
if(npts>0){
dat = data.frame(Year=x,Esimate=D_estC,lower=D_estC_CIL,upper=D_estC_CIH,True=D_true)
pltR3 = ggplot() +
geom_errorbar(data=dat, mapping=aes(x=Year, ymax=upper, ymin=lower), width=0.2, size=1, color="blue") +
geom_point(data=dat, mapping=aes(x=Year, y=Esimate), size=4, shape=21, fill="white") +
geom_line(data=dat, mapping=aes(x=Year, y=True),linetype = "dashed")+
labs(x="Year", y = "Estimated Density from Nest Counts") +
labs(title = paste0("Estimated Density Over Time, Nest Counts, r = ",TRUE_r, ", process error = ", Sigma_r,
", "),
subtitle = paste0("Monitor ", Nyrs, " Years, Nest counts at ", Ncnts,
" sites, ", NcountsPSite, " reps per site, every ",
Countfreq, " Years (Dashed line = Actual Trend)"))
print(pltR3)
}
# Power Stats Summaries -------------------------------------------------------------
# 1) Power to Detect Trends in Call rates
# Bootstrap statistic: a parametric draw of the mean trend estimate,
# given the spread of the replicate estimates (SE uses NSite, the
# number of acoustic sites).
sample_Rmn <- function(x, d) {
  return(rnorm(1, mean(x[d]), sd(x[d]) / sqrt(NSite)))
}
# Bootstrap statistic: percent of replicate p-values at or below alpha,
# where alpha = 1 - P_signif (e.g. P_signif = 0.95 -> alpha = 0.05).
# Equivalent to the original length(subset(...))/length(...) form:
# subset() dropped NAs from the numerator, which sum(..., na.rm = TRUE)
# reproduces exactly.
Power_mn <- function(x, d) {
  return(100 * sum(x[d] <= (1 - P_signif), na.rm = TRUE) / length(x[d]))
}
# Point estimate of power, plus a bootstrap distribution of it.
PowerCR = 100 * sum(Pval <= (1 - P_signif), na.rm = TRUE) / length(Pval)
PowerCR_samp = boot(Pval, Power_mn, R = 1000)
dfPower = data.frame(Method = "Acoustic Data, Call rate", Power = PowerCR_samp$t)
P_sig = mean(Psig)
r_CIs = colMeans(r_CI)
mean_r_est = mean(r_est)
r_est_bt = boot(r_est, sample_Rmn, R = 10000)
r_est_bt = r_est_bt$t
# Summary table for the call-rate trend power analysis.
# (The original branched on npts > 0 here, but both branches built the
# exact same data frame, so the dead conditional has been removed.)
Powersum = data.frame(N_Years = Nyrs, True_r = TRUE_r, Sigma_r = Sigma_r,
                      N_Sites = NSite, N_CPM15_st = RecPsite,
                      Est_r = format(mean_r_est, digits = 3),
                      CI_r_Lo = format(r_CIs[1], digits = 3),
                      CI_r_Hi = format(r_CIs[2], digits = 3),
                      Power = format(PowerCR, digits = 3))
print("Power Summary, Trend in Call Rate Estimated from Acoustic Data")
stargazer(Powersum, type = 'text', out = 'out.txt', summary = FALSE, rownames = FALSE)
print(" ")
print(" ")
# Density of the bootstrapped trend estimates, with the true trend and
# the mean CI limits overlaid.
df = data.frame(N_Sites = NSite, Estimate = r_est_bt)
plt = ggplot(df, aes(x = Estimate, fill = NSite)) + geom_density(alpha = .3) +
  labs(title = paste0("Probability of Detecting trend in Call Rate"),
       subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites and ",
                         RecPsite, " Acoustic Records per site" ),
       x = "Estimated Trend in Call Rate", y = "Probability of Estimate") +
  geom_vline(xintercept = TRUE_r, colour = 'red') +
  annotate("text", x = TRUE_r, y = 5, label = "True r", colour = 'red', hjust = -.3) +
  geom_vline(xintercept = r_CIs[1], colour = 'magenta', linetype = "dashed") +
  geom_vline(xintercept = r_CIs[2], colour = 'magenta', linetype = "dashed") +
  theme(legend.position = "none")
print(plt)
# ggsave(plt,filename=paste0('PowerAnalysis_',Species,'_NT_31May17.jpg'))
# 2) Power estimating Density Trends, Acoustic Data plus Calls to Counts Conversion Fxn
# Power = percent of replicates whose density-trend p-value clears alpha
# (same subset()-equivalent form as PowerCR above).
PowerA = 100 * sum(PvalA <= (1 - P_signif), na.rm = TRUE) / length(PvalA)
PowerA_samp = boot(PvalA, Power_mn, R = 1000)
dfPower = rbind(dfPower, data.frame(Method = "Acoustic-based Density Estimate", Power = PowerA_samp$t))
Psig_A = mean(PsigA)
rA_CIs = colMeans(rA_CI)
mean_r_estA = mean(rA_est)
# Kernel density of the replicate slope estimates; not used below —
# retained in case code outside this excerpt consumes it.
r_est_densA = density(rA_est)
r_estA_bt = boot(rA_est, sample_Rmn, R = 10000)
r_estA_bt = r_estA_bt$t
# Summary table.  (The original branched on npts > 0 with two identical
# branches; the dead conditional has been removed.)
PowersumA = data.frame(N_Years = Nyrs, True_r = TRUE_r, Sigma_r = Sigma_r,
                       N_Sites = NSite, N_CPM15_st = RecPsite,
                       Est_r = format(mean_r_estA, digits = 3),
                       CI_r_Lo = format(rA_CIs[1], digits = 3),
                       CI_r_Hi = format(rA_CIs[2], digits = 3),
                       Power = format(PowerA, digits = 3))
print("Power Summary, Density Estimated from Acoustic Data")
stargazer(PowersumA, type = 'text', out = 'out.txt', summary = FALSE, rownames = FALSE)
print(" ")
print(" ")
#
# NOTE(review): N_Sites is filled from Ncnts here although this is the
# acoustic-based estimate (NSite sites) — looks like a copy-paste from
# the count section.  Behavior kept as-is; confirm which is intended.
dfA = data.frame(N_Sites = Ncnts, Estimate = r_estA_bt)
pltA = ggplot(dfA, aes(x = Estimate, fill = NSite)) + geom_density(alpha = .3) +
  labs(x = "Estimated Population Trend", y = "Probability of Estimate") +
  labs(title = paste0("Probability of Detecting Trend in Acoustic-Estimated Density"),
       subtitle = paste0("Monitor ", Nyrs," Years with ", NSite, " Sites, Density estimated from Conversion Fxn" )) +
  geom_vline(xintercept = TRUE_r, colour = 'red') +
  annotate("text", x = TRUE_r, y = 5, label = "True r", colour = 'red', hjust = -.3) +
  geom_vline(xintercept = rA_CIs[1], colour = 'magenta', linetype = "dashed") +
  geom_vline(xintercept = rA_CIs[2], colour = 'magenta', linetype = "dashed") +
  theme(legend.position = "none")
print(pltA)
#
# 3) Power estimating Density Trends, Counts only (Possible only if 3 or more sets of counts)
# npts here is the number of years with a non-NA count-based density
# estimate (set in the plotting section above).
if(npts>=3){
# Redefines sample_Rmn so the SE divisor is the number of usable count
# years (npts) instead of NSite — this overrides the earlier definition
# for the remainder of the script.
sample_Rmn <- function(x, d) {
return(rnorm(1,mean(x[d]), sd(x[d])/sqrt(npts)))
}
# Pval_C = median(PvalC)
# Percent of replicates with a significant count-based trend.
PowerC = 100*(length(subset(PvalC,PvalC<=(1-P_signif)))/length(PvalC))
PowerC_samp = boot(PvalC, Power_mn, R=1000)
dfPower = rbind(dfPower, data.frame(Method = "Nest Count Density Estimate",Power=PowerC_samp$t))
# PowerC = 100*(1-Pval_C)
Psig_C = mean(PsigC)
rC_CIs = colMeans(rC_CI)
mean_r_estC = mean(rC_est)
r_estC_bt = boot(rC_est, sample_Rmn, R=10000)
r_estC_bt = r_estC_bt$t
PowersumC = data.frame(N_Years = Nyrs, True_r = TRUE_r, Sigma_r = Sigma_r,
N_Sites = Ncnts, Yr_bt_Cnts = Countfreq,
RepNC_st = NcountsPSite,
Est_r = format(mean_r_estC, digits=3),
CI_r_Lo = format(rC_CIs[1], digits = 3),
CI_r_Hi = format(rC_CIs[2], digits = 3),
Power = format(PowerC, digits=3))
#
print("Power Summary, Density estimated from Count Data Only")
stargazer(PowersumC, type = 'text', out = 'out.txt', summary=FALSE, rownames=FALSE)
#
# Bootstrapped trend-estimate distribution for the count-only method.
dfC = data.frame(N_Sites = Ncnts, Estimate = r_estC_bt)
pltC = ggplot(dfC, aes(x=Estimate, fill = NSite)) + geom_density(alpha=.3) +
labs(x="Estimated Population Trend", y = "Probability of Estimate") +
labs(title = paste0("Probability of Detecting trend, Counts Only, "),
subtitle = paste0("Monitor ", Nyrs, " Years, nest counts at ", Ncnts,
" sites, ", NcountsPSite, " reps per site, every ", Countfreq, " Years" )) +
geom_vline(xintercept = TRUE_r, colour = 'red') +
annotate("text", x = TRUE_r, y = 5, label = "True r", colour = 'red', hjust = -.3) +
geom_vline(xintercept = rC_CIs[1], colour = 'magenta', linetype="dashed") +
geom_vline(xintercept = rC_CIs[2], colour = 'magenta', linetype="dashed") +
theme(legend.position="none")
print(pltC)
}
# Side-by-side boxplot of the bootstrapped power values for each method
# accumulated in dfPower (count method included only when npts >= 3).
pltP = ggplot(dfPower, aes(x=Method, y=Power)) +
geom_boxplot(fill = "light blue", colour = "black",
alpha = 0.7) +
scale_y_continuous(name = "Power to Detect Trend",labels = scales::comma) +
scale_x_discrete(name = "Method of Analysis") +
labs(title = paste0("Power Analysis, Comparison of Methods"),
subtitle = paste0("Acoustic ", Nyrs," Years with ", NSite, " Sites vs. ",
"Nest Counts at ", Ncnts, " sites, ", NcountsPSite, " reps per site, every ", Countfreq, " Years" )) +
theme(axis.text.x=element_text(angle=45,hjust=1))
print(pltP)
# Closes the enclosing scope opened above this excerpt.
}
# -------------------------------------------------------------------------
#Comparing ces15 and ces19 Block Recursive Models
library(nnet)
library(broom)
library(purrr)
# ces15phone %>%
# mutate(union_both=case_when(
# PES15_93==1 | PES15_94==1 ~ 1,
# PES15_93==5 | PES15_94==5 ~ 0,
# PES15_93==8 & PES15_94==8 ~ NA_real_,
# PES15_93==9 & PES15_94==9 ~ NA_real_,
# ))->ces15phone
#### Some 2015 recodes ####
###These could be cleaned up
# working_class: 1 = manual labour (occupation codes 4-5), 0 = other
# employed classes (1-3); everything else -> NA.
ces15phone$working_class<-Recode(ces15phone$occupation, "4:5=1; 3=0; 2=0; 1=0; else=NA")
#This collapses the two labour categories into one working class
ces15phone$occupation2<-Recode(as.factor(ces15phone$occupation), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual'))
#This collapses the two labour categories into one working class; maintaining self-employed as a unique distinction
ces15phone$occupation4<-Recode(as.factor(ces15phone$occupation3), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'; 6='Self-Employed'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual', 'Self-Employed'))
#this is the NDP vote variable
# NOTE(review): this NA-preserving ndp variable is overwritten near the
# bottom of this section by an "else=0" version that codes missing as 0.
ces15phone$ndp<-car::Recode(ces15phone$vote, "3=1; 0:2=0; 4:5=0; NA=NA")
table(ces15phone$working_class)
table(ces15phone$ndp)
#Let's put the working class variables in order
ces15phone$occupation2<-fct_relevel(ces15phone$occupation2, "Managers", "Professionals", "Routine_Nonmanual", 'Working_Class')
ces15phone$occupation4<-fct_relevel(ces15phone$occupation4, "Managers", "Self-Employed", "Professionals", "Routine_Nonmanual", 'Working_Class')
table(ces15phone$occupation4)
# working_class2: like working_class but codes self-employed (6) as 0.
ces15phone$working_class2<-Recode(ces15phone$occupation3, "4:5=1; 3=0; 2=0; 1=0; 6=0; else=NA")
table(ces15phone$working_class2)
#Turn region into factor with East as reference case
ces15phone$region3<-Recode(as.factor(ces15phone$region), "1='East' ; 2='Ontario' ; 3='West'", levels=c('East', 'Ontario', 'West'))
levels(ces15phone$region3)
table(ces15phone$region3)
#Turn income into factor with Middle as reference
ces15phone$income3<-Recode(as.factor(ces15phone$income), "1='Low_Income' ; 2:4='Middle_Income' ; 5='High_Income'", levels=c('Low_Income', 'Middle_Income', 'High_Income'))
levels(ces15phone$income3)
table(ces15phone$income3)
#Other dummies
ces15phone$low_income<-Recode(ces15phone$income, "2:5=0; 1=1")
ces15phone$high_income<-Recode(ces15phone$income, "1:4=0; 5=1")
ces15phone$no_religion<-Recode(ces15phone$religion, "0=1; 1:3=0; NA=NA")
ces15phone$catholic<-Recode(ces15phone$religion, "1=1; 2:3=0; 0=0; NA=NA")
# NOTE(review): the young/old dummies use an upper age bound of 115 here
# but 100 in the 2019 recodes below — harmless unless ages exceed 100,
# but worth harmonizing.
ces15phone$young<-Recode(ces15phone$age, "35:115=0; 18:34=1")
ces15phone$old<-Recode(ces15phone$age, "55:115=1; 18:54=0")
ces15phone$foreign<-Recode(ces15phone$native, "1=0; 0=1")
#Dummies coded missing as 0
#ces15phone$low_income<-Recode(ces15phone$income, "else=0; 1=1")
#ces15phone$high_income<-Recode(ces15phone$income, "else=0; 5=1")
#ces15phone$no_religion<-Recode(ces15phone$religion, "0=1; else=0")
#ces15phone$catholic<-Recode(ces15phone$religion, "1=1; else=0")
#ces15phone$young<-Recode(ces15phone$age, "else=0; 18:34=1")
#ces15phone$old<-Recode(ces15phone$age, "55:100=1; else=0")
#ces15phone$foreign<-Recode(ces15phone$native, "else=0; 0=1")
table(ces15phone$low_income)
table(ces15phone$high_income)
table(ces15phone$no_religion)
table(ces15phone$catholic)
table(ces15phone$young)
table(ces15phone$old)
table(ces15phone$foreign)
# Re-code the class dummies so anything not 1 (incl. the NAs created
# above) becomes 0 — presumably intentional "missing as 0" coding;
# confirm Recode's else clause captures NA as intended.
ces15phone$working_class<-Recode(ces15phone$working_class, "1=1; else=0")
ces15phone$working_class2<-Recode(ces15phone$working_class2, "1=1; else=0")
#ces15phone$union_both<-Recode(ces15phone$union_both, "1=1; else=0")
#ces15phone$male<-Recode(ces15phone$male, "1=1; else=0")
#ces15phone$sector<-Recode(ces15phone$sector, "1=1; else=0")
#ces15phone$degree<-Recode(ces15phone$degree, "1=1; else=0")
#ces15phone$language<-Recode(ces15phone$language, "1=1; else=0")
table(ces15phone$working_class)
table(ces15phone$working_class2)
table(ces15phone$union_both)
table(ces15phone$male)
table(ces15phone$sector)
table(ces15phone$degree)
table(ces15phone$language)
# Party Id
# Party-ID dummies: missing/other coded 0 ("else=0"); NA-preserving
# alternatives kept commented out.
#ces15phone$liberal_id<-Recode(ces15phone$party_id, "1=1; 0=0; 2:4=0; else=NA")
#ces15phone$conservative_id<-Recode(ces15phone$party_id, "2=1; 0:1=0; 3:4=0; else=NA")
#ces15phone$ndp_id<-Recode(ces15phone$party_id, "3=1; 0:2=0; 4=0; else=NA")
#ces15phone$bloc_id<-Recode(ces15phone$party_id, "4=1; 0:3=0; else=NA")
ces15phone$liberal_id<-Recode(ces15phone$party_id, "1=1; else=0")
ces15phone$conservative_id<-Recode(ces15phone$party_id, "2=1; else=0")
ces15phone$ndp_id<-Recode(ces15phone$party_id, "3=1; else=0")
ces15phone$bloc_id<-Recode(ces15phone$party_id, "4=1; else=0")
table(ces15phone$liberal_id)
table(ces15phone$conservative_id)
table(ces15phone$ndp_id)
table(ces15phone$bloc_id)
# Party vote
# Vote dummies: missing/other coded 0 — this overwrites the
# NA-preserving ndp defined at the top of this section.
#ces15phone$liberal<-Recode(ces15phone$vote, "1=1; 0=0; 2:5=0; else=NA")
#ces15phone$conservative<-Recode(ces15phone$vote, "2=1; 0:1=0; 3:5=0; else=NA")
#ces15phone$ndp<-Recode(ces15phone$vote, "3=1; 0:2=0; 4:5=0; else=NA")
#ces15phone$bloc<-Recode(ces15phone$vote, "4=1; 0:3=0; 5=0; else=NA")
#ces15phone$green<-Recode(ces15phone$vote, "5=1; 0:4=0; else=NA")
ces15phone$liberal<-Recode(ces15phone$vote, "1=1; else=0")
ces15phone$conservative<-Recode(ces15phone$vote, "2=1; else=0")
ces15phone$ndp<-Recode(ces15phone$vote, "3=1; else=0")
ces15phone$bloc<-Recode(ces15phone$vote, "4=1; else=0")
ces15phone$green<-Recode(ces15phone$vote, "5=1; else=0")
table(ces15phone$liberal)
table(ces15phone$conservative)
table(ces15phone$ndp)
table(ces15phone$bloc)
table(ces15phone$green)
#### 2019 Recodes ####
#Recodes
# Mirrors the 2015 recode section above, applied to the 2019 wave.
#CREATE WORKING CLASS DICHOTOMOUS VARIABLE; NOTE HERE ONLY EMPLOYED AND SELF-EMPLOYED PEOPLE ARE SET TO 0 OR 1; ELSE = NA
ces19phone$working_class<-Recode(ces19phone$occupation, "4:5=1; 3=0; 2=0; 1=0; else=NA")
#This collapses the two labour categories into one working class
ces19phone$occupation2<-Recode(as.factor(ces19phone$occupation), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual'))
#This collapses the two labour categories into one working class; maintaining self-employed as a unique distinction
ces19phone$occupation4<-Recode(as.factor(ces19phone$occupation3), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'; 6='Self-Employed'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual', 'Self-Employed'))
#this is the NDP vote variable
# NOTE(review): overwritten below by the "else=0" vote dummy, as in the
# 2015 section.
ces19phone$ndp<-car::Recode(ces19phone$vote, "3=1; 0:2=0; 4:5=0; NA=NA")
table(ces19phone$working_class)
table(ces19phone$ndp)
#Let's put the working class variables in order
ces19phone$occupation2<-fct_relevel(ces19phone$occupation2, "Managers", "Professionals", "Routine_Nonmanual", 'Working_Class')
ces19phone$occupation4<-fct_relevel(ces19phone$occupation4, "Managers", "Self-Employed", "Professionals", "Routine_Nonmanual", 'Working_Class')
table(ces19phone$occupation4)
ces19phone$working_class2<-Recode(ces19phone$occupation3, "4:5=1; 3=0; 2=0; 1=0; 6=0; else=NA")
table(ces19phone$working_class2)
#Turn region into factor with East as reference case
ces19phone$region3<-Recode(as.factor(ces19phone$region), "1='East' ; 2='Ontario' ; 3='West'", levels=c('East', 'Ontario', 'West'))
levels(ces19phone$region3)
table(ces19phone$region3)
#Turn income into factor with Middle as reference
ces19phone$income3<-Recode(as.factor(ces19phone$income), "1='Low_Income' ; 2:4='Middle_Income' ; 5='High_Income'", levels=c('Low_Income', 'Middle_Income', 'High_Income'))
levels(ces19phone$income3)
table(ces19phone$income3)
#Other dummies
ces19phone$low_income<-Recode(ces19phone$income, "2:5=0; 1=1")
ces19phone$high_income<-Recode(ces19phone$income, "1:4=0; 5=1")
ces19phone$no_religion<-Recode(ces19phone$religion, "0=1; 1:3=0; NA=NA")
ces19phone$catholic<-Recode(ces19phone$religion, "1=1; 2:3=0; 0=0; NA=NA")
# NOTE(review): upper age bound is 100 here vs 115 in the 2015 section.
ces19phone$young<-Recode(ces19phone$age, "35:100=0; 18:34=1")
ces19phone$old<-Recode(ces19phone$age, "55:100=1; 18:54=0")
ces19phone$foreign<-Recode(ces19phone$native, "1=0; 0=1")
#Dummies coded missing as 0
#ces19phone$low_income<-Recode(ces19phone$income, "else=0; 1=1")
#ces19phone$high_income<-Recode(ces19phone$income, "else=0; 5=1")
#ces19phone$no_religion<-Recode(ces19phone$religion, "0=1; else=0")
#ces19phone$catholic<-Recode(ces19phone$religion, "1=1; else=0")
#ces19phone$young<-Recode(ces19phone$age, "else=0; 18:34=1")
#ces19phone$old<-Recode(ces19phone$age, "55:100=1; else=0")
#ces19phone$foreign<-Recode(ces19phone$native, "else=0; 0=1")
table(ces19phone$low_income)
table(ces19phone$high_income)
table(ces19phone$no_religion)
table(ces19phone$catholic)
table(ces19phone$young)
table(ces19phone$old)
table(ces19phone$foreign)
# Class dummies re-coded so non-1 values (incl. NA) become 0.
ces19phone$working_class<-Recode(ces19phone$working_class, "1=1; else=0")
ces19phone$working_class2<-Recode(ces19phone$working_class2, "1=1; else=0")
#ces19phone$union_both<-Recode(ces19phone$union_both, "1=1; else=0")
#ces19phone$male<-Recode(ces19phone$male, "1=1; else=0")
#ces19phone$sector<-Recode(ces19phone$sector, "1=1; else=0")
#ces19phone$degree<-Recode(ces19phone$degree, "1=1; else=0")
#ces19phone$language<-Recode(ces19phone$language, "1=1; else=0")
table(ces19phone$working_class)
table(ces19phone$working_class2)
table(ces19phone$union_both)
table(ces19phone$male)
table(ces19phone$sector)
table(ces19phone$degree)
table(ces19phone$language)
# Party Id
# Party-ID dummies: missing/other coded 0.
#ces19phone$liberal_id<-Recode(ces19phone$party_id, "1=1; 0=0; 2:4=0; else=NA")
#ces19phone$conservative_id<-Recode(ces19phone$party_id, "2=1; 0:1=0; 3:4=0; else=NA")
#ces19phone$ndp_id<-Recode(ces19phone$party_id, "3=1; 0:2=0; 4=0; else=NA")
#ces19phone$bloc_id<-Recode(ces19phone$party_id, "4=1; 0:3=0; else=NA")
ces19phone$liberal_id<-Recode(ces19phone$party_id, "1=1; else=0")
ces19phone$conservative_id<-Recode(ces19phone$party_id, "2=1; else=0")
ces19phone$ndp_id<-Recode(ces19phone$party_id, "3=1; else=0")
ces19phone$bloc_id<-Recode(ces19phone$party_id, "4=1; else=0")
table(ces19phone$liberal_id)
table(ces19phone$conservative_id)
table(ces19phone$ndp_id)
table(ces19phone$bloc_id)
# Party vote
# Vote dummies: missing/other coded 0.
#ces19phone$liberal<-Recode(ces19phone$vote, "1=1; 0=0; 2:5=0; else=NA")
#ces19phone$conservative<-Recode(ces19phone$vote, "2=1; 0:1=0; 3:5=0; else=NA")
#ces19phone$ndp<-Recode(ces19phone$vote, "3=1; 0:2=0; 4:5=0; else=NA")
#ces19phone$bloc<-Recode(ces19phone$vote, "4=1; 0:3=0; 5=0; else=NA")
#ces19phone$green<-Recode(ces19phone$vote, "5=1; 0:4=0; else=NA")
ces19phone$liberal<-Recode(ces19phone$vote, "1=1; else=0")
ces19phone$conservative<-Recode(ces19phone$vote, "2=1; else=0")
ces19phone$ndp<-Recode(ces19phone$vote, "3=1; else=0")
ces19phone$bloc<-Recode(ces19phone$vote, "4=1; else=0")
ces19phone$green<-Recode(ces19phone$vote, "5=1; else=0")
table(ces19phone$liberal)
table(ces19phone$conservative)
table(ces19phone$ndp)
table(ces19phone$bloc)
table(ces19phone$green)
#First make a ces15 roc data frame
# Keep only the variables used in the block-recursive vote models.
ces15phone %>%
  select(ndp, liberal, conservative, bloc, region3, working_class2, union_both, young, old, male, sector, catholic, no_religion, degree, foreign, low_income, high_income, language,
         market_liberalism, moral_traditionalism, political_disaffection, continentalism, quebec_sovereignty, ndp_id, liberal_id, conservative_id, bloc_id, personal_retrospective,
         national_retrospective, immigration_rate, environment, redistribution, defence, liberal_leader, conservative_leader, ndp_leader, bloc_leader, quebec, occupation4, minorities, immigration, immigration2, minorities_help, mip=mip)->out15
#Now an ces19data frame
# Same variable set for the 2019 wave; mip is taken from mip_cat here.
# (immigration_rate appeared twice in the original select(); dplyr keeps
# a repeated column only once, so the redundant duplicate was removed.)
ces19phone %>%
  # filter(quebec!=1) %>%
  select(ndp, liberal, conservative, bloc, region3, working_class2, union_both, young, old, male, sector, catholic, no_religion, degree, foreign, low_income, high_income, language,
         market_liberalism, moral_traditionalism, political_disaffection, continentalism, quebec_sovereignty, ndp_id, liberal_id, conservative_id, bloc_id, personal_retrospective,
         national_retrospective, immigration_rate, environment, redistribution, defence, liberal_leader, conservative_leader, ndp_leader, bloc_leader, quebec, occupation4, minorities, immigration, immigration2, minorities_help, mip=mip_cat)->out19
# Survey indicator: 0 = 2015, 1 = 2019 (drives the *survey interactions
# in the models below).
out15$survey<-rep(0, nrow(out15))
out19$survey<-rep(1, nrow(out19))
val_labels(out15$survey)<-c(`2015`=0, `2019`=1)
# Stack the waves, then split into Rest-of-Canada and Quebec samples.
out15 %>%
  bind_rows(., out19)->out
roc<-out %>%
  filter(quebec!=1)
qc<-out %>%
  filter(quebec==1)
#### NDP ROC ####
# Block-recursive logits for NDP vote (Rest of Canada): each block adds
# a stage of covariates, all interacted with survey (2015 vs 2019) so
# the interaction terms test whether effects changed between waves.
# Block 1: social structure.
block1<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income)*survey, family="binomial", data=roc)
# Block 2: + values and beliefs.
block2<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism)*survey, family="binomial", data=roc)
# Block 3: + party identification.
block3<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id)*survey, family="binomial", data=roc)
# Block 4: + economic retrospections.
block4<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=roc)
# Block 5: + issue positions.
block5<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=roc)
# Block 6: + leader evaluations.
block6<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader)*survey, family="binomial", data=roc)
#Turn into a list
roc_ndp<-list(block1, block2, block3, block4, block5, block6)
names(roc_ndp)<-c("block1", "block2", "block3", "block4", "block5", "block6")
library(kableExtra)
library(knitr)
## This code pulls it all nicely together.
# Builds a table of the survey-interaction coefficients: each term is
# reported from the FIRST block in which it enters the model.
roc_ndp %>%
#Tidy each of the models
map(., tidy) %>%
#bind them; the argument .id="Block" makes a new variable called Block filling it with the names of the list items set by the names() assignment above
bind_rows(., .id="Block") %>%
#filter in only the interaction terms; they all contain :survey
filter(str_detect(term,":survey")) %>%
#Keep only the first instance of each interaction term
#First group by term; this forms groups of the terms
group_by(term) %>%
#this picks only the first term
slice(1) %>%
#get rid of the :survey
mutate(term=str_replace_all(term, ":survey", "")) %>%
#arrange them by block so that block1 variables appear first
arrange(Block) %>%
select(Block, term, estimate,p.value)->roc_ndp_table
#
# #Save the values to be bolded here
# #This requires changing which object is used e.g. roc_ndp_table might become qc_ndp_table etc.
# to_bold<-roc_ndp_table$p.value<0.05
# roc_ndp_table %>%
# kable(., digits=2) %>%
# #bold the third and fourth columns
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/ndp_roc_interaction.html")
#### NDP QC ####
# Quebec sample: region dummies are replaced by language, and
# quebec_sovereignty plus Bloc terms (bloc_id, bloc_leader) enter the
# blocks; otherwise the block structure mirrors the ROC models.
block1<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign)*survey, family="binomial", data=qc)
block2<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty)*survey, family="binomial", data=qc)
block3<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id)*survey, family="binomial", data=qc)
block4<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=qc)
block5<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=qc)
block6<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader+bloc_leader)*survey, family="binomial", data=qc)
#Turn into a list
qc_ndp<-list(block1, block2, block3, block4, block5, block6)
names(qc_ndp)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Survey-interaction table, first block of entry per term (same pipeline
# as the ROC NDP table).
qc_ndp %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->qc_ndp_table
#
# #Save the values to be bolded here
# to_bold<-qc_ndp_table$p.value<0.05
# qc_ndp_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/ndp_qc_interaction.html")
#### Conservative ROC####
# Same block-recursive structure as the NDP ROC models, with
# Conservative vote as the outcome.
block1<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income)*survey, family="binomial", data=roc)
block2<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism)*survey, family="binomial", data=roc)
block3<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id)*survey, family="binomial", data=roc)
block4<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=roc)
block5<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=roc)
block6<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader)*survey, family="binomial", data=roc)
#Turn into a list
roc_conservative<-list(block1, block2, block3, block4, block5, block6)
names(roc_conservative)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Survey-interaction table, first block of entry per term.
roc_conservative %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->roc_conservative_table
# #Save the values to be bolded here
# to_bold<-roc_conservative_table$p.value<0.05
# roc_conservative_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/conservative_roc_interaction.html")
#### Conservative QC ####
# Quebec-sample Conservative models; same structure as the NDP QC
# blocks (language instead of region, plus sovereignty and Bloc terms).
block1<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign)*survey, family="binomial", data=qc)
block2<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty)*survey, family="binomial", data=qc)
block3<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id)*survey, family="binomial", data=qc)
block4<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=qc)
block5<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=qc)
block6<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader+bloc_leader)*survey, family="binomial", data=qc)
#Turn into a list
qc_conservative<-list(block1, block2, block3, block4, block5, block6)
names(qc_conservative)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Survey-interaction table, first block of entry per term.
qc_conservative %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->qc_conservative_table
#Save the values to be bolded here
# to_bold<-qc_conservative_table$p.value<0.05
# qc_conservative_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/conservative_qc_interaction.html")
#### Liberal ROC Interaction ####
# Liberal vote, rest of Canada (ROC): block-recursive models, each block's
# covariates interacted with `survey` (0 = 2015, 1 = 2019).
block1<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income)*survey, family="binomial", data=roc)
block2<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism)*survey, family="binomial", data=roc)
block3<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id)*survey, family="binomial", data=roc)
block4<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=roc)
block5<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=roc)
block6<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader)*survey, family="binomial", data=roc)
#Turn into a list
roc_liberal<-list(block1, block2, block3, block4, block5, block6)
names(roc_liberal)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Pull the survey-interaction coefficients, keeping the first (earliest-block)
# appearance of each term, and store as a compact table.
roc_liberal %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->roc_liberal_table
#Save the values to be bolded here
# to_bold<-roc_liberal_table$p.value<0.05
# roc_liberal_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/liberal_roc_interaction.html")
#### Liberal QC Interaction ####
# Liberal vote, Quebec: block-recursive models, each block's covariates
# interacted with `survey` (0 = 2015, 1 = 2019).
block1<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign)*survey, family="binomial", data=qc)
block2<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty)*survey, family="binomial", data=qc)
block3<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id)*survey, family="binomial", data=qc)
block4<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=qc)
block5<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=qc)
block6<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader+bloc_leader)*survey, family="binomial", data=qc)
#Turn into a list
qc_liberal<-list(block1, block2, block3, block4, block5, block6)
names(qc_liberal)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Extract survey-interaction coefficients (first appearance of each term).
qc_liberal %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->qc_liberal_table
#Save the values to be bolded here
# to_bold<-qc_liberal_table$p.value<0.05
# qc_liberal_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/liberal_qc_interaction.html")
#### Bloc QC Interaction ####
# Bloc Quebecois vote, Quebec: block-recursive models, each block's
# covariates interacted with `survey` (0 = 2015, 1 = 2019).
block1<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign)*survey, family="binomial", data=qc)
block2<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty)*survey, family="binomial", data=qc)
block3<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id)*survey, family="binomial", data=qc)
block4<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=qc)
block5<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=qc)
block6<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader+bloc_leader)*survey, family="binomial", data=qc)
#Turn into a list so all six models can be tidied together
qc_bloc<-list(block1, block2, block3, block4, block5, block6)
names(qc_bloc)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Survey-interaction coefficients for the Bloc models, one row per term.
qc_bloc_table <- qc_bloc %>%
  map(tidy) %>%
  bind_rows(.id = "Block") %>%
  # restrict to the 2015-vs-2019 interaction terms
  filter(str_detect(term, ":survey")) %>%
  # first (earliest-block) appearance of each interaction only
  group_by(term) %>%
  slice(1) %>%
  # drop the ":survey" suffix from the term labels
  mutate(term = str_replace_all(term, ":survey", "")) %>%
  arrange(Block) %>%
  select(Block, term, estimate, p.value)
#Save the values to be bolded here
# to_bold<-qc_bloc_table$p.value<0.05
# qc_bloc_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/bloc_qc_interaction.html")
#### Format Nice comprehensive QC and ROC Tables
#Step 1 combine all the parties' tables into roc and qc
# NOTE(review): cbind() produces duplicated Block/term columns (one set per
# party); the select(-...) calls further down drop them by *position*, so the
# column ORDER here matters. qc_table order is: NDP, Bloc, Liberal,
# Conservative -- any rename by position downstream must match that order.
roc_table<-cbind(roc_ndp_table, roc_liberal_table, roc_conservative_table)
qc_table<-cbind(qc_ndp_table, qc_bloc_table, qc_liberal_table, qc_conservative_table)
library(flextable)
#### Combine ROC Table ####
#Drop out terms we don't need.
names(roc_table)
#Keep the first Block and the first term
#So drop columns 5 and 9, 6 and 10 (the duplicated Block/term columns)
roc_table %>%
select(-5, -9, -6, -10) %>%
#Rename them just proper names. roc_table was cbind(ndp, liberal,
#conservative), so after the drop cols 3-4 = NDP, 5-6 = Liberal,
#7-8 = Conservative -- these labels match.
rename(., Block=1, term=2, NDP=3, sig_ndp=4, Liberal=5,sig_liberal=6, Conservative=7, sig_con=8) %>%
#Turn this object into a flextable object. See https://davidgohel.github.io/flextable/
flextable(.) %>%
#format the flextable to two digits
#NOTE(review): `digits=` was deprecated in newer flextable releases in
#favour of colformat_double() -- confirm installed flextable version.
colformat_num(digits=2) %>%
#bold function bolds rows i that meet conditions and column j
#So here, it bolds rows i where sig_ndp < 0.05 and only bolds columns j
#note that it uses formula notation ~
bold(., i=~sig_ndp< 0.05, j=~NDP+sig_ndp) %>%
#Repeat for Liberals
bold(., i=~sig_liberal< 0.05, j=~Liberal+sig_liberal) %>%
#conservatives
bold(., i=~sig_con< 0.05, j=~Conservative+sig_con) %>%
#This sets the background colour conditional on the term
#So if it is block1, 3 or 5, grey it out.
bg(., i=~str_detect(Block, "block1|block3|block5"), bg="grey") %>%
add_header_lines(values=c("ROC Block Recursive Model Coefficients, 2015 and 2019")) %>% save_as_html("Tables/roc_block_recursive_table.html")
####Combine Quebec Table ####
# Build the formatted Quebec table and save it to HTML.
#First check the names
names(qc_table)
#Keep the first Block and the first term
#So drop columns 5, 6 and 9, 10 and 13 and 14 (duplicated Block/term cols)
qc_table %>%
  select(-5, -6, -9,-10, -13, -14) %>%
  # BUG FIX: qc_table was built as cbind(ndp, bloc, liberal, conservative),
  # so after the drop the estimate/p.value pairs are NDP (3-4), BQ (5-6),
  # Liberal (7-8), Conservative (9-10). The previous rename labelled
  # columns 5-10 as Liberal/Conservative/BQ, which displayed Bloc estimates
  # under "Liberal", Liberal under "Conservative" and Conservative under
  # "BQ". The mapping below matches the actual cbind order.
  rename(., Block=1, term=2, NDP=3, sig_ndp=4, BQ=5, sig_bq=6, Liberal=7, sig_liberal=8, Conservative=9, sig_con=10) %>%
  flextable(.) %>%
  colformat_num(digits=2) %>%
  # Bold each party's coefficient/p-value pair where p < 0.05
  bold(., i=~sig_ndp< 0.05, j=~NDP+sig_ndp) %>%
  # FIX: the BQ columns were previously never bolded, unlike the other
  # parties -- apply the same significance rule to them.
  bold(., i=~sig_bq< 0.05, j=~BQ+sig_bq) %>%
  bold(., i=~sig_liberal< 0.05, j=~Liberal+sig_liberal) %>%
  bold(., i=~sig_con< 0.05, j=~Conservative+sig_con) %>%
  # Grey out alternating blocks for readability
  bg(., i=~str_detect(Block, "block1|block3|block5"), bg="grey") %>%
  add_header_lines(values=c("Quebec Block Recursive Model Coefficients, 2015 and 2019")) %>%
  save_as_html(., "Tables/qc_block_recursive_model.html")
#save_as_docx(., path="Tables/qc_block_recursive_model.docx")
#### Run some checks with what appears in the table####
#Spot-check a few coefficients against the saved tables to be sure they are correct.
#Each model is stored in roc_ndp, qc_ndp etc., under $block1, $block2, ...
#Pick a few randomly across blocks and across qc/roc -- enough to rule out a mistake.
summary(roc_ndp$block1)#Interaction coefficient for male:survey is -0.38; I have confirmed visually it is -0.39 in the file roc_block_recursive_table.
summary(roc_liberal$block1)
summary(roc_conservative$block3)
## Did the union movement really go down for all parties?
# Compare the union_both interaction across the three ROC party tables.
roc_ndp_table %>%
filter(term=="union_both")
roc_liberal_table %>%
filter(term=="union_both")
roc_conservative_table %>%
filter(term=="union_both")
#### Policy variation change between 2015-19####
#scales goes from -1 Left to +1 Right
library(psych)
#Positive RW scales (no need to reverse code)
table(ces15phone$moral_traditionalism, useNA="ifany")
table(ces15phone$market_liberalism, useNA="ifany")
table(ces15phone$continentalism, useNA="ifany")
# NOTE(review): bare expression -- only auto-prints interactively; looks
# like a leftover inspection line.
ces15phone$redistribution
#Reverse code positive LW scales to positive RW scales
# psych::reverse.code with key -1 flips each item so higher = right-wing.
out$environment<-reverse.code(-1, out[,'environment'])
out$redistribution<-reverse.code(-1, out[,'redistribution'])
out$immigration<-reverse.code(-1, out[,'immigration'])
out$immigration2<-reverse.code(-1, out[,'immigration2'])
out$immigration_rate<-reverse.code(-1, out[,'immigration_rate'])
out$minorities_help<-reverse.code(-1, out[,'minorities_help'])
#checks
table(out$environment, useNA="ifany")
table(out$redistribution, useNA="ifany")
table(out$immigration, useNA="ifany")
table(out$immigration2, useNA="ifany")
table(out$immigration_rate, useNA="ifany")
table(out$minorities_help, useNA="ifany")
# NOTE(review): the six table() calls below exactly duplicate the six
# above -- harmless, but one set could be removed.
table(out$environment, useNA="ifany")
table(out$redistribution, useNA="ifany")
table(out$immigration, useNA="ifany")
table(out$immigration2, useNA="ifany")
table(out$immigration_rate, useNA="ifany")
table(out$minorities_help, useNA="ifany")
#Policy rating changes
####Attitudinal change ####
# Mean attitude score by survey year and class, then the 2019-2015
# difference within each item x class cell, plotted faceted by item.
out %>%
select(immigration, immigration2, immigration_rate, minorities_help, environment, redistribution, continentalism, moral_traditionalism, market_liberalism, survey, occupation4) %>%
pivot_longer(cols=immigration:market_liberalism) %>%
group_by(survey, occupation4, name) %>%
summarize(Average=mean(value, na.rm=T)) %>%
# within item x class, lag() pairs 2019 with 2015 after sorting by survey
arrange(occupation4, name, survey) %>%
group_by(name, occupation4) %>%
mutate(Difference=Average-lag(Average)) %>%
# keep only the 2019 rows, which now carry the 2019-2015 difference
filter(survey==1) %>%
ggplot(., aes(x=occupation4, y=Difference))+geom_point(position="jitter")+ylim(-0.115,0.15)+labs(x="Class", y="Difference (2019-2015)", caption="This graph shows the difference between 2015 and 2019 scores on a range of items by social class.\nThe items have all been scored from 0 to 1 so a shift by 0.1 equals a 10% shift in the underlying sentiment.\n All items have been scored here such that positive differences are a shift to the right\nand negative scores are a shift to the left", title="Attitudinal Differences by Social Class", subtitle="CES 2015 and 2019")+facet_wrap(~name)+coord_flip()+geom_hline(yintercept=0, linetype=2)
ggsave("Plots/attitudinal_differences_2015_2019.png")
#Leader rating changes
# Same lag-difference approach as above, applied to leader thermometer
# ratings by class (not saved to disk).
out %>%
select(liberal_leader, conservative_leader, ndp_leader, bloc_leader, survey, occupation4) %>%
pivot_longer(cols=liberal_leader:bloc_leader) %>%
group_by(survey, occupation4, name) %>%
summarize(Average=mean(value, na.rm=T)) %>%
arrange(occupation4, name, survey) %>%
group_by(name, occupation4) %>%
mutate(Difference=Average-lag(Average)) %>%
filter(survey==1) %>%
ggplot(., aes(x=occupation4, y=Difference, col=name))+geom_point(position="jitter")+ylim(-0.2,0.2)+labs(x="Class", y="Difference (2019-2015)")
#### Most important problem####
# Counts of "most important problem" responses by survey year,
# dodged bar chart ordered by frequency.
out %>%
group_by(Survey=as_factor(survey), `Most Important Problem`=as_factor(mip)) %>%
summarise(n=n()) %>%
filter(!is.na(`Most Important Problem`)) %>%
ggplot(., aes(y=reorder(`Most Important Problem`,n), x=n, fill=Survey))+geom_col(position="dodge")+scale_fill_grey()+labs(y="Most Important Problem")
ggsave("Plots/mip_2015_2019.png")
# ---- file: R_Scripts/8_block_recursive_models.R (repo: sjkiss/CES_Analysis) ----
#Comparing ces15 and ces19 Block Recursive Models
library(nnet)
library(broom)
library(purrr)
# ces15phone %>%
# mutate(union_both=case_when(
# PES15_93==1 | PES15_94==1 ~ 1,
# PES15_93==5 | PES15_94==5 ~ 0,
# PES15_93==8 & PES15_94==8 ~ NA_real_,
# PES15_93==9 & PES15_94==9 ~ NA_real_,
# ))->ces15phone
#### Some 2015 recodes ####
###These could be cleaned up
# Occupation codes: 1=Professionals, 2=Managers, 3=Routine non-manual,
# 4:5=Working class (skilled + unskilled labour) -- per the Recodes below.
ces15phone$working_class<-Recode(ces15phone$occupation, "4:5=1; 3=0; 2=0; 1=0; else=NA")
#This collapses the two labour categories into one working class
ces15phone$occupation2<-Recode(as.factor(ces15phone$occupation), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual'))
#This collapses the two labour categories into one working class; maintaining self-employed as a unique distinction
ces15phone$occupation4<-Recode(as.factor(ces15phone$occupation3), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'; 6='Self-Employed'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual', 'Self-Employed'))
#this is the NDP vote variable (NA-preserving; overwritten by the
#"else=0" version further down in this section)
ces15phone$ndp<-car::Recode(ces15phone$vote, "3=1; 0:2=0; 4:5=0; NA=NA")
table(ces15phone$working_class)
table(ces15phone$ndp)
#Let's put the working class variables in order
ces15phone$occupation2<-fct_relevel(ces15phone$occupation2, "Managers", "Professionals", "Routine_Nonmanual", 'Working_Class')
ces15phone$occupation4<-fct_relevel(ces15phone$occupation4, "Managers", "Self-Employed", "Professionals", "Routine_Nonmanual", 'Working_Class')
table(ces15phone$occupation4)
# Working class from occupation3 (self-employed coded 0 rather than NA)
ces15phone$working_class2<-Recode(ces15phone$occupation3, "4:5=1; 3=0; 2=0; 1=0; 6=0; else=NA")
table(ces15phone$working_class2)
#Turn region into factor with East as reference case
ces15phone$region3<-Recode(as.factor(ces15phone$region), "1='East' ; 2='Ontario' ; 3='West'", levels=c('East', 'Ontario', 'West'))
levels(ces15phone$region3)
table(ces15phone$region3)
#Turn income into factor with Middle as reference
ces15phone$income3<-Recode(as.factor(ces15phone$income), "1='Low_Income' ; 2:4='Middle_Income' ; 5='High_Income'", levels=c('Low_Income', 'Middle_Income', 'High_Income'))
levels(ces15phone$income3)
table(ces15phone$income3)
#Other dummies
ces15phone$low_income<-Recode(ces15phone$income, "2:5=0; 1=1")
ces15phone$high_income<-Recode(ces15phone$income, "1:4=0; 5=1")
ces15phone$no_religion<-Recode(ces15phone$religion, "0=1; 1:3=0; NA=NA")
ces15phone$catholic<-Recode(ces15phone$religion, "1=1; 2:3=0; 0=0; NA=NA")
# NOTE(review): upper age bound here is 115 but the 2019 recode uses 100 --
# confirm intended; only matters if any respondent is coded older than 100.
ces15phone$young<-Recode(ces15phone$age, "35:115=0; 18:34=1")
ces15phone$old<-Recode(ces15phone$age, "55:115=1; 18:54=0")
ces15phone$foreign<-Recode(ces15phone$native, "1=0; 0=1")
#Dummies coded missing as 0
#ces15phone$low_income<-Recode(ces15phone$income, "else=0; 1=1")
#ces15phone$high_income<-Recode(ces15phone$income, "else=0; 5=1")
#ces15phone$no_religion<-Recode(ces15phone$religion, "0=1; else=0")
#ces15phone$catholic<-Recode(ces15phone$religion, "1=1; else=0")
#ces15phone$young<-Recode(ces15phone$age, "else=0; 18:34=1")
#ces15phone$old<-Recode(ces15phone$age, "55:100=1; else=0")
#ces15phone$foreign<-Recode(ces15phone$native, "else=0; 0=1")
table(ces15phone$low_income)
table(ces15phone$high_income)
table(ces15phone$no_religion)
table(ces15phone$catholic)
table(ces15phone$young)
table(ces15phone$old)
table(ces15phone$foreign)
# NOTE(review): "1=1; else=0" maps NA to 0 as well (missing treated as
# not-working-class) -- consistent with the commented alternatives above,
# but verify this is deliberate.
ces15phone$working_class<-Recode(ces15phone$working_class, "1=1; else=0")
ces15phone$working_class2<-Recode(ces15phone$working_class2, "1=1; else=0")
#ces15phone$union_both<-Recode(ces15phone$union_both, "1=1; else=0")
#ces15phone$male<-Recode(ces15phone$male, "1=1; else=0")
#ces15phone$sector<-Recode(ces15phone$sector, "1=1; else=0")
#ces15phone$degree<-Recode(ces15phone$degree, "1=1; else=0")
#ces15phone$language<-Recode(ces15phone$language, "1=1; else=0")
table(ces15phone$working_class)
table(ces15phone$working_class2)
table(ces15phone$union_both)
table(ces15phone$male)
table(ces15phone$sector)
table(ces15phone$degree)
table(ces15phone$language)
# Party Id (dummies; NA treated as 0 via "else=0")
#ces15phone$liberal_id<-Recode(ces15phone$party_id, "1=1; 0=0; 2:4=0; else=NA")
#ces15phone$conservative_id<-Recode(ces15phone$party_id, "2=1; 0:1=0; 3:4=0; else=NA")
#ces15phone$ndp_id<-Recode(ces15phone$party_id, "3=1; 0:2=0; 4=0; else=NA")
#ces15phone$bloc_id<-Recode(ces15phone$party_id, "4=1; 0:3=0; else=NA")
ces15phone$liberal_id<-Recode(ces15phone$party_id, "1=1; else=0")
ces15phone$conservative_id<-Recode(ces15phone$party_id, "2=1; else=0")
ces15phone$ndp_id<-Recode(ces15phone$party_id, "3=1; else=0")
ces15phone$bloc_id<-Recode(ces15phone$party_id, "4=1; else=0")
table(ces15phone$liberal_id)
table(ces15phone$conservative_id)
table(ces15phone$ndp_id)
table(ces15phone$bloc_id)
# Party vote (dummies; NA treated as 0 via "else=0")
#ces15phone$liberal<-Recode(ces15phone$vote, "1=1; 0=0; 2:5=0; else=NA")
#ces15phone$conservative<-Recode(ces15phone$vote, "2=1; 0:1=0; 3:5=0; else=NA")
#ces15phone$ndp<-Recode(ces15phone$vote, "3=1; 0:2=0; 4:5=0; else=NA")
#ces15phone$bloc<-Recode(ces15phone$vote, "4=1; 0:3=0; 5=0; else=NA")
#ces15phone$green<-Recode(ces15phone$vote, "5=1; 0:4=0; else=NA")
ces15phone$liberal<-Recode(ces15phone$vote, "1=1; else=0")
ces15phone$conservative<-Recode(ces15phone$vote, "2=1; else=0")
# overwrites the NA-preserving ndp recode from the top of this section
ces15phone$ndp<-Recode(ces15phone$vote, "3=1; else=0")
ces15phone$bloc<-Recode(ces15phone$vote, "4=1; else=0")
ces15phone$green<-Recode(ces15phone$vote, "5=1; else=0")
table(ces15phone$liberal)
table(ces15phone$conservative)
table(ces15phone$ndp)
table(ces15phone$bloc)
table(ces15phone$green)
#### 2019 Recodes ####
#Recodes -- parallel to the 2015 section above, applied to ces19phone.
#CREATE WORKING CLASS DICHOTOMOUS VARIABLE; NOTE HERE ONLY EMPLOYED AND SELF-EMPLOYED PEOPLE ARE SET TO 0 OR 1; ELSE = NA
ces19phone$working_class<-Recode(ces19phone$occupation, "4:5=1; 3=0; 2=0; 1=0; else=NA")
#This collapses the two labour categories into one working class
ces19phone$occupation2<-Recode(as.factor(ces19phone$occupation), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual'))
#This collapses the two labour categories into one working class; maintaining self-employed as a unique distinction
ces19phone$occupation4<-Recode(as.factor(ces19phone$occupation3), "4:5='Working_Class' ; 3='Routine_Nonmanual' ; 2='Managers' ; 1='Professionals'; 6='Self-Employed'", levels=c('Working_Class', 'Managers', 'Professionals', 'Routine_Nonmanual', 'Self-Employed'))
#this is the NDP vote variable (NA-preserving; overwritten by the
#"else=0" version further down in this section)
ces19phone$ndp<-car::Recode(ces19phone$vote, "3=1; 0:2=0; 4:5=0; NA=NA")
table(ces19phone$working_class)
table(ces19phone$ndp)
#Let's put the working class variables in order
ces19phone$occupation2<-fct_relevel(ces19phone$occupation2, "Managers", "Professionals", "Routine_Nonmanual", 'Working_Class')
ces19phone$occupation4<-fct_relevel(ces19phone$occupation4, "Managers", "Self-Employed", "Professionals", "Routine_Nonmanual", 'Working_Class')
table(ces19phone$occupation4)
ces19phone$working_class2<-Recode(ces19phone$occupation3, "4:5=1; 3=0; 2=0; 1=0; 6=0; else=NA")
table(ces19phone$working_class2)
#Turn region into factor with East as reference case
ces19phone$region3<-Recode(as.factor(ces19phone$region), "1='East' ; 2='Ontario' ; 3='West'", levels=c('East', 'Ontario', 'West'))
levels(ces19phone$region3)
table(ces19phone$region3)
#Turn income into factor with Middle as reference
ces19phone$income3<-Recode(as.factor(ces19phone$income), "1='Low_Income' ; 2:4='Middle_Income' ; 5='High_Income'", levels=c('Low_Income', 'Middle_Income', 'High_Income'))
levels(ces19phone$income3)
table(ces19phone$income3)
#Other dummies
ces19phone$low_income<-Recode(ces19phone$income, "2:5=0; 1=1")
ces19phone$high_income<-Recode(ces19phone$income, "1:4=0; 5=1")
ces19phone$no_religion<-Recode(ces19phone$religion, "0=1; 1:3=0; NA=NA")
ces19phone$catholic<-Recode(ces19phone$religion, "1=1; 2:3=0; 0=0; NA=NA")
# NOTE(review): upper age bound 100 here vs 115 in the 2015 section --
# confirm intended.
ces19phone$young<-Recode(ces19phone$age, "35:100=0; 18:34=1")
ces19phone$old<-Recode(ces19phone$age, "55:100=1; 18:54=0")
ces19phone$foreign<-Recode(ces19phone$native, "1=0; 0=1")
#Dummies coded missing as 0
#ces19phone$low_income<-Recode(ces19phone$income, "else=0; 1=1")
#ces19phone$high_income<-Recode(ces19phone$income, "else=0; 5=1")
#ces19phone$no_religion<-Recode(ces19phone$religion, "0=1; else=0")
#ces19phone$catholic<-Recode(ces19phone$religion, "1=1; else=0")
#ces19phone$young<-Recode(ces19phone$age, "else=0; 18:34=1")
#ces19phone$old<-Recode(ces19phone$age, "55:100=1; else=0")
#ces19phone$foreign<-Recode(ces19phone$native, "else=0; 0=1")
table(ces19phone$low_income)
table(ces19phone$high_income)
table(ces19phone$no_religion)
table(ces19phone$catholic)
table(ces19phone$young)
table(ces19phone$old)
table(ces19phone$foreign)
# NOTE(review): "1=1; else=0" maps NA to 0 as well -- verify deliberate.
ces19phone$working_class<-Recode(ces19phone$working_class, "1=1; else=0")
ces19phone$working_class2<-Recode(ces19phone$working_class2, "1=1; else=0")
#ces19phone$union_both<-Recode(ces19phone$union_both, "1=1; else=0")
#ces19phone$male<-Recode(ces19phone$male, "1=1; else=0")
#ces19phone$sector<-Recode(ces19phone$sector, "1=1; else=0")
#ces19phone$degree<-Recode(ces19phone$degree, "1=1; else=0")
#ces19phone$language<-Recode(ces19phone$language, "1=1; else=0")
table(ces19phone$working_class)
table(ces19phone$working_class2)
table(ces19phone$union_both)
table(ces19phone$male)
table(ces19phone$sector)
table(ces19phone$degree)
table(ces19phone$language)
# Party Id (dummies; NA treated as 0 via "else=0")
#ces19phone$liberal_id<-Recode(ces19phone$party_id, "1=1; 0=0; 2:4=0; else=NA")
#ces19phone$conservative_id<-Recode(ces19phone$party_id, "2=1; 0:1=0; 3:4=0; else=NA")
#ces19phone$ndp_id<-Recode(ces19phone$party_id, "3=1; 0:2=0; 4=0; else=NA")
#ces19phone$bloc_id<-Recode(ces19phone$party_id, "4=1; 0:3=0; else=NA")
ces19phone$liberal_id<-Recode(ces19phone$party_id, "1=1; else=0")
ces19phone$conservative_id<-Recode(ces19phone$party_id, "2=1; else=0")
ces19phone$ndp_id<-Recode(ces19phone$party_id, "3=1; else=0")
ces19phone$bloc_id<-Recode(ces19phone$party_id, "4=1; else=0")
table(ces19phone$liberal_id)
table(ces19phone$conservative_id)
table(ces19phone$ndp_id)
table(ces19phone$bloc_id)
# Party vote (dummies; NA treated as 0 via "else=0")
#ces19phone$liberal<-Recode(ces19phone$vote, "1=1; 0=0; 2:5=0; else=NA")
#ces19phone$conservative<-Recode(ces19phone$vote, "2=1; 0:1=0; 3:5=0; else=NA")
#ces19phone$ndp<-Recode(ces19phone$vote, "3=1; 0:2=0; 4:5=0; else=NA")
#ces19phone$bloc<-Recode(ces19phone$vote, "4=1; 0:3=0; 5=0; else=NA")
#ces19phone$green<-Recode(ces19phone$vote, "5=1; 0:4=0; else=NA")
ces19phone$liberal<-Recode(ces19phone$vote, "1=1; else=0")
ces19phone$conservative<-Recode(ces19phone$vote, "2=1; else=0")
# overwrites the NA-preserving ndp recode from the top of this section
ces19phone$ndp<-Recode(ces19phone$vote, "3=1; else=0")
ces19phone$bloc<-Recode(ces19phone$vote, "4=1; else=0")
ces19phone$green<-Recode(ces19phone$vote, "5=1; else=0")
table(ces19phone$liberal)
table(ces19phone$conservative)
table(ces19phone$ndp)
table(ces19phone$bloc)
table(ces19phone$green)
#First make a ces15 analysis data frame with the common variable set
ces15phone %>%
select(ndp, liberal, conservative, bloc, region3, working_class2, union_both, young, old, male, sector, catholic, no_religion, degree, foreign, low_income, high_income, language,
market_liberalism, moral_traditionalism, political_disaffection, continentalism, quebec_sovereignty, ndp_id, liberal_id, conservative_id, bloc_id, personal_retrospective,
national_retrospective, immigration_rate, environment, redistribution, defence, liberal_leader, conservative_leader, ndp_leader, bloc_leader, quebec, occupation4, minorities, immigration, immigration2, minorities_help, mip=mip)->out15
#Now a ces19 data frame (note: mip comes from mip_cat in the 2019 file)
ces19phone %>%
# filter(quebec!=1) %>%
select(ndp, liberal, conservative, bloc, region3, working_class2, union_both, young, old, male, sector, catholic, no_religion, degree, foreign, low_income, high_income, language,
market_liberalism, moral_traditionalism, political_disaffection, continentalism, quebec_sovereignty, ndp_id, liberal_id, conservative_id, bloc_id, personal_retrospective,
national_retrospective, immigration_rate, environment, redistribution, defence, liberal_leader, conservative_leader, ndp_leader, bloc_leader, quebec, occupation4, minorities, immigration, immigration2, immigration_rate, minorities_help, mip=mip_cat)->out19
# survey dummy: 0 = 2015, 1 = 2019
out15$survey<-rep(0, nrow(out15))
out19$survey<-rep(1, nrow(out19))
# NOTE(review): value labels are attached to out15$survey only; whether they
# survive bind_rows() with the unlabelled out19$survey depends on the haven/
# labelled version -- confirm before relying on as_factor(survey) downstream.
val_labels(out15$survey)<-c(`2015`=0, `2019`=1)
# Stack the two surveys, then split into rest-of-Canada and Quebec frames
out15 %>%
bind_rows(., out19)->out
roc<-out %>%
filter(quebec!=1)
qc<-out %>%
filter(quebec==1)
#### NDP ROC ####
# NDP vote, rest of Canada: six nested blocks, each interacted with `survey`.
block1<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income)*survey, family="binomial", data=roc)
block2<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism)*survey, family="binomial", data=roc)
block3<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id)*survey, family="binomial", data=roc)
block4<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=roc)
block5<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=roc)
block6<-glm(ndp~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader)*survey, family="binomial", data=roc)
#Turn into a list
roc_ndp<-list(block1, block2, block3, block4, block5, block6)
names(roc_ndp)<-c("block1", "block2", "block3", "block4", "block5", "block6")
library(kableExtra)
library(knitr)
## This code pulls it all nicely together.
roc_ndp %>%
#Tidy each of the models
map(., tidy) %>%
#bind them; the argument .id="Block" makes a new variable called Block filling it with the names of the list items from line 235
bind_rows(., .id="Block") %>%
#filter in only the interaction terms; they all contain :survey
filter(str_detect(term,":survey")) %>%
#Keep only the first instance of each interaction term
#First group by term; this forms groups of the terms
group_by(term) %>%
#this picks only the first term
slice(1) %>%
#get rid of the :survey
mutate(term=str_replace_all(term, ":survey", "")) %>%
#arrange them by block so that block1 variables appear first
arrange(Block) %>%
select(Block, term, estimate,p.value)->roc_ndp_table
#
# #Save the values to be bolded here
# #This requires changing which object is used e.g. roc_ndp_table might become qc_ndp_table etc.
# to_bold<-roc_ndp_table$p.value<0.05
# roc_ndp_table %>%
# kable(., digits=2) %>%
# #bold the third and fourth columns
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/ndp_roc_interaction.html")
#### NDP QC ####
# NDP vote, Quebec: six nested blocks, each interacted with `survey`.
block1<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign)*survey, family="binomial", data=qc)
block2<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty)*survey, family="binomial", data=qc)
block3<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id)*survey, family="binomial", data=qc)
block4<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=qc)
block5<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=qc)
block6<-glm(ndp~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader+bloc_leader)*survey, family="binomial", data=qc)
#Turn into a list
qc_ndp<-list(block1, block2, block3, block4, block5, block6)
names(qc_ndp)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Extract survey-interaction coefficients (first appearance of each term).
qc_ndp %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->qc_ndp_table
#
# #Save the values to be bolded here
# to_bold<-qc_ndp_table$p.value<0.05
# qc_ndp_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/ndp_qc_interaction.html")
#### Conservative ROC####
# Conservative vote, rest of Canada: six nested blocks, each interacted
# with `survey`.
block1<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income)*survey, family="binomial", data=roc)
block2<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism)*survey, family="binomial", data=roc)
block3<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id)*survey, family="binomial", data=roc)
block4<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=roc)
block5<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=roc)
block6<-glm(conservative~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader)*survey, family="binomial", data=roc)
#Turn into a list
roc_conservative<-list(block1, block2, block3, block4, block5, block6)
names(roc_conservative)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Extract survey-interaction coefficients (first appearance of each term).
roc_conservative %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->roc_conservative_table
# #Save the values to be bolded here
# to_bold<-roc_conservative_table$p.value<0.05
# roc_conservative_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/conservative_roc_interaction.html")
#### Conservative QC ####
# Same block-recursive design as the ROC models, but for Quebec: the Quebec
# specification swaps the region/religion/income terms for language and adds
# quebec_sovereignty and Bloc party-ID/leader variables.
block1<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign)*survey, family="binomial", data=qc)
block2<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty)*survey, family="binomial", data=qc)
block3<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id)*survey, family="binomial", data=qc)
block4<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=qc)
block5<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=qc)
block6<-glm(conservative~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader+bloc_leader)*survey, family="binomial", data=qc)
#Turn the six models into a named list
qc_conservative<-list(block1, block2, block3, block4, block5, block6)
names(qc_conservative)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Keep only survey-interaction terms; slice(1) within group_by(term) retains
# each term's estimate from the first block in which it appears.
qc_conservative %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->qc_conservative_table
#Save the values to be bolded here
# to_bold<-qc_conservative_table$p.value<0.05
# qc_conservative_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/conservative_qc_interaction.html")
#### Liberal ROC Interaction ####
# Block-recursive logistic regressions of Liberal vote in the rest of Canada,
# identical specification to the Conservative ROC models above.
block1<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income)*survey, family="binomial", data=roc)
block2<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism)*survey, family="binomial", data=roc)
block3<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id)*survey, family="binomial", data=roc)
block4<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=roc)
block5<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=roc)
block6<-glm(liberal~(region3+working_class2+union_both+young+old+male+sector+catholic+no_religion+degree+foreign+low_income+high_income+market_liberalism+moral_traditionalism+political_disaffection+continentalism+ndp_id+liberal_id+conservative_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader)*survey, family="binomial", data=roc)
#Turn the six models into a named list
roc_liberal<-list(block1, block2, block3, block4, block5, block6)
names(roc_liberal)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Extract survey-interaction coefficients; first block containing each term wins.
roc_liberal %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->roc_liberal_table
#Save the values to be bolded here
# to_bold<-roc_liberal_table$p.value<0.05
# roc_liberal_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/liberal_roc_interaction.html")
#### Liberal QC Interaction ####
# Block-recursive logistic regressions of Liberal vote in Quebec, identical
# specification to the Conservative QC models above.
block1<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign)*survey, family="binomial", data=qc)
block2<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty)*survey, family="binomial", data=qc)
block3<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id)*survey, family="binomial", data=qc)
block4<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=qc)
block5<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=qc)
block6<-glm(liberal~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader+bloc_leader)*survey, family="binomial", data=qc)
#Turn the six models into a named list
qc_liberal<-list(block1, block2, block3, block4, block5, block6)
names(qc_liberal)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Extract survey-interaction coefficients; first block containing each term wins.
qc_liberal %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->qc_liberal_table
#Save the values to be bolded here
# to_bold<-qc_liberal_table$p.value<0.05
# qc_liberal_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/liberal_qc_interaction.html")
#### Bloc QC Interaction ####
# Block-recursive logistic regressions of Bloc Quebecois vote in Quebec,
# identical specification to the other Quebec models above.
block1<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign)*survey, family="binomial", data=qc)
block2<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty)*survey, family="binomial", data=qc)
block3<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id)*survey, family="binomial", data=qc)
block4<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective)*survey, family="binomial", data=qc)
block5<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence)*survey, family="binomial", data=qc)
block6<-glm(bloc~(working_class2+union_both+young+old+male+degree+language+foreign+market_liberalism+moral_traditionalism+political_disaffection+continentalism+quebec_sovereignty+ndp_id+liberal_id+conservative_id+bloc_id+personal_retrospective+national_retrospective+immigration_rate+environment+redistribution+defence+liberal_leader+conservative_leader+ndp_leader+bloc_leader)*survey, family="binomial", data=qc)
#Turn the six models into a named list
qc_bloc<-list(block1, block2, block3, block4, block5, block6)
names(qc_bloc)<-c("block1", "block2", "block3", "block4", "block5", "block6")
# Extract survey-interaction coefficients; first block containing each term wins.
qc_bloc %>%
map(., tidy) %>%
bind_rows(., .id="Block") %>%
filter(str_detect(term,":survey")) %>%
group_by(term) %>%
slice(1) %>%
mutate(term=str_replace_all(term, ":survey", "")) %>%
arrange(Block) %>%
select(Block, term, estimate,p.value)->qc_bloc_table
#Save the values to be bolded here
# to_bold<-qc_bloc_table$p.value<0.05
# qc_bloc_table %>%
# kable(., digits=2) %>%
# column_spec(3:4, bold=to_bold) %>%
# save_kable(file="Tables/bloc_qc_interaction.html")
#### Format Nice comprehensive QC and ROC Tables
#Step 1: combine all the parties' tables into roc and qc
# (cbind produces duplicated Block/term columns; they are dropped below)
roc_table<-cbind(roc_ndp_table, roc_liberal_table, roc_conservative_table)
qc_table<-cbind(qc_ndp_table, qc_bloc_table, qc_liberal_table, qc_conservative_table)
library(flextable)
#### Combine ROC Table ####
#Drop out columns we don't need.
names(roc_table)
#Keep the first Block and the first term column
#So drop the duplicated columns 5 and 9, 6 and 10
roc_table %>%
select(-5, -9, -6, -10) %>%
#Rename the remaining columns with proper names
rename(., Block=1, term=2, NDP=3, sig_ndp=4, Liberal=5,sig_liberal=6, Conservative=7, sig_con=8) %>%
#Turn this object into a flextable object. See https://davidgohel.github.io/flextable/
flextable(.) %>%
#format the flextable to two digits
# NOTE(review): recent flextable versions moved `digits` to colformat_double();
# confirm this call still formats as intended on the installed version
colformat_num(digits=2) %>%
#bold() bolds rows i that meet a condition, restricted to columns j
#So here, it bolds rows where sig_ndp < 0.05, in the NDP columns only
#note that it uses formula notation ~
bold(., i=~sig_ndp< 0.05, j=~NDP+sig_ndp) %>%
#Repeat for Liberals
bold(., i=~sig_liberal< 0.05, j=~Liberal+sig_liberal) %>%
#conservatives
bold(., i=~sig_con< 0.05, j=~Conservative+sig_con) %>%
#This sets the background colour conditional on the Block
#So if it is block1, 3 or 5, grey it out.
bg(., i=~str_detect(Block, "block1|block3|block5"), bg="grey") %>%
add_header_lines(values=c("ROC Block Recursive Model Coefficients, 2015 and 2019")) %>% save_as_html("Tables/roc_block_recursive_table.html")
####Combine Quebec Table ####
#Drop out columns we don't need.
#First check the names
names(qc_table)
#Keep the first Block and the first term column
#So drop the duplicated columns 5, 6 and 9, 10 and 13 and 14
# Same formatting pipeline as the ROC table, with a fourth (BQ) party added.
qc_table %>%
select(-5, -6, -9,-10, -13, -14) %>%
rename(., Block=1, term=2, NDP=3, sig_ndp=4, Liberal=5,sig_liberal=6, Conservative=7, sig_con=8, BQ=9, sig_bq=10) %>%
flextable(.) %>%
colformat_num(digits=2) %>%
#bold significant coefficients within each party's pair of columns
# NOTE(review): no bold() call for the BQ columns (sig_bq) — confirm whether
# that omission is intentional
bold(., i=~sig_ndp< 0.05, j=~NDP+sig_ndp) %>%
bold(., i=~sig_liberal< 0.05, j=~Liberal+sig_liberal) %>%
bold(., i=~sig_con< 0.05, j=~Conservative+sig_con) %>%
bg(., i=~str_detect(Block, "block1|block3|block5"), bg="grey") %>%
add_header_lines(values=c("Quebec Block Recursive Model Coefficients, 2015 and 2019")) %>%
save_as_html(., "Tables/qc_block_recursive_model.html")
#save_as_docx(., path="Tables/qc_block_recursive_model.docx")
#### Run some checks with what appears in the table####
#Spot check a few coefficients at random to be sure the tables are correct.
#Each model is stored in roc_ndp, qc_ndp etc., under $block1, $block2, ...
#Pick four or five at random across blocks and regions — enough to be
#confident no mistake was made when assembling the tables.
summary(roc_ndp$block1)#Interaction coefficient for male:survey is -0.38; I have confirmed visually it is -0.39 in the file roc_block_recursive_table.
summary(roc_liberal$block1)
summary(roc_conservative$block3)
## Did the union movement really go down for all parties?
roc_ndp_table %>%
filter(term=="union_both")
roc_liberal_table %>%
filter(term=="union_both")
roc_conservative_table %>%
filter(term=="union_both")
#### Policy variation change between 2015-19####
# All attitude scales are scored so that higher values are further right;
# left-wing-positive items are reverse-coded below so every scale points the
# same direction before computing 2015-2019 differences.
library(psych)
# Right-wing-positive scales: inspect distributions, no reverse coding needed
table(ces15phone$moral_traditionalism, useNA="ifany")
table(ces15phone$market_liberalism, useNA="ifany")
table(ces15phone$continentalism, useNA="ifany")
# NOTE(review): bare print of the full vector; the surrounding lines use
# table(..., useNA="ifany") — confirm whether this was meant to match
ces15phone$redistribution
# Reverse code the left-wing-positive scales to right-wing-positive
out$environment<-reverse.code(-1, out[,'environment'])
out$redistribution<-reverse.code(-1, out[,'redistribution'])
out$immigration<-reverse.code(-1, out[,'immigration'])
out$immigration2<-reverse.code(-1, out[,'immigration2'])
out$immigration_rate<-reverse.code(-1, out[,'immigration_rate'])
out$minorities_help<-reverse.code(-1, out[,'minorities_help'])
# Checks: inspect recoded distributions.
# (An accidental verbatim duplicate of these six table() calls was removed.)
table(out$environment, useNA="ifany")
table(out$redistribution, useNA="ifany")
table(out$immigration, useNA="ifany")
table(out$immigration2, useNA="ifany")
table(out$immigration_rate, useNA="ifany")
table(out$minorities_help, useNA="ifany")
#Policy rating changes
####Attitudinal change ####
# For each attitude item, compute the mean by survey wave and social class,
# take the 2019 minus 2015 difference within each class/item, and plot the
# differences faceted by item.
out %>%
select(immigration, immigration2, immigration_rate, minorities_help, environment, redistribution, continentalism, moral_traditionalism, market_liberalism, survey, occupation4) %>%
pivot_longer(cols=immigration:market_liberalism) %>%
group_by(survey, occupation4, name) %>%
summarize(Average=mean(value, na.rm=T)) %>%
# order rows so lag(Average) within (name, occupation4) is the earlier wave
arrange(occupation4, name, survey) %>%
group_by(name, occupation4) %>%
mutate(Difference=Average-lag(Average)) %>%
# keep the later wave's row, which carries the between-wave difference
filter(survey==1) %>%
ggplot(., aes(x=occupation4, y=Difference))+geom_point(position="jitter")+ylim(-0.115,0.15)+labs(x="Class", y="Difference (2019-2015)", caption="This graph shows the difference between 2015 and 2019 scores on a range of items by social class.\nThe items have all been scored from 0 to 1 so a shift by 0.1 equals a 10% shift in the underlying sentiment.\n All items have been scored here such that positive differences are a shift to the right\nand negative scores are a shift to the left", title="Attitudinal Differences by Social Class", subtitle="CES 2015 and 2019")+facet_wrap(~name)+coord_flip()+geom_hline(yintercept=0, linetype=2)
ggsave("Plots/attitudinal_differences_2015_2019.png")
#Leader rating changes
# Same between-wave difference calculation as above, applied to the four
# party-leader thermometer ratings.
out %>%
select(liberal_leader, conservative_leader, ndp_leader, bloc_leader, survey, occupation4) %>%
pivot_longer(cols=liberal_leader:bloc_leader) %>%
group_by(survey, occupation4, name) %>%
summarize(Average=mean(value, na.rm=T)) %>%
arrange(occupation4, name, survey) %>%
group_by(name, occupation4) %>%
mutate(Difference=Average-lag(Average)) %>%
filter(survey==1) %>%
ggplot(., aes(x=occupation4, y=Difference, col=name))+geom_point(position="jitter")+ylim(-0.2,0.2)+labs(x="Class", y="Difference (2019-2015)")
#### Most important problem####
# Count "most important problem" responses by survey wave and plot counts,
# bars ordered by frequency.
out %>%
group_by(Survey=as_factor(survey), `Most Important Problem`=as_factor(mip)) %>%
summarise(n=n()) %>%
filter(!is.na(`Most Important Problem`)) %>%
ggplot(., aes(y=reorder(`Most Important Problem`,n), x=n, fill=Survey))+geom_col(position="dodge")+scale_fill_grey()+labs(y="Most Important Problem")
ggsave("Plots/mip_2015_2019.png")
|
#' Estimates of CV SCRNP
#'
#' This function computes K-fold cross-validated estimates of the
#' sensitivity-constrained rate of negative prediction (SCRNP). This
#' quantity can be interpreted as the rate of negative classification for a fixed
#' constraint on the sensitivity of a prediction algorithm. Thus, if an algorithm
#' has a high SCRNP, it will also have a high positive predictive value.
#'
#' Estimating the SCRNP using K-fold cross-validation is problematic. If
#' data are partitioned into K distinct groups, depending on the sample size
#' and choice of K, the validation sample may be quite small. In order to estimate
#' SCRNP, we require estimation of a quantile of the predictor's distribution. More extreme
#' quantiles (which correspond to high sensitivity constraints) are difficult to estimate
#' using few observations. Here, we estimate relevant nuisance parameters in the training sample and use
#' the validation sample to perform some form of bias correction -- either through
#' cross-validated targeted minimum loss-based estimation, estimating equations,
#' or one-step estimation. When aggressive learning algorithms are applied, it is
#' necessary to use an additional layer of cross-validation in the training sample
#' to estimate the nuisance parameters. This is controlled via the \code{nested_cv}
#' option below.
#'
#' @param Y A numeric vector of outcomes, assumed to equal \code{0} or \code{1}.
#' @param X A \code{data.frame} or \code{matrix} of variables for prediction.
#' @param K The number of cross-validation folds (default is \code{10}).
#' @param sens The sensitivity constraint imposed on the rate of negative prediction
#' (see description).
#' @param learner A wrapper that implements the desired method for building a
#' prediction algorithm.
#' @param nested_cv A boolean indicating whether nested cross validation should
#' be used to estimate the distribution of the prediction function. Default (\code{TRUE})
#' is best choice for aggressive \code{learner}'s, while \code{FALSE} is reasonable
#' for smooth \code{learner}'s (e.g., logistic regression).
#' @param nested_K If nested cross validation is used, how many inner folds should
#' there be? Default (\code{K-1}) affords quicker computation by reusing training
#' fold learner fits.
#' @param parallel A boolean indicating whether prediction algorithms should be
#' trained in parallel. Default to \code{FALSE}.
#' @param max_cvtmle_iter Maximum number of iterations for the bias correction
#' step of the CV-TMLE estimator (default \code{10}).
#' @param cvtmle_ictol The CV-TMLE will iterate until \code{max_cvtmle_iter} is reached
#' or the mean of the cross-validated efficient influence function is less than
#' \code{cvtmle_ictol}.
#' @param quantile_type Type of quantile estimator to be used. See \link[stats]{quantile}
#' for description.
#' @param prediction_list For power users: a list of predictions made by \code{learner}
#' that has a format compatible with \code{cvauc}.
#' @param ... Other arguments, not currently used
#' @importFrom SuperLearner CVFolds
#' @importFrom cvAUC ci.cvAUC
#' @importFrom stats uniroot
#' @export
#' @return An object of class \code{"scrnp"}. \describe{
#' \item{\code{est_cvtmle}}{cross-validated targeted minimum loss-based estimator of K-fold CV SCRNP}
#' \item{\code{iter_cvtmle}}{iterations needed to achieve convergence of CVTMLE algorithm}
#' \item{\code{cvtmle_trace}}{the value of the CVTMLE at each iteration of the targeting algorithm}
#' \item{\code{se_cvtmle}}{estimated standard error based on targeted nuisance parameters}
#' \item{\code{est_init}}{plug-in estimate of CV SCRNP where nuisance parameters are estimated
#' in the training sample}
#' \item{\code{est_empirical}}{the standard K-fold CV SCRNP estimator}
#' \item{\code{se_empirical}}{estimated standard error for the standard estimator}
#' \item{\code{est_onestep}}{cross-validated one-step estimate of K-fold CV SCRNP}
#' \item{\code{se_onestep}}{estimated standard error for the one-step estimator}
#' \item{\code{est_esteq}}{cross-validated estimating equations estimate of K-fold CV SCRNP
#' (here, equivalent to one-step, since the estimating equation is linear in SCRNP)}
#' \item{\code{se_esteq}}{estimated standard error for the estimating equations estimator
#' (same as one-step)}
#' \item{\code{folds}}{list of observation indexes in each validation fold}
#' \item{\code{ic_cvtmle}}{influence function evaluated at the targeted nuisance parameter
#' estimates}
#' \item{\code{ic_onestep}}{influence function evaluated at the training-fold-estimated
#' nuisance parameters}
#' \item{\code{ic_esteq}}{influence function evaluated at the training-fold-estimated
#' nuisance parameters}
#' \item{\code{ic_empirical}}{influence function evaluated at the validation-fold
#' estimated nuisance parameters}
#' \item{\code{prediction_list}}{a list of output from the cross-validated model training;
#' see the individual wrapper function documentation for further details}
#' }
#' @examples
#' # simulate data
#' n <- 200
#' p <- 10
#' X <- data.frame(matrix(rnorm(n*p), nrow = n, ncol = p))
#' Y <- rbinom(n, 1, plogis(X[,1] + X[,10]))
#'
#' # estimate cv scrnp of logistic regression
#' scrnp_ests <- cv_scrnp(Y = Y, X = X, K = 5,
#' nested_cv = FALSE,
#' learner = "glm_wrapper")
#'
#' # estimate cv scrnp of random forest with nested
#' # cross-validation for nuisance parameter estimation
#' \donttest{
#' scrnp_ests <- cv_scrnp(Y = Y, X = X, K = 5,
#' nested_cv = TRUE,
#' learner = "randomforest_wrapper")
#' }
cv_scrnp <- function(Y, X, K = 10, sens = 0.95,
                     learner = "glm_wrapper",
                     nested_cv = TRUE,
                     nested_K = K - 1,
                     parallel = FALSE,
                     max_cvtmle_iter = 10,
                     cvtmle_ictol = 1/length(Y),
                     quantile_type = 8,
                     prediction_list = NULL,
                     ...){
  # --- input checks -------------------------------------------------------
  assertthat::assert_that(all(Y %in% c(0,1)))
  assertthat::assert_that(0 < sens & sens < 1)
  if(!nested_cv){
    assertthat::assert_that(K > 1)
  }else{
    assertthat::assert_that(K > 2)
    assertthat::assert_that(nested_K > 1)
  }
  # sample size
  n <- length(Y)
  # --- outer cross-validation folds ---------------------------------------
  # stratify on Y only when each fold can contain at least one case and one
  # control
  folds <- SuperLearner::CVFolds(
    N = n, id = NULL, Y = Y, cvControl = list(
      V = K, stratifyCV = ifelse(K <= sum(Y) & K <= sum(!Y), TRUE, FALSE),
      shuffle = TRUE, validRows = NULL
    )
  )
  # --- train learners in all necessary combinations of folds --------------
  if(is.null(prediction_list)){
    prediction_list <- .get_predictions(
      learner = learner, Y = Y, X = X, K = K, nested_K = nested_K,
      # bug fix: `parallel` was previously hard-coded to FALSE here, which
      # silently ignored the user's `parallel` argument
      folds = folds, parallel = parallel, nested_cv = nested_cv
    )
  }
  # --- quantile estimates, one per outer fold -----------------------------
  if(!nested_cv){
    quantile_list <- lapply(prediction_list[seq_len(K)], .get_quantile, p = 1 - sens,
                            quantile_type = quantile_type)
  }else{
    quantile_list <- sapply(seq_len(K), .get_nested_cv_quantile, quantile_type = quantile_type,
                            prediction_list = prediction_list, folds = folds,
                            p = 1 - sens, simplify = FALSE)
  }
  # --- density estimates at each fold's quantile --------------------------
  if(!nested_cv){
    density_list <- mapply(x = prediction_list[seq_len(K)], c0 = quantile_list,
                           FUN = .get_density, SIMPLIFY = FALSE)
  }else{
    density_list <- mapply(x = split(seq_len(K), seq_len(K)), c0 = quantile_list,
                           FUN = .get_density, SIMPLIFY = FALSE,
                           MoreArgs = list(prediction_list = prediction_list,
                                           folds = folds, nested_cv = nested_cv))
  }
  # --- long-format data for the targeting (bias-correction) step ----------
  if(!nested_cv){
    target_and_pred_data <- .make_targeting_data(prediction_list = prediction_list,
                                                 quantile_list = quantile_list,
                                                 density_list = density_list,
                                                 folds = folds, gn = mean(Y))
    target_data <- target_and_pred_data$out
    pred_data <- target_and_pred_data$out_pred
  }else{
    target_and_pred_data <- sapply(seq_len(K), .make_targeting_data,
                                   prediction_list = prediction_list,
                                   quantile_list = quantile_list,
                                   density_list = density_list, folds = folds,
                                   gn = mean(Y), nested_cv = TRUE, simplify = FALSE)
    target_data <- Reduce("rbind", lapply(target_and_pred_data, "[[", "out"))
    pred_data <- Reduce("rbind", lapply(target_and_pred_data, "[[", "out_pred"))
  }
  # clever weight used by the fluctuation submodel
  target_data$weight <- with(target_data, 1 - Y/gn * f_ratio)
  pred_data$weight <- with(pred_data, 1 - Y/gn * f_ratio)
  # logit of the estimated CDF, trimmed away from 0/1 for numerical stability
  target_data$logit_Fn <- SuperLearner::trimLogit(target_data$Fn, trim = 1e-5)
  pred_data$logit_Fn <- SuperLearner::trimLogit(pred_data$Fn, trim = 1e-5)
  # one-dimensional logistic fluctuation through the initial CDF estimate
  fluc_mod <- glm(ind ~ -1 + offset(logit_Fn) + weight,
                  data = target_data, family = "binomial", start = 0)
  target_data$Fnstar <- fluc_mod$fitted.values
  pred_data$Fnstar <- predict(fluc_mod, newdata = pred_data, type = "response")
  # --- fold-specific estimates --------------------------------------------
  # initial (non-targeted) plug-in estimate for each fold
  init_estimates <- by(pred_data, pred_data$fold, function(x){
    x$Fn[x$Y==0][1] * (1-x$gn[1]) + x$Fn[x$Y==1][1] * x$gn[1]
  })
  # targeted (CV-TMLE) estimate for each fold
  cvtmle_estimates <- by(pred_data, pred_data$fold, function(x){
    x$Fnstar[x$Y==0][1] * (1-x$gn[1]) + x$Fnstar[x$Y==1][1] * x$gn[1]
  })
  # --- influence function pieces ------------------------------------------
  # initialize all four per-fold CDF columns (previously only the *star
  # columns were initialized, relying on implicit column creation for F0n/F1n)
  target_data$F0nstar <- NaN
  target_data$F1nstar <- NaN
  target_data$F0n <- NaN
  target_data$F1n <- NaN
  for(k in seq_len(K)){
    target_data$F0nstar[target_data$fold == k] <- pred_data$Fnstar[pred_data$Y == 0 & pred_data$fold == k][1]
    target_data$F0n[target_data$fold == k] <- pred_data$Fn[pred_data$Y == 0 & pred_data$fold == k][1]
    target_data$F1nstar[target_data$fold == k] <- pred_data$Fnstar[pred_data$Y == 1 & pred_data$fold == k][1]
    target_data$F1n[target_data$fold == k] <- pred_data$Fn[pred_data$Y == 1 & pred_data$fold == k][1]
  }
  target_data$DY_cvtmle <- with(target_data, Fnstar - (gn*F1nstar + (1 - gn)*F0nstar))
  target_data$Dpsi_cvtmle <- with(target_data, weight * (ind - Fnstar))
  target_data$DY_os <- with(target_data, Fn - (gn*F1n + (1 - gn)*F0n))
  target_data$Dpsi_os <- with(target_data, weight * (ind - Fn))
  # --- point estimates and standard errors --------------------------------
  # cvtmle estimate: unlist the `by` result for consistency with the
  # handling of init_estimates below
  est_cvtmle <- mean(unlist(cvtmle_estimates))
  se_cvtmle <- sqrt(var(target_data$DY_cvtmle + target_data$Dpsi_cvtmle) / n)
  # initial plug-in estimate, plus one-step bias correction
  est_init <- mean(unlist(init_estimates))
  est_onestep <- est_init + mean(target_data$DY_os + target_data$Dpsi_os)
  se_onestep <- sqrt(var(target_data$DY_os + target_data$Dpsi_os) / n)
  # standard K-fold CV (empirical) estimator
  cv_empirical_estimates <- .get_cv_estim(prediction_list[seq_len(K)], sens = sens,
                                          gn = mean(Y), quantile_type = quantile_type)
  est_empirical <- mean(unlist(lapply(cv_empirical_estimates, "[[", "est")))
  var_empirical <- mean(unlist(lapply(cv_empirical_estimates, function(x){
    var(x$ic)
  })))
  ic_empirical <- Reduce(c, lapply(cv_empirical_estimates, "[[", "ic"))
  se_empirical <- sqrt(var_empirical / n)
  # --- format output ------------------------------------------------------
  out <- list()
  out$est_cvtmle <- est_cvtmle
  # out$iter_cvtmle <- iter
  # out$cvtmle_trace <- tmle_auc
  out$se_cvtmle <- se_cvtmle
  out$est_init <- est_init
  out$est_empirical <- est_empirical
  out$se_empirical <- se_empirical
  out$est_onestep <- est_onestep
  out$se_onestep <- se_onestep
  # the estimating-equations estimator coincides with the one-step here
  out$est_esteq <- est_onestep
  out$se_esteq <- se_onestep
  out$se_cvtmle_type <- out$se_esteq_type <-
    out$se_empirical_type <- out$se_onestep_type <- "std"
  out$ic_cvtmle <- target_data$DY_cvtmle + target_data$Dpsi_cvtmle
  out$ic_onestep <- target_data$DY_os + target_data$Dpsi_os
  out$ic_empirical <- ic_empirical
  out$prediction_list <- prediction_list
  class(out) <- "scrnp"
  return(out)
}
#' Helper function to get results for a single cross-validation fold
#' @param x An entry in prediction_list with components \code{test_pred}
#'   (numeric predictions) and \code{test_y} (0/1 outcomes).
#' @param sens The sensitivity constraint.
#' @param gn An estimate of the marginal probability that \code{Y = 1}.
#' @param quantile_type The type of quantile estimate to use.
#' @param ... Other options (not currently used)
#' @return A list with the fold-specific SCRNP estimate (\code{est}) and the
#'   estimated influence function at each observation (\code{ic}).
#' @importFrom stats quantile
.get_one_fold <- function(x, sens, gn, quantile_type = 8, ...){
  # quantile of predictions among cases; use the full argument name `probs`
  # rather than relying on partial matching of `p`
  c0 <- stats::quantile(x$test_pred[x$test_y == 1], probs = 1 - sens, type = quantile_type)
  # CDF of predictions at c0, conditional on Y = 1 and on Y = 0
  F1nc0 <- mean(x$test_pred[x$test_y == 1] <= c0)
  F0nc0 <- mean(x$test_pred[x$test_y == 0] <= c0)
  FYnc0 <- ifelse(x$test_y == 1, F1nc0, F0nc0)
  # plug-in estimate for this fold
  Psi <- gn * F1nc0 + (1-gn) * F0nc0
  DY <- FYnc0 - Psi
  # density estimate at c0; fall back to a density ratio of 1 if
  # kernel density estimation fails
  dens <- tryCatch({.get_density(x = x, c0 = c0,
                                 bounded_kernel = FALSE,
                                 x_name = "test_pred",
                                 y_name = "test_y",
                                 nested_cv = FALSE, prediction_list = NULL,
                                 folds = NULL)}, error = function(e){
    list(f_0_c0 = 1, f_10_c0 = 1)
  })
  weight <- (1 - x$test_y / gn * dens$f_0_c0/dens$f_10_c0)
  ind <- as.numeric(x$test_pred <= c0)
  Dpsi <- weight * (ind - FYnc0)
  return(list(est = Psi, ic = DY + Dpsi))
}
#' Helper function to turn prediction_list into a CV estimate of SCRNP
#' @param prediction_list Properly formatted list of predictions.
#' @param sens The sensitivity constraint.
#' @param gn The marginal probability that \code{Y = 1}.
#' @param quantile_type The type of quantile estimate to use.
#' @param ... Other options (not currently used)
.get_cv_estim <- function(prediction_list, sens, gn, quantile_type = 8, ...){
  # Apply the single-fold estimator to each validation fold and return the
  # per-fold results (estimate + influence function) as a list.
  lapply(
    prediction_list,
    function(fold){
      .get_one_fold(fold, sens = sens, gn = gn, quantile_type = quantile_type)
    }
  )
}
#' Helper function to get the training-fold quantile when nested CV is
#' NOT used.
#' @param x An entry in prediction_list; must contain \code{train_pred}
#'   (numeric predictions) and \code{train_y} (0/1 outcomes).
#' @param p The probability at which the quantile is computed.
#' @param quantile_type The type of quantile estimate to use
#'   (see \link[stats]{quantile}).
#' @return The \code{p}-th quantile of training predictions among
#'   observations with \code{train_y == 1}.
#' @importFrom stats quantile
.get_quantile <- function(x, p, quantile_type = 8){
  # use the full argument name `probs` instead of relying on partial
  # matching of `p` (an R anti-pattern)
  stats::quantile(x$train_pred[x$train_y == 1], probs = p, type = quantile_type)
}
#' Helper function to get the quantile for a single training fold
#' when nested CV is used.
#' @param x The index of the outer validation fold.
#' @param p The probability at which the quantile is computed.
#' @param prediction_list Properly formatted list of predictions.
#' @param folds Cross-validation fold assignments.
#' @param quantile_type The type of quantile estimate to use.
#' @return The \code{p}-th quantile of pooled inner-validation predictions
#'   among observations with outcome equal to 1.
#' @importFrom stats quantile
.get_nested_cv_quantile <- function(x, p, prediction_list, folds,
                                    quantile_type = 8){
  # Find all V-1 fold CV fits that include fold x among their validation
  # folds. The first match corresponds to the outer V-fold CV fit and is
  # dropped below; the remainder are the inner CV fits we need.
  valid_folds_idx <- which(unlist(lapply(prediction_list, function(z){
    x %in% z$valid_folds }), use.names = FALSE))
  # keep only inner validation predictions
  inner_valid_prediction_and_y_list <- lapply(prediction_list[valid_folds_idx[-1]],
                                              function(z){
    # pick out the portion of the fit that is not the outer validation fold
    inner_valid_idx <- which(!(z$valid_ids %in% folds[[x]]))
    inner_pred <- z$test_pred[inner_valid_idx]
    inner_y <- z$test_y[inner_valid_idx]
    return(list(test_pred = inner_pred, inner_test_y = inner_y))
  })
  # pool all inner-validation predictions with Y = 1
  train_psi_1 <- unlist(lapply(inner_valid_prediction_and_y_list, function(z){
    z$test_pred[z$inner_test_y == 1]
  }), use.names = FALSE)
  # get the quantile; use the full argument name `probs` (no partial
  # matching) and an explicit stats:: namespace, matching @importFrom
  c0 <- stats::quantile(train_psi_1, probs = p, type = quantile_type)
  return(c0)
}
#' Function to estimate density needed to evaluate standard errors.
#' @param x An entry in prediction_list (or, if \code{nested_cv = TRUE}, a
#' numeric fold identifier).
#' @param c0 The point at which the density estimate is evaluated.
#' @param bounded_kernel Should a bounded kernel be used? Default is \code{FALSE}.
#' @param x_name Name of variable to compute density of.
#' @param y_name Name of variable to stratify density computation on.
#' @param nested_cv Use nested CV to estimate density?
#' @param prediction_list Properly formatted list of predictions.
#' @param folds Cross-validation fold assignments.
#' @param maxDens The maximum allowed value for the density (also used as a
#' fallback when too few unique prediction values exist to run kernel
#' density estimation).
#' @param ... Other options (not currently used)
#' @return A list with \code{f_10_c0} (density of predictions given Y = 1,
#' evaluated at \code{c0}) and \code{f_0_c0} (marginal density at \code{c0}).
#' @importFrom np npudensbw npudens
#' @importFrom stats predict
.get_density <- function(x, c0, bounded_kernel = FALSE,
                         x_name = "train_pred",
                         y_name = "train_y",
                         nested_cv = FALSE, prediction_list = NULL,
                         folds = NULL, maxDens = 1e3, ... ){
  if(!nested_cv){
    if(!bounded_kernel){
      # Kernel density estimation needs > 1 unique value; otherwise fall
      # back to maxDens.
      # BUG FIX: stratify using y_name (previously hard-coded x$train_y,
      # which silently ignored a non-default y_name).
      if(length(unique(x[[x_name]][x[[y_name]] == 1])) > 1){
        # density given y = 1
        fitbw <- np::npudensbw(x[[x_name]][x[[y_name]] == 1])
        fit <- np::npudens(fitbw)
        # estimate at c0
        f_10_c0 <- stats::predict(fit, edat = c0)
      }else{
        f_10_c0 <- maxDens
      }
      if(length(unique(x[[x_name]])) > 1){
        # marginal density
        fitbw_marg <- np::npudensbw(x[[x_name]])
        fit_marg <- np::npudens(fitbw_marg)
        # estimate at c0
        f_0_c0 <- stats::predict(fit_marg, edat = c0)
      }else{
        f_0_c0 <- maxDens
      }
    }else{
      stop("bounded density estimation removed from package")
    }
  }else{
    # find all V-1 fold CV fits with this x in them. These will be the inner
    # CV fits that are needed. The first entry in this vector will correspond
    # to the outer V fold CV fit, which is dropped below.
    valid_folds_idx <- which(unlist(lapply(prediction_list, function(z){
      x %in% z$valid_folds }), use.names = FALSE))
    # get only inner validation predictions
    inner_valid_prediction_and_y_list <- lapply(prediction_list[valid_folds_idx[-1]],
                                                function(z){
      # pick out the fold that is not the outer validation fold
      inner_valid_idx <- which(!(z$valid_ids %in% folds[[x]]))
      # get predictions for this fold
      inner_pred <- z$test_pred[inner_valid_idx]
      inner_y <- z$test_y[inner_valid_idx]
      return(list(test_pred = inner_pred, inner_test_y = inner_y))
    })
    # pool inner-validation predictions and outcomes across inner fits
    all_pred <- Reduce("c", lapply(inner_valid_prediction_and_y_list, "[[",
                                   "test_pred"))
    all_y <- Reduce("c", lapply(inner_valid_prediction_and_y_list, "[[",
                                "inner_test_y"))
    if(bounded_kernel){
      stop("bounded density estimation removed from package")
    }else{
      if(length(unique(all_pred[all_y == 1])) > 1){
        # density given y = 1
        fitbw <- np::npudensbw(all_pred[all_y == 1])
        fit <- np::npudens(fitbw)
        # estimate at c0
        f_10_c0 <- stats::predict(fit, edat = c0)
      }else{
        f_10_c0 <- maxDens
      }
      # BUG FIX: the marginal-density guard previously re-checked the
      # Y = 1 subset (all_pred[all_y == 1]); it must check all predictions,
      # mirroring the non-nested branch above.
      if(length(unique(all_pred)) > 1){
        # marginal density
        fitbw_marg <- np::npudensbw(all_pred)
        fit_marg <- np::npudens(fitbw_marg)
        # estimate at c0
        f_0_c0 <- stats::predict(fit_marg, edat = c0)
      }else{
        f_0_c0 <- maxDens
      }
    }
  }
  # return both density estimates
  return(list(f_10_c0 = f_10_c0, f_0_c0 = f_0_c0))
}
#' Helper function for making data set in proper format for CVTMLE
#'
#' Builds the long-format data used by the targeting (fluctuation) step.
#' Two data.frames are returned: \code{out} has one row per validation-set
#' observation (used to fit the fluctuation regression), and \code{out_pred}
#' has two rows per fold -- one with \code{Y = 0} and one with \code{Y = 1} --
#' at which the conditional CDF estimates are evaluated.
#'
#' @param x A numeric identifier of which entry in \code{prediction_list} to operate on.
#' @param prediction_list Properly formatted list of predictions.
#' @param quantile_list List of estimated quantile for each fold.
#' @param density_list List of density estimates for each fold.
#' @param folds Cross-validation fold assignments.
#' @param nested_cv A boolean indicating whether nested CV was used in estimation.
#' @param gn An estimate of the marginal probability that \code{Y = 1}.
#' @return A list with \code{out} (targeting data) and \code{out_pred}
#' (prediction data), as described above.
.make_targeting_data <- function(x, prediction_list, quantile_list,
density_list, folds,
nested_cv = FALSE, gn){
K <- length(folds)
if(!nested_cv){
# non-nested case: build rows for all K folds at once
# (the x argument is not used in this branch)
Y_vec <- Reduce(c, lapply(prediction_list, "[[", "test_y"))
Y_vec_pred <- rep(c(0,1), K)
n <- length(Y_vec)
fold_vec <- sort(rep(seq_len(K), unlist(lapply(folds, length))))
fold_vec_pred <- sort(rep(seq_len(K), 2))
gn_vec <- gn
# training-fold CDF of predictions at the fold-specific quantile c0,
# evaluated conditional on each validation observation's outcome
F_nBn_vec <- Reduce(c, mapply(FUN = function(m, c0){
F_nBn_y1_at_c0 <- F_nBn_star(psi_x = c0, y = 1, train_pred = m$train_pred, train_y = m$train_y)
F_nBn_y0_at_c0 <- F_nBn_star(psi_x = c0, y = 0, train_pred = m$train_pred, train_y = m$train_y)
ifelse(m$test_y == 0, F_nBn_y0_at_c0, F_nBn_y1_at_c0)
}, c0 = quantile_list, m = prediction_list))
# same CDF evaluations, but as one (Y = 0, Y = 1) pair per fold
F_nBn_vec_pred <- Reduce(c, mapply(FUN = function(m, c0){
F_nBn_y1_at_c0 <- F_nBn_star(psi_x = c0, y = 1, train_pred = m$train_pred, train_y = m$train_y)
F_nBn_y0_at_c0 <- F_nBn_star(psi_x = c0, y = 0, train_pred = m$train_pred, train_y = m$train_y)
c(F_nBn_y0_at_c0, F_nBn_y1_at_c0)
}, c0 = quantile_list, m = prediction_list))
# density ratio dens[[2]]/dens[[1]]; a zero denominator is replaced by
# 1e-3 and the numerator is capped at 1e3 to keep the ratio finite
dens_ratio <- Reduce(c, mapply(FUN = function(m, dens){
if(dens[[1]] == 0){ dens[[1]] <- 1e-3 }
if(dens[[2]] > 1e3){ dens[[2]] <- 1e3 }
rep(dens[[2]]/dens[[1]], length(m$test_y))
}, m = prediction_list, dens = density_list))
dens_ratio_pred <- Reduce(c, mapply(FUN = function(m, dens){
if(dens[[1]] == 0){ dens[[1]] <- 1e-3 }
if(dens[[2]] > 1e3){ dens[[2]] <- 1e3 }
rep(dens[[2]]/dens[[1]], 2)
}, m = prediction_list, dens = density_list))
# indicator that the validation prediction falls at or below c0
ind <- Reduce(c, mapply(m = prediction_list, c0 = quantile_list, function(m, c0){
as.numeric(m$test_pred <= c0)
}))
# NOTE(review): ind_pred is assigned but never used below -- candidate
# for removal
ind_pred <- c(NA, NA)
}else{
# nested-CV case: build rows for the single outer fold x only.
# find all V-1 fold CV fits with this x in them. These will be the inner
# CV fits that are needed. The first entry in this vector will correspond
# to the outer V fold CV fit, which is what we want to make the outcome
# of the long data list with.
valid_folds_idx <- which(unlist(lapply(prediction_list, function(z){
x %in% z$valid_folds }), use.names = FALSE))
# get only inner validation predictions
inner_valid_prediction_and_y_list <- lapply(prediction_list[valid_folds_idx[-1]],
function(z){
# pick out the fold that is not the outer validation fold
inner_valid_idx <- which(!(z$valid_ids %in% folds[[x]]))
# get predictions for this fold
inner_pred <- z$test_pred[inner_valid_idx]
inner_y <- z$test_y[inner_valid_idx]
return(list(test_pred = inner_pred, inner_test_y = inner_y))
})
Y_vec <- Reduce(c, lapply(prediction_list[x], "[[", "test_y"))
Y_vec_pred <- c(0,1)
fold_vec <- rep(x, length(Y_vec))
fold_vec_pred <- rep(x, length(Y_vec_pred))
gn_vec <- gn
n <- length(Y_vec)
# CDF of inner-validation predictions at the fold-x quantile, conditional
# on Y = 1 and Y = 0 respectively
F_nBn_y1_at_c0 <- F_nBn_star_nested_cv(psi_x = quantile_list[[x]], y = 1,
inner_valid_prediction_and_y_list = inner_valid_prediction_and_y_list)
F_nBn_y0_at_c0 <- F_nBn_star_nested_cv(psi_x = quantile_list[[x]], y = 0,
inner_valid_prediction_and_y_list = inner_valid_prediction_and_y_list)
F_nBn_vec <- ifelse(prediction_list[[x]]$test_y == 0, F_nBn_y0_at_c0, F_nBn_y1_at_c0)
F_nBn_vec_pred <- c(F_nBn_y0_at_c0, F_nBn_y1_at_c0)
# same density truncation as in the non-nested branch above
if(density_list[[x]][[1]] == 0){ density_list[[x]][[1]] <- 1e-3 }
if(density_list[[x]][[2]] > 1e3){ density_list[[x]][[2]] <- 1e3 }
dens_ratio <- density_list[[x]][[2]]/density_list[[x]][[1]]
dens_ratio_pred <- density_list[[x]][[2]]/density_list[[x]][[1]]
ind <- as.numeric(prediction_list[[x]]$test_pred <= quantile_list[[x]])
}
# scalar columns (e.g. gn_vec, and dens_ratio in the nested branch) are
# recycled by data.frame to the length of the longest column
out <- data.frame(fold = fold_vec, Y = Y_vec, gn = gn_vec, Fn = F_nBn_vec,
f_ratio = dens_ratio, ind = ind)
out_pred <- data.frame(fold = fold_vec_pred, Y = Y_vec_pred, gn = gn_vec,
Fn = F_nBn_vec_pred, f_ratio = dens_ratio_pred)
return(list(out = out, out_pred = out_pred))
}
#' Compute the bootstrap-corrected estimator of SCRNP.
#'
#' This estimator is computed by re-sampling with replacement (i.e., bootstrap
#' sampling) from the data. The SCRNP is computed for the learner trained on the
#' full data. The SCRNP is then computed for the learner trained on each bootstrap
#' sample. The average difference between the full data-trained learner and
#' the bootstrap-trained learner is computed to estimate the bias in the full-data-estimated
#' SCRNP. The final estimate of SCRNP is given by the difference in the full-data SCRNP
#' and the estimated bias.
#'
#' @param Y A numeric vector of outcomes, assumed to equal \code{0} or \code{1}.
#' @param X A \code{data.frame} of variables for prediction.
#' @param B The number of bootstrap samples.
#' @param learner A wrapper that implements the desired method for building a
#' prediction algorithm. See \code{?glm_wrapper} or read the package vignette
#' for more information on formatting \code{learner}s.
#' @param sens The sensitivity constraint to use.
#' @param correct632 A boolean indicating whether to use the .632 correction.
#' @param ... Other options, not currently used.
#' @return A list with \code{$scrnp} the bootstrap-corrected estimate of SCRNP and
#' \code{$n_valid_boot} as the number of bootstrap samples where \code{learner}
#' successfully executed.
#' @export
#' @examples
#' # simulate data
#' X <- data.frame(x1 = rnorm(50))
#' Y <- rbinom(50, 1, plogis(X$x1))
#' # compute bootstrap estimate of scrnp for logistic regression
#' # use small B for fast run
#' boot <- boot_scrnp(Y = Y, X = X, B = 25, learner = "glm_wrapper")
boot_scrnp <- function(Y, X, B = 200, learner = "glm_wrapper",
                       sens = 0.95, correct632 = FALSE, ...){
  n <- length(Y)
  # fit the learner on the full data, predicting back on the same data
  full_fit <- do.call(learner, args = list(train = list(Y = Y, X = X),
                                           test = list(Y = Y, X = X)))
  # cutoff achieving the sensitivity constraint among Y = 1 observations
  # (train and test are the same data here, so train_y aligns with test_pred)
  full_c0 <- stats::quantile(full_fit$test_pred[full_fit$train_y == 1],
                             p = 1 - sens)
  full_est <- mean(full_fit$test_pred <= full_c0)
  # one value per bootstrap sample; failed learner fits yield NA and are
  # excluded from the averages below
  all_boot <- replicate(B, one_boot_scrnp(Y = Y, X = X, n = n,
                                          correct632 = correct632,
                                          learner = learner,
                                          sens = sens))
  if(!correct632){
    # standard bootstrap bias correction: subtract the average optimism
    mean_optimism <- mean(all_boot, na.rm = TRUE)
    corrected_est <- full_est - mean_optimism
  }else{
    # .632-style correction
    scrnp_b <- mean(all_boot, na.rm = TRUE)
    # "no-information" SCRNP: pair every prediction with every outcome by
    # copying each prediction n times ...
    long_pred <- rep(full_fit$test_pred, each = n)
    # ... and the observed outcome vector n times
    long_y <- rep(Y, n)
    # quantile computed on the fully crossed data
    long_c0 <- stats::quantile(long_pred[long_y == 1], p = 1 - sens, na.rm = TRUE)
    g <- mean(long_pred <= long_c0, na.rm = TRUE)
    # relative overfitting rate
    R <- (scrnp_b - full_est)/(g - full_est)
    # 632 weight
    w <- 0.632 / (1 - 0.368*R)
    # weighted estimate
    corrected_est <- (1 - w)*full_est + w * scrnp_b
  }
  return(list(scrnp = corrected_est, n_valid_boot = sum(!is.na(all_boot))))
}
#' Internal function used to perform one bootstrap sample. The function
#' \code{try}s to fit \code{learner} on a bootstrap sample. If for some reason
#' (e.g., the bootstrap sample contains no observations with \code{Y = 1})
#' the learner fails, then the function returns \code{NA}. These \code{NA}s
#' are ignored later when computing the bootstrap corrected estimate.
#' @param Y A numeric binary outcome
#' @param X A \code{data.frame} of variables for prediction.
#' @param correct632 A boolean indicating whether to use the .632 correction.
#' @param learner A wrapper that implements the desired method for building a
#' prediction algorithm. See \code{?glm_wrapper} or read the package vignette
#' for more information on formatting \code{learner}s.
#' @param sens The sensitivity constraint to use.
#' @param n Number of observations
#' @return If \code{learner} executes successfully, a numeric estimate of SCRNP
#' on this bootstrap sample (the optimism when \code{correct632 = FALSE}, or
#' the out-of-bag SCRNP when \code{correct632 = TRUE}). Otherwise the function
#' returns \code{NA}.
one_boot_scrnp <- function(Y, X, n, correct632, learner, sens){
  # bootstrap resample of the data
  idx <- sample(seq_len(n), replace = TRUE)
  train_Y <- Y[idx]
  train_X <- X[idx, , drop = FALSE]
  # train on the bootstrap sample, predict on the original data; a failed
  # fit yields NA
  fit <- tryCatch({
    do.call(learner, args = list(train = list(Y = train_Y, X = train_X),
                                 test = list(Y = Y, X = X)))
  }, error = function(e){
    NA
  })
  if(!is.logical(fit)){
    if(!correct632){
      # optimism = apparent SCRNP on the bootstrap sample minus SCRNP of
      # the bootstrap-trained learner evaluated on the original data
      train_c0 <- stats::quantile(fit$train_pred[fit$train_y == 1], p = 1 - sens)
      test_c0 <- stats::quantile(fit$test_pred[fit$test_y == 1], p = 1 - sens)
      train_est <- mean(fit$train_pred <= train_c0)
      test_est <- mean(fit$test_pred <= test_c0)
      out <- train_est - test_est
    }else{
      # out-of-bag observations: rows never drawn into the bootstrap sample
      oob_idx <- which(!(seq_len(n) %in% idx))
      # BUG FIX: stratify the quantile by the out-of-bag outcomes
      # (fit$test_y, i.e. the original Y). Previously fit$train_y (the
      # bootstrap-sample outcomes) was indexed by oob_idx, which selects
      # the wrong observations.
      oob_c0 <- stats::quantile(fit$test_pred[oob_idx][fit$test_y[oob_idx] == 1],
                                p = 1 - sens)
      out <- mean(fit$test_pred[oob_idx] <= oob_c0)
    }
    return(out)
  }else{
    return(NA)
  }
}
| /R/ppv_functions.R | permissive | benkeser/nlpred | R | false | false | 29,626 | r | #' Estimates of CV SCRNP
#'
#' This function computes K-fold cross-validated estimates of estimates of
#' cross-validated sensitivity-constrained rate of negative prediction (SCRNP). This
#' quantity can be interpreted as the rate of negative classification for a fixed
#' constraint on the sensitivity of a prediction algorithm. Thus, if an algorithm
#' has a high SCRNP, it will also have a high positive predictive value.
#'
#' To estimate the SCRNP using K-fold cross-validation is problematic. If
#' data are partitioned into K distinct groups, depending on the sample size
#' and choice of K, the validation sample may be quite small. In order to estimate
#' SCRNP, we require estimation of a quantile of the predictor's distribution. More extreme
#' quantiles (which correspond to high sensitivity constraints) are difficult to estimate
#' using few observations. Here, we estimate relevant nuisance parameters in the training sample and use
#' the validation sample to perform some form of bias correction -- either through
#' cross-validated targeted minimum loss-based estimation, estimating equations,
#' or one-step estimation. When aggressive learning algorithms are applied, it is
#' necessary to use an additional layer of cross-validation in the training sample
#' to estimate the nuisance parameters. This is controlled via the \code{nested_cv}
#' option below.
#'
#' @param Y A numeric vector of outcomes, assume to equal \code{0} or \code{1}.
#' @param X A \code{data.frame} or \code{matrix} of variables for prediction.
#' @param K The number of cross-validation folds (default is \code{10}).
#' @param sens The sensitivity constraint imposed on the rate of negative prediction
#' (see description).
#' @param learner A wrapper that implements the desired method for building a
#' prediction algorithm.
#' @param nested_cv A boolean indicating whether nested cross validation should
#' be used to estimate the distribution of the prediction function. Default (\code{TRUE})
#' is best choice for aggressive \code{learner}'s, while \code{FALSE} is reasonable
#' for smooth \code{learner}'s (e.g., logistic regression).
#' @param nested_K If nested cross validation is used, how many inner folds should
#' there be? Default (\code{K-1}) affords quicker computation by reusing training
#' fold learner fits.
#' @param parallel A boolean indicating whether prediction algorithms should be
#' trained in parallel. Default to \code{FALSE}.
#' @param max_cvtmle_iter Maximum number of iterations for the bias correction
#' step of the CV-TMLE estimator (default \code{10}).
#' @param cvtmle_ictol The CV-TMLE will iterate until \code{max_cvtmle_iter} is reached
#' or the mean of the cross-validated efficient influence function is less than
#' \code{cvtmle_ictol}.
#' @param quantile_type Type of quantile estimator to be used. See \link[stats]{quantile}
#' for description.
#' @param prediction_list For power users: a list of predictions made by \code{learner}
#' that has a format compatible with \code{cvauc}.
#' @param ... Other arguments, not currently used
#' @importFrom SuperLearner CVFolds
#' @importFrom cvAUC ci.cvAUC
#' @importFrom stats uniroot
#' @export
#' @return An object of class \code{"scrnp"}. \describe{
#' \item{\code{est_cvtmle}}{cross-validated targeted minimum loss-based estimator of K-fold CV SCRNP}
#' \item{\code{iter_cvtmle}}{iterations needed to achieve convergence of CVTMLE algorithm}
#' \item{\code{cvtmle_trace}}{the value of the CVTMLE at each iteration of the targeting algorithm}
#' \item{\code{se_cvtmle}}{estimated standard error based on targeted nuisance parameters}
#' \item{\code{est_init}}{plug-in estimate of CV SCRNP where nuisance parameters are estimated
#' in the training sample}
#' \item{\code{est_empirical}}{the standard K-fold CV SCRNP estimator}
#' \item{\code{se_empirical}}{estimated standard error for the standard estimator}
#' \item{\code{est_onestep}}{cross-validated one-step estimate of K-fold CV SCRNP}
#' \item{\code{se_onestep}}{estimated standard error for the one-step estimator}
#' \item{\code{est_esteq}}{cross-validated estimating equations estimate of K-fold CV SCRNP
#' (here, equivalent to one-step, since the estimating equation is linear in SCRNP)}
#' \item{\code{se_esteq}}{estimated standard error for the estimating equations estimator
#' (same as one-step)}
#' \item{\code{folds}}{list of observation indexes in each validation fold}
#' \item{\code{ic_cvtmle}}{influence function evaluated at the targeted nuisance parameter
#' estimates}
#' \item{\code{ic_onestep}}{influence function evaluated at the training-fold-estimated
#' nuisance parameters}
#' \item{\code{ic_esteq}}{influence function evaluated at the training-fold-estimated
#' nuisance parameters}
#' \item{\code{ic_empirical}}{influence function evaluated at the validation-fold
#' estimated nuisance parameters}
#' \item{\code{prediction_list}}{a list of output from the cross-validated model training;
#' see the individual wrapper function documentation for further details}
#' }
#' @examples
#' # simulate data
#' n <- 200
#' p <- 10
#' X <- data.frame(matrix(rnorm(n*p), nrow = n, ncol = p))
#' Y <- rbinom(n, 1, plogis(X[,1] + X[,10]))
#'
#' # estimate cv scrnp of logistic regression
#' scrnp_ests <- cv_scrnp(Y = Y, X = X, K = 5,
#' nested_cv = FALSE,
#' learner = "glm_wrapper")
#'
#' # estimate cv scrnp of random forest with nested
#' # cross-validation for nuisance parameter estimation
#' \donttest{
#' scrnp_ests <- cv_scrnp(Y = Y, X = X, K = 5,
#' nested_cv = TRUE,
#' learner = "randomforest_wrapper")
#' }
cv_scrnp <- function(Y, X, K = 10, sens = 0.95,
                     learner = "glm_wrapper",
                     nested_cv = TRUE,
                     nested_K = K - 1,
                     parallel = FALSE,
                     max_cvtmle_iter = 10,
                     cvtmle_ictol = 1/length(Y),
                     quantile_type = 8,
                     prediction_list = NULL,
                     ...){
  # NOTE(review): max_cvtmle_iter and cvtmle_ictol are currently unused --
  # the targeting step below is a single (non-iterated) fluctuation; the
  # arguments are retained for interface stability.
  # test inputs
  assertthat::assert_that(all(Y %in% c(0,1)))
  assertthat::assert_that(0 < sens & sens < 1)
  if(!nested_cv){
    assertthat::assert_that(K > 1)
  }else{
    assertthat::assert_that(K > 2)
    assertthat::assert_that(nested_K > 1)
  }
  # sample size
  n <- length(Y)
  # make outer cross-validation folds, stratifying on Y when each class
  # has at least K observations
  folds <- SuperLearner::CVFolds(
    N = n, id = NULL, Y = Y, cvControl = list(
      V = K, stratifyCV = ifelse(K <= sum(Y) & K <= sum(!Y), TRUE, FALSE),
      shuffle = TRUE, validRows = NULL
    )
  )
  # train learners in all necessary combination of folds
  if(is.null(prediction_list)){
    # BUG FIX: forward the user's parallel option (previously hard-coded
    # to FALSE, silently ignoring the parallel argument)
    prediction_list <- .get_predictions(
      learner = learner, Y = Y, X = X, K = K, nested_K = nested_K,
      folds = folds, parallel = parallel, nested_cv = nested_cv
    )
  }
  # estimate the (1 - sens) quantile of predictions among Y = 1
  if(!nested_cv){
    quantile_list <- lapply(prediction_list[seq_len(K)], .get_quantile,
                            p = 1 - sens, quantile_type = quantile_type)
  }else{
    quantile_list <- sapply(1:K, .get_nested_cv_quantile,
                            quantile_type = quantile_type,
                            prediction_list = prediction_list, folds = folds,
                            p = 1 - sens, simplify = FALSE)
  }
  # estimate conditional/marginal densities of the predictions at the
  # fold-specific quantiles
  if(!nested_cv){
    density_list <- mapply(x = prediction_list[1:K], c0 = quantile_list,
                           FUN = .get_density, SIMPLIFY = FALSE)
  }else{
    density_list <- mapply(x = split(seq_len(K), seq_len(K)), c0 = quantile_list,
                           FUN = .get_density, SIMPLIFY = FALSE,
                           MoreArgs = list(prediction_list = prediction_list,
                                           folds = folds, nested_cv = nested_cv))
  }
  # make targeting data (one row per validation observation) and prediction
  # data (two rows per fold: Y = 0 and Y = 1)
  if(!nested_cv){
    target_and_pred_data <- .make_targeting_data(prediction_list = prediction_list,
                                                 quantile_list = quantile_list,
                                                 density_list = density_list,
                                                 folds = folds, gn = mean(Y))
    target_data <- target_and_pred_data$out
    pred_data <- target_and_pred_data$out_pred
  }else{
    target_and_pred_data <- sapply(seq_len(K), .make_targeting_data,
                                   prediction_list = prediction_list,
                                   quantile_list = quantile_list,
                                   density_list = density_list, folds = folds,
                                   gn = mean(Y), nested_cv = TRUE, simplify = FALSE)
    target_data <- Reduce("rbind", lapply(target_and_pred_data, "[[", "out"))
    pred_data <- Reduce("rbind", lapply(target_and_pred_data, "[[", "out_pred"))
  }
  # clever covariate for the fluctuation (targeting) regression
  target_data$weight <- with(target_data, 1 - Y/gn * f_ratio)
  pred_data$weight <- with(pred_data, 1 - Y/gn * f_ratio)
  target_data$logit_Fn <- SuperLearner::trimLogit(target_data$Fn, trim = 1e-5)
  pred_data$logit_Fn <- SuperLearner::trimLogit(pred_data$Fn, trim = 1e-5)
  # one-dimensional logistic fluctuation through the initial CDF estimates
  fluc_mod <- glm(ind ~ -1 + offset(logit_Fn) + weight,
                  data = target_data, family = "binomial", start = 0)
  target_data$Fnstar <- fluc_mod$fitted.values
  pred_data$Fnstar <- predict(fluc_mod, newdata = pred_data, type = "response")
  # compute initial non-targeted estimates
  init_estimates <- by(pred_data, pred_data$fold, function(x){
    x$Fn[x$Y==0][1] * (1-x$gn[1]) + x$Fn[x$Y==1][1] * x$gn[1]
  })
  # compute targeted parameter estimate for each fold
  cvtmle_estimates <- by(pred_data, pred_data$fold, function(x){
    x$Fnstar[x$Y==0][1] * (1-x$gn[1]) + x$Fnstar[x$Y==1][1] * x$gn[1]
  })
  # map fold-level (targeted and initial) CDF evaluations back onto
  # observation-level rows for influence-function computation
  target_data$F0nstar <- NaN
  target_data$F1nstar <- NaN
  for(k in seq_len(K)){
    target_data$F0nstar[target_data$fold == k] <- pred_data$Fnstar[pred_data$Y == 0 & pred_data$fold == k][1]
    target_data$F0n[target_data$fold == k] <- pred_data$Fn[pred_data$Y == 0 & pred_data$fold == k][1]
    target_data$F1nstar[target_data$fold == k] <- pred_data$Fnstar[pred_data$Y == 1 & pred_data$fold == k][1]
    target_data$F1n[target_data$fold == k] <- pred_data$Fn[pred_data$Y == 1 & pred_data$fold == k][1]
  }
  # influence function pieces: outcome part (DY) and prediction part (Dpsi),
  # at the targeted (cvtmle) and initial (os) nuisance estimates
  target_data$DY_cvtmle <- with(target_data, Fnstar - (gn*F1nstar + (1 - gn)*F0nstar))
  target_data$Dpsi_cvtmle <- with(target_data, weight * (ind - Fnstar))
  target_data$DY_os <- with(target_data, Fn - (gn*F1n + (1 - gn)*F0n))
  target_data$Dpsi_os <- with(target_data, weight * (ind - Fn))
  # cvtmle estimates
  est_cvtmle <- mean(cvtmle_estimates)
  se_cvtmle <- sqrt(var(target_data$DY_cvtmle + target_data$Dpsi_cvtmle) / n)
  # initial (plug-in) and one-step estimates
  est_init <- mean(unlist(init_estimates))
  est_onestep <- est_init + mean(target_data$DY_os + target_data$Dpsi_os)
  se_onestep <- sqrt(var(target_data$DY_os + target_data$Dpsi_os) / n)
  # standard (empirical) K-fold CV estimator
  cv_empirical_estimates <- .get_cv_estim(prediction_list[1:K], sens = sens,
                                          gn = mean(Y), quantile_type = quantile_type)
  # average fold-specific estimates and influence-function variances
  est_empirical <- mean(unlist(lapply(cv_empirical_estimates, "[[", "est")))
  var_empirical <- mean(unlist(lapply(cv_empirical_estimates, function(x){
    var(x$ic)
  })))
  ic_empirical <- Reduce(c, lapply(cv_empirical_estimates, "[[", "ic"))
  se_empirical <- sqrt(var_empirical / n)
  # format output
  out <- list()
  out$est_cvtmle <- est_cvtmle
  # out$iter_cvtmle <- iter
  # out$cvtmle_trace <- tmle_auc
  out$se_cvtmle <- se_cvtmle
  out$est_init <- est_init
  out$est_empirical <- est_empirical
  out$se_empirical <- se_empirical
  out$est_onestep <- est_onestep
  out$se_onestep <- se_onestep
  # the estimating equation is linear in SCRNP, so the estimating-equations
  # solution coincides with the one-step estimator
  out$est_esteq <- est_onestep
  out$se_esteq <- se_onestep
  out$se_cvtmle_type <- out$se_esteq_type <-
    out$se_empirical_type <- out$se_onestep_type <- "std"
  out$ic_cvtmle <- target_data$DY_cvtmle + target_data$Dpsi_cvtmle
  out$ic_onestep <- target_data$DY_os + target_data$Dpsi_os
  out$ic_empirical <- ic_empirical
  out$prediction_list <- prediction_list
  class(out) <- "scrnp"
  return(out)
}
#' Helper function to get results for a single cross-validation fold
#' @param x An entry in prediction_list.
#' @param sens The sensitivity constraint.
#' @param gn An estimate of the marginal probability that \code{Y = 1}.
#' @param quantile_type The type of quantile estimate to use.
#' @param ... Other options (not currently used)
#' @importFrom stats quantile
.get_one_fold <- function(x, sens, gn, quantile_type = 8, ...){
  preds <- x$test_pred
  y <- x$test_y
  # cutoff achieving the sensitivity constraint among Y = 1 predictions
  c0 <- stats::quantile(preds[y == 1], p = 1 - sens, type = quantile_type)
  # empirical CDFs of the predictions at c0, by outcome class
  F1nc0 <- mean(preds[y == 1] <= c0)
  F0nc0 <- mean(preds[y == 0] <= c0)
  FYnc0 <- ifelse(y == 1, F1nc0, F0nc0)
  # plug-in SCRNP for this fold
  Psi <- gn * F1nc0 + (1 - gn) * F0nc0
  DY <- FYnc0 - Psi
  # density estimate at c0; fall back to a ratio of 1 on failure
  dens <- tryCatch({
    .get_density(x = x, c0 = c0,
                 bounded_kernel = FALSE,
                 x_name = "test_pred",
                 y_name = "test_y",
                 nested_cv = FALSE, prediction_list = NULL,
                 folds = NULL)
  }, error = function(e){
    list(f_0_c0 = 1, f_10_c0 = 1)
  })
  weight <- 1 - y / gn * dens$f_0_c0 / dens$f_10_c0
  ind <- as.numeric(preds <= c0)
  Dpsi <- weight * (ind - FYnc0)
  list(est = Psi, ic = DY + Dpsi)
}
#' Helper function to turn prediction_list into CV estimate of SCRNP
#' @param prediction_list Properly formatted list of predictions.
#' @param sens The sensitivity constraint.
#' @param gn The marginal probability that \code{Y = 1}.
#' @param quantile_type The type of quantile estimate to use.
#' @param ... Other options (not currently used)
.get_cv_estim <- function(prediction_list, sens, gn, quantile_type = 8, ...){
  # apply the single-fold estimator to every validation fold
  lapply(prediction_list, function(fold){
    .get_one_fold(fold, sens = sens, gn = gn, quantile_type = quantile_type)
  })
}
#' Helper function to get quantile for a single training fold data
#' when nested CV is NOT used.
#' @param x An entry in prediction_list.
#' @param p The quantile to get.
#' @param quantile_type The type of quantile estimate to use.
#' @importFrom stats quantile
.get_quantile <- function(x, p, quantile_type = 8){
  # p-th quantile of training-set predictions among observations with Y = 1
  case_preds <- x$train_pred[x$train_y == 1]
  stats::quantile(case_preds, p = p, type = quantile_type)
}
#' Helper function to get quantile for a single training fold data
#' when nested CV is used.
#' @param x A numeric identifier of the outer validation fold.
#' @param p The quantile to get.
#' @param prediction_list Properly formatted list of predictions.
#' @param folds Cross-validation fold assignments.
#' @param quantile_type The type of quantile estimate to use.
#' @importFrom stats quantile
.get_nested_cv_quantile <- function(x, p, prediction_list, folds,
                                    quantile_type = 8){
  # Identify every fit whose validation folds include outer fold x. The
  # first match corresponds to the outer V-fold CV fit (which supplies the
  # outcome of the long data list); the remaining matches are the inner
  # (V-1)-fold CV fits needed here.
  valid_folds_idx <- which(unlist(lapply(prediction_list, function(z){
    x %in% z$valid_folds }), use.names = FALSE))
  # From each inner fit, keep only predictions on observations that are
  # NOT in the outer validation fold x (true inner-validation rows).
  inner_valid_prediction_and_y_list <- lapply(prediction_list[valid_folds_idx[-1]],
                                              function(z){
    # pick out the fold that is not the outer validation fold
    inner_valid_idx <- which(!(z$valid_ids %in% folds[[x]]))
    # get predictions and outcomes for this fold
    inner_pred <- z$test_pred[inner_valid_idx]
    inner_y <- z$test_y[inner_valid_idx]
    return(list(test_pred = inner_pred, inner_test_y = inner_y))
  })
  # pool all inner-validation predicted values for observations with Y = 1
  train_psi_1 <- unlist(lapply(inner_valid_prediction_and_y_list, function(z){
    z$test_pred[z$inner_test_y == 1]
  }), use.names = FALSE)
  # p-th quantile of the pooled predictions; stats:: namespacing for
  # consistency with the rest of the file
  c0 <- stats::quantile(train_psi_1, probs = p, type = quantile_type)
  return(c0)
}
#' Function to estimate density needed to evaluate standard errors.
#' @param x An entry in prediction_list (or, if \code{nested_cv = TRUE}, a
#' numeric fold identifier).
#' @param c0 The point at which the density estimate is evaluated.
#' @param bounded_kernel Should a bounded kernel be used? Default is \code{FALSE}.
#' @param x_name Name of variable to compute density of.
#' @param y_name Name of variable to stratify density computation on.
#' @param nested_cv Use nested CV to estimate density?
#' @param prediction_list Properly formatted list of predictions.
#' @param folds Cross-validation fold assignments.
#' @param maxDens The maximum allowed value for the density (also used as a
#' fallback when too few unique prediction values exist to run kernel
#' density estimation).
#' @param ... Other options (not currently used)
#' @return A list with \code{f_10_c0} (density of predictions given Y = 1,
#' evaluated at \code{c0}) and \code{f_0_c0} (marginal density at \code{c0}).
#' @importFrom np npudensbw npudens
#' @importFrom stats predict
.get_density <- function(x, c0, bounded_kernel = FALSE,
                         x_name = "train_pred",
                         y_name = "train_y",
                         nested_cv = FALSE, prediction_list = NULL,
                         folds = NULL, maxDens = 1e3, ... ){
  if(!nested_cv){
    if(!bounded_kernel){
      # Kernel density estimation needs > 1 unique value; otherwise fall
      # back to maxDens.
      # BUG FIX: stratify using y_name (previously hard-coded x$train_y,
      # which silently ignored a non-default y_name).
      if(length(unique(x[[x_name]][x[[y_name]] == 1])) > 1){
        # density given y = 1
        fitbw <- np::npudensbw(x[[x_name]][x[[y_name]] == 1])
        fit <- np::npudens(fitbw)
        # estimate at c0
        f_10_c0 <- stats::predict(fit, edat = c0)
      }else{
        f_10_c0 <- maxDens
      }
      if(length(unique(x[[x_name]])) > 1){
        # marginal density
        fitbw_marg <- np::npudensbw(x[[x_name]])
        fit_marg <- np::npudens(fitbw_marg)
        # estimate at c0
        f_0_c0 <- stats::predict(fit_marg, edat = c0)
      }else{
        f_0_c0 <- maxDens
      }
    }else{
      stop("bounded density estimation removed from package")
    }
  }else{
    # find all V-1 fold CV fits with this x in them. These will be the inner
    # CV fits that are needed. The first entry in this vector will correspond
    # to the outer V fold CV fit, which is dropped below.
    valid_folds_idx <- which(unlist(lapply(prediction_list, function(z){
      x %in% z$valid_folds }), use.names = FALSE))
    # get only inner validation predictions
    inner_valid_prediction_and_y_list <- lapply(prediction_list[valid_folds_idx[-1]],
                                                function(z){
      # pick out the fold that is not the outer validation fold
      inner_valid_idx <- which(!(z$valid_ids %in% folds[[x]]))
      # get predictions for this fold
      inner_pred <- z$test_pred[inner_valid_idx]
      inner_y <- z$test_y[inner_valid_idx]
      return(list(test_pred = inner_pred, inner_test_y = inner_y))
    })
    # pool inner-validation predictions and outcomes across inner fits
    all_pred <- Reduce("c", lapply(inner_valid_prediction_and_y_list, "[[",
                                   "test_pred"))
    all_y <- Reduce("c", lapply(inner_valid_prediction_and_y_list, "[[",
                                "inner_test_y"))
    if(bounded_kernel){
      stop("bounded density estimation removed from package")
    }else{
      if(length(unique(all_pred[all_y == 1])) > 1){
        # density given y = 1
        fitbw <- np::npudensbw(all_pred[all_y == 1])
        fit <- np::npudens(fitbw)
        # estimate at c0
        f_10_c0 <- stats::predict(fit, edat = c0)
      }else{
        f_10_c0 <- maxDens
      }
      # BUG FIX: the marginal-density guard previously re-checked the
      # Y = 1 subset (all_pred[all_y == 1]); it must check all predictions,
      # mirroring the non-nested branch above.
      if(length(unique(all_pred)) > 1){
        # marginal density
        fitbw_marg <- np::npudensbw(all_pred)
        fit_marg <- np::npudens(fitbw_marg)
        # estimate at c0
        f_0_c0 <- stats::predict(fit_marg, edat = c0)
      }else{
        f_0_c0 <- maxDens
      }
    }
  }
  # return both density estimates
  return(list(f_10_c0 = f_10_c0, f_0_c0 = f_0_c0))
}
#' Helper function for making data set in proper format for CVTMLE
#'
#' Builds the long-format data used by the targeting (fluctuation) step.
#' Two data.frames are returned: \code{out} has one row per validation-set
#' observation (used to fit the fluctuation regression), and \code{out_pred}
#' has two rows per fold -- one with \code{Y = 0} and one with \code{Y = 1} --
#' at which the conditional CDF estimates are evaluated.
#'
#' @param x A numeric identifier of which entry in \code{prediction_list} to operate on.
#' @param prediction_list Properly formatted list of predictions.
#' @param quantile_list List of estimated quantile for each fold.
#' @param density_list List of density estimates for each fold.
#' @param folds Cross-validation fold assignments.
#' @param nested_cv A boolean indicating whether nested CV was used in estimation.
#' @param gn An estimate of the marginal probability that \code{Y = 1}.
#' @return A list with \code{out} (targeting data) and \code{out_pred}
#' (prediction data), as described above.
.make_targeting_data <- function(x, prediction_list, quantile_list,
density_list, folds,
nested_cv = FALSE, gn){
K <- length(folds)
if(!nested_cv){
# non-nested case: build rows for all K folds at once
# (the x argument is not used in this branch)
Y_vec <- Reduce(c, lapply(prediction_list, "[[", "test_y"))
Y_vec_pred <- rep(c(0,1), K)
n <- length(Y_vec)
fold_vec <- sort(rep(seq_len(K), unlist(lapply(folds, length))))
fold_vec_pred <- sort(rep(seq_len(K), 2))
gn_vec <- gn
# training-fold CDF of predictions at the fold-specific quantile c0,
# evaluated conditional on each validation observation's outcome
F_nBn_vec <- Reduce(c, mapply(FUN = function(m, c0){
F_nBn_y1_at_c0 <- F_nBn_star(psi_x = c0, y = 1, train_pred = m$train_pred, train_y = m$train_y)
F_nBn_y0_at_c0 <- F_nBn_star(psi_x = c0, y = 0, train_pred = m$train_pred, train_y = m$train_y)
ifelse(m$test_y == 0, F_nBn_y0_at_c0, F_nBn_y1_at_c0)
}, c0 = quantile_list, m = prediction_list))
# same CDF evaluations, but as one (Y = 0, Y = 1) pair per fold
F_nBn_vec_pred <- Reduce(c, mapply(FUN = function(m, c0){
F_nBn_y1_at_c0 <- F_nBn_star(psi_x = c0, y = 1, train_pred = m$train_pred, train_y = m$train_y)
F_nBn_y0_at_c0 <- F_nBn_star(psi_x = c0, y = 0, train_pred = m$train_pred, train_y = m$train_y)
c(F_nBn_y0_at_c0, F_nBn_y1_at_c0)
}, c0 = quantile_list, m = prediction_list))
# density ratio dens[[2]]/dens[[1]]; a zero denominator is replaced by
# 1e-3 and the numerator is capped at 1e3 to keep the ratio finite
dens_ratio <- Reduce(c, mapply(FUN = function(m, dens){
if(dens[[1]] == 0){ dens[[1]] <- 1e-3 }
if(dens[[2]] > 1e3){ dens[[2]] <- 1e3 }
rep(dens[[2]]/dens[[1]], length(m$test_y))
}, m = prediction_list, dens = density_list))
dens_ratio_pred <- Reduce(c, mapply(FUN = function(m, dens){
if(dens[[1]] == 0){ dens[[1]] <- 1e-3 }
if(dens[[2]] > 1e3){ dens[[2]] <- 1e3 }
rep(dens[[2]]/dens[[1]], 2)
}, m = prediction_list, dens = density_list))
# indicator that the validation prediction falls at or below c0
ind <- Reduce(c, mapply(m = prediction_list, c0 = quantile_list, function(m, c0){
as.numeric(m$test_pred <= c0)
}))
# NOTE(review): ind_pred is assigned but never used below -- candidate
# for removal
ind_pred <- c(NA, NA)
}else{
# nested-CV case: build rows for the single outer fold x only.
# find all V-1 fold CV fits with this x in them. These will be the inner
# CV fits that are needed. The first entry in this vector will correspond
# to the outer V fold CV fit, which is what we want to make the outcome
# of the long data list with.
valid_folds_idx <- which(unlist(lapply(prediction_list, function(z){
x %in% z$valid_folds }), use.names = FALSE))
# get only inner validation predictions
inner_valid_prediction_and_y_list <- lapply(prediction_list[valid_folds_idx[-1]],
function(z){
# pick out the fold that is not the outer validation fold
inner_valid_idx <- which(!(z$valid_ids %in% folds[[x]]))
# get predictions for this fold
inner_pred <- z$test_pred[inner_valid_idx]
inner_y <- z$test_y[inner_valid_idx]
return(list(test_pred = inner_pred, inner_test_y = inner_y))
})
Y_vec <- Reduce(c, lapply(prediction_list[x], "[[", "test_y"))
Y_vec_pred <- c(0,1)
fold_vec <- rep(x, length(Y_vec))
fold_vec_pred <- rep(x, length(Y_vec_pred))
gn_vec <- gn
n <- length(Y_vec)
# CDF of inner-validation predictions at the fold-x quantile, conditional
# on Y = 1 and Y = 0 respectively
F_nBn_y1_at_c0 <- F_nBn_star_nested_cv(psi_x = quantile_list[[x]], y = 1,
inner_valid_prediction_and_y_list = inner_valid_prediction_and_y_list)
F_nBn_y0_at_c0 <- F_nBn_star_nested_cv(psi_x = quantile_list[[x]], y = 0,
inner_valid_prediction_and_y_list = inner_valid_prediction_and_y_list)
F_nBn_vec <- ifelse(prediction_list[[x]]$test_y == 0, F_nBn_y0_at_c0, F_nBn_y1_at_c0)
F_nBn_vec_pred <- c(F_nBn_y0_at_c0, F_nBn_y1_at_c0)
# same density truncation as in the non-nested branch above
if(density_list[[x]][[1]] == 0){ density_list[[x]][[1]] <- 1e-3 }
if(density_list[[x]][[2]] > 1e3){ density_list[[x]][[2]] <- 1e3 }
dens_ratio <- density_list[[x]][[2]]/density_list[[x]][[1]]
dens_ratio_pred <- density_list[[x]][[2]]/density_list[[x]][[1]]
ind <- as.numeric(prediction_list[[x]]$test_pred <= quantile_list[[x]])
}
# scalar columns (e.g. gn_vec, and dens_ratio in the nested branch) are
# recycled by data.frame to the length of the longest column
out <- data.frame(fold = fold_vec, Y = Y_vec, gn = gn_vec, Fn = F_nBn_vec,
f_ratio = dens_ratio, ind = ind)
out_pred <- data.frame(fold = fold_vec_pred, Y = Y_vec_pred, gn = gn_vec,
Fn = F_nBn_vec_pred, f_ratio = dens_ratio_pred)
return(list(out = out, out_pred = out_pred))
}
#' Compute the bootstrap-corrected estimator of SCRNP.
#'
#' This estimator is computed by re-sampling with replacement (i.e., bootstrap
#' sampling) from the data. The SCRNP is computed for the learner trained on the
#' full data. The SCRNP is then computed for the learner trained on each bootstrap
#' sample. The average difference between the full data-trained learner and
#' the bootstrap-trained learner is computed to estimate the bias in the full-data-estimated
#' SCRNP. The final estimate of SCRNP is given by the difference in the full-data SCRNP
#' and the estimated bias.
#'
#' @param Y A numeric vector of outcomes, assume to equal \code{0} or \code{1}.
#' @param X A \code{data.frame} of variables for prediction.
#' @param B The number of bootstrap samples.
#' @param learner A wrapper that implements the desired method for building a
#' prediction algorithm. See \code{?glm_wrapper} or read the package vignette
#' for more information on formatting \code{learner}s.
#' @param sens The sensitivity constraint to use.
#' @param correct632 A boolean indicating whether to use the .632 correction.
#' @param ... Other options, not currently used.
#' @return A list with \code{$scrnp} the bootstrap-corrected estimate of SCRNP and
#' \code{$n_valid_boot} as the number of bootstrap samples where \code{learner}
#' successfully executed.
#' @export
#' @examples
#' # simulate data
#' X <- data.frame(x1 = rnorm(50))
#' Y <- rbinom(50, 1, plogis(X$x1))
#' # compute bootstrap estimate of scrnp for logistic regression
#' # use small B for fast run
#' boot <- boot_scrnp(Y = Y, X = X, B = 25, learner = "glm_wrapper")
boot_scrnp <- function(Y, X, B = 200, learner = "glm_wrapper",
                       sens = 0.95, correct632 = FALSE, ...){
  n <- length(Y)
  # Fit the learner once on the full data. Train and test sets are identical
  # here, so test_pred are in-sample predictions on the original data.
  full_fit <- do.call(learner, args=list(train = list(Y = Y, X = X),
                                         test = list(Y = Y, X = X)))
  # Cutoff that achieves sensitivity `sens` among observed cases (Y == 1).
  # Use stats::quantile for consistency with one_boot_scrnp.
  full_c0 <- stats::quantile(full_fit$test_pred[full_fit$train_y == 1], p = 1 - sens)
  # Apparent (full-data) SCRNP estimate.
  full_est <- mean(full_fit$test_pred <= full_c0)
  # One value per bootstrap resample: the estimated optimism (standard
  # correction) or the out-of-bag SCRNP (.632 correction). Failed learner
  # fits return NA and are dropped from the averages below.
  all_boot <- replicate(B, one_boot_scrnp(Y = Y, X = X, n = n,
                                          correct632 = correct632,
                                          learner = learner,
                                          sens = sens))
  if(!correct632){
    # Standard bootstrap bias correction: subtract the average optimism.
    mean_optimism <- mean(all_boot, na.rm = TRUE)
    corrected_est <- full_est - mean_optimism
  }else{
    # .632 correction: weighted combination of the apparent estimate and
    # the average out-of-bag (leave-one-out bootstrap) estimate.
    scrnp_b <- mean(all_boot, na.rm = TRUE)
    # "No-information" rate g: evaluate every prediction against every
    # outcome by replicating predictions and outcomes n times each.
    long_pred <- rep(full_fit$test_pred, each = n)
    long_y <- rep(Y, n)
    long_c0 <- stats::quantile(long_pred[long_y == 1], p = 1 - sens, na.rm = TRUE)
    g <- mean(long_pred <= long_c0, na.rm = TRUE)
    # Relative overfitting rate.
    R <- (scrnp_b - full_est)/(g - full_est)
    # .632 weight.
    w <- 0.632 / (1 - 0.368*R)
    # Weighted estimate.
    corrected_est <- (1 - w)*full_est + w * scrnp_b
  }
  return(list(scrnp = corrected_est, n_valid_boot = sum(!is.na(all_boot))))
}
#' Internal function used to perform one bootstrap sample. The function
#' \code{try}s to fit \code{learner} on a bootstrap sample. If for some reason
#' (e.g., the bootstrap sample contains no observations with \code{Y = 1})
#' the learner fails, then the function returns \code{NA}. These \code{NA}s
#' are ignored later when computing the bootstrap corrected estimate.
#' @param Y A numeric binary outcome
#' @param X A \code{data.frame} of variables for prediction.
#' @param correct632 A boolean indicating whether to use the .632 correction.
#' @param learner A wrapper that implements the desired method for building a
#' prediction algorithm. See \code{?glm_wrapper} or read the package vignette
#' for more information on formatting \code{learner}s.
#' @param sens The sensitivity constraint to use.
#' @param n Number of observations
#' @return If \code{learner} executes successfully, a numeric estimate of SCRNP
#' on this bootstrap sample. Otherwise the function returns \code{NA}.
one_boot_scrnp <- function(Y, X, n, correct632, learner, sens){
  # Draw one bootstrap resample of row indices.
  idx <- sample(seq_len(n), replace = TRUE)
  train_Y <- Y[idx]
  train_X <- X[idx, , drop = FALSE]
  # Train on the resample, predict on the original data; NA signals failure
  # (e.g., a resample with no Y == 1 observations).
  fit <- tryCatch({
    do.call(learner, args=list(train = list(Y = train_Y, X = train_X),
                               test = list(Y = Y, X = X)))
  }, error = function(e){
    return(NA)
  })
  # A successful learner call returns a list; the error handler returns NA
  # (a logical scalar), so test the class rather than comparing with ==.
  if(!is.logical(fit)){
    if(!correct632){
      # Optimism estimate: in-sample SCRNP minus out-of-sample SCRNP, each
      # with its own sensitivity-achieving cutoff.
      train_c0 <- stats::quantile(fit$train_pred[fit$train_y == 1], p = 1 - sens)
      test_c0 <- stats::quantile(fit$test_pred[fit$test_y == 1], p = 1 - sens)
      train_est <- mean(fit$train_pred <= train_c0)
      test_est <- mean(fit$test_pred <= test_c0)
      out <- train_est - test_est
    }else{
      # Leave-one-out bootstrap: evaluate only out-of-bag observations.
      oob_idx <- which(!(seq_len(n) %in% idx))
      # BUG FIX: the cutoff must be computed among out-of-bag *observed*
      # outcomes (fit$test_y, which aligns with the full data). The original
      # indexed fit$train_y (the resampled outcomes) by out-of-bag row
      # positions, selecting arbitrary bootstrap outcomes instead.
      oob_c0 <- stats::quantile(fit$test_pred[oob_idx][fit$test_y[oob_idx] == 1],
                                p = 1 - sens)
      out <- mean(fit$test_pred[oob_idx] <= oob_c0)
    }
    return(out)
  }else{
    return(NA)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/global_returns_data.R
\docType{data}
\name{global_returns_data}
\alias{global_returns_data}
\title{Dataset of global financial index returns}
\format{A dataframe with 4641 observations of 38 variables
\describe{
\item{country X}{daily equity returns of the predominant national equity index of country X}
\item{gsci}{daily return of the Goldman Sachs Commodity Index (GSCI)}
\item{brent.crude}{daily return of the nearest Brent Crude Oil futures contract}
\item{msci.world}{daily return of the MSCI World equity index}
\item{global.volatility}{daily percent change in a "global equity volatility index", which is a simple average of the VIX, VDAX, and Nikkei 225 volatility indices}
}
}
\source{\url{www.quandl.com}}
\usage{
global_returns_data
}
\description{
Daily returns for a broad set of national equity and other
major financial indices, such as oil prices and option-implied
equity volatilities
}
\keyword{datasets}
| /man/global_returns_data.Rd | no_license | bhuston/bhustonUtils | R | false | true | 1,010 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/global_returns_data.R
\docType{data}
\name{global_returns_data}
\alias{global_returns_data}
\title{Dataset of global financial index returns}
\format{A dataframe with 4641 observations of 38 variables
\describe{
\item{country X}{daily equity returns of the predominant national equity index of country X}
\item{gsci}{daily return of the Goldman Sachs Commodity Index (GSCI)}
\item{brent.crude}{daily return of the nearest Brent Crude Oil futures contract}
\item{msci.world}{daily return of the MSCI World equity index}
\item{global.volatility}{daily percent change in a "global equity volatility index", which is a simple average of the VIX, VDAX, and Nikkei 225 volatility indices}
}
}
\source{\url{www.quandl.com}}
\usage{
global_returns_data
}
\description{
Daily returns for a broad set of national equity and other
major financial indices, such as oil prices and option-implied
equity volatilities
}
\keyword{datasets}
|
# Parse a raw stats.nba.com team JSON response into a nested tibble, one row
# per non-empty result table. Relies on package helpers visible elsewhere in
# the package (resolve_nba_names, munge_nba_data, remove_zero_sum_cols,
# assign_nba_players, .get_team_season_info).
#
# json        - parsed JSON list as returned by the endpoint
# team_id     - NBA team id, used to look up team/season metadata
# season      - season the request was made for (e.g. 2018)
# season_type - e.g. "Regular Season"
# Returns a tibble whose per-table rows carry a `dataTable` list-column.
.parse_team_json <- function(json, team_id, season, season_type) {
  # number of result tables returned by the endpoint
  table_length <-
    json$resultSets$rowSet %>% length()
  table_slug <- json$resource
  # team/season metadata, joined onto every parsed table below
  df_team_season <-
    .get_team_season_info(season = season,
                          team_id = team_id,
                          season_type = season_type) %>%
    select(one_of(
      c(
        "yearSeason",
        "idTeam",
        "nameTeam",
        "nameConference",
        "nameDivision",
        "urlTeamSeasonLogo"
      )
    ))
  all_data <-
    1:table_length %>%
    future_map_dfr(function(x) {
      table_name <-
        json$resultSets$name[x]
      # request parameters echoed back by the API; cleaned and attached to rows
      df_parameters <- json$parameters %>% flatten_df()
      df_parameters <-
        df_parameters %>%
        purrr::set_names(names(df_parameters) %>% resolve_nba_names()) %>%
        munge_nba_data()
      # convert "Y"/"N" flag columns (names like isX) to logicals
      df_parameters <-
        df_parameters %>%
        mutate_at(
          df_parameters %>% dplyr::select(dplyr::matches("is[A-Z]")) %>% names(),
          funs(ifelse(. == "Y", 1, 0) %>% as.logical())
        ) %>%
        mutate(numberTable = x) %>%
        select(numberTable, everything())
      json_names <-
        json$resultSets$headers[[x]]
      actual_names <-
        json_names %>%
        resolve_nba_names()
      data <-
        json$resultSets$rowSet[[x]] %>%
        data.frame(stringsAsFactors = F) %>%
        dplyr::as_tibble()
      # skip empty result sets
      if (data %>% nrow() == 0) {
        return(invisible())
      }
      data <-
        data %>%
        purrr::set_names(actual_names) %>%
        munge_nba_data() %>%
        mutate(numberTable = x)
      # normalize the per-table split column to a common `typeFilter` name
      if (data %>% tibble::has_name("typeShot")) {
        data <-
          data %>%
          dplyr::rename(typeFilter = typeShot)
      }
      if (table_name == "PlayersSeasonTotals") {
        if (data %>% has_name("namePlayer")) {
          data <-
            data %>%
            dplyr::rename(typeFilter = namePlayer)
        }
      }
      if (table_name == "TeamOverall") {
        data <-
          data %>%
          mutate(nameGroup = "Players")
      }
      # attach request parameters and team/season metadata; drop helper cols
      data <-
        data %>%
        left_join(df_parameters) %>%
        dplyr::select(one_of(names(df_parameters)), everything()) %>%
        suppressMessages() %>%
        select(-numberTable) %>%
        mutate(nameTable = table_name) %>%
        select(nameTable, everything()) %>%
        dplyr::select(-one_of("idLeague")) %>%
        remove_zero_sum_cols() %>%
        left_join(df_team_season) %>%
        mutate(slugTable = table_slug,
               yearSeason = season) %>%
        suppressWarnings() %>%
        suppressMessages()
      data <-
        data %>%
        dplyr::select(nameTable:slugSeason, yearSeason, everything())
      # drop split columns that are redundant for team-level output
      data <-
        data %>%
        dplyr::select(-one_of(
          c(
            "resultGame",
            "locationGame",
            "nameSeasonMonth",
            "segmentSeason",
            "rangeDaysRest"
          )
        )) %>%
        suppressWarnings()
      if (table_name == "ByYearTeamDashboard") {
        # the by-year table reports its own season column; keep the searched
        # season (slugSeasonSearch) distinct from each row's season
        if (data %>% tibble::has_name("slugSeason")) {
          data <-
            data %>%
            dplyr::rename(slugSeasonSearch = slugSeason)
        }
        if (data %>% tibble::has_name("groupByYear")) {
          data <-
            data %>%
            dplyr::rename(slugSeason = groupByYear)
        }
      }
      # summary tables duplicate information available elsewhere; drop them
      if (table_name %in% c("OverallTeamDashboard", "OverallTeamPlayerOnOffSummary")) {
        return(invisible())
      }
      if (table_name == "AssistedBy") {
        # resolve assisting-player ids to names via the package dictionary
        assign_nba_players()
        data <-
          data %>%
          dplyr::rename(idPlayerAssistedBy = idPlayer) %>%
          dplyr::select(-one_of("namePlayer")) %>%
          left_join(
            df_dict_nba_players %>% select(
              idPlayerAssistedBy = idPlayer,
              namePlayerAssistedBy = namePlayer
            )
          ) %>%
          suppressMessages()
        data <-
          data %>%
          dplyr::select(dplyr::matches("type|mode|^is|^id|^name"),
                        everything())
      }
      # key columns stay at the top level; everything else nests in dataTable
      key_cols <-
        c(
          "slugTable",
          "nameTable",
          "yearSeason",
          "slugSeasonSearch",
          names(df_parameters),
          names(df_team_season)
        ) %>% unique()
      nest_cols <-
        names(data)[!names(data) %in% key_cols]
      data %>%
        nest_(key_col = 'dataTable', nest_cols = nest_cols)
    })
  all_data
}
# Pull season-level metadata for one NBA team from the `teaminfocommon`
# endpoint and return it as a tibble with team ids, names, division/conference
# information and a season logo URL.
.get_team_season_info <-
  function(season = 2019,
           team_id = 1610612751,
           season_type = "Regular Season",
           return_message = T) {
    slug_season <-
      generate_season_slug(season)
    slug_season_type <-
      clean_to_stem(season_type)
    # endpoint URL for this team / season / season-type combination
    endpoint_url <- as.character(
      glue::glue(
        "https://stats.nba.com/stats/teaminfocommon/?leagueId=00&season={slug_season}&seasonType={slug_season_type}&teamId={team_id}"
      )
    )
    json_data <- curl_json_to_vector(endpoint_url)
    # first result set: general team information
    info_names <- resolve_nba_names(json_data$resultSets$headers[[1]])
    df_info <-
      munge_nba_data(
        purrr::set_names(
          dplyr::as_tibble(data.frame(json_data$resultSets$rowSet[[1]],
                                      stringsAsFactors = F)),
          info_names
        )
      )
    # second result set: season ranks; its slugSeason is really a season id
    rank_names <- resolve_nba_names(json_data$resultSets$headers[[2]])
    df_ranks <-
      munge_nba_data(
        purrr::set_names(
          dplyr::as_tibble(data.frame(json_data$resultSets$rowSet[[2]],
                                      stringsAsFactors = F)),
          rank_names
        )
      )
    df_ranks <- dplyr::rename(df_ranks, idSeason = slugSeason)
    df_ranks <- mutate(df_ranks, idSeason = as.numeric(idSeason))
    # merge both sets and build the combined "City TeamName" column
    data <- suppressMessages(
      tidyr::unite(
        mutate(left_join(df_info, df_ranks), yearSeason = season),
        nameTeam,
        cityTeam,
        teamName,
        sep = " ",
        remove = F
      )
    )
    data <-
      mutate(data,
             urlTeamSeasonLogo = generate_team_season_logo(season = yearSeason, slug_team = slugTeam))
    # append a "Team" suffix to numeric columns that lack one (ids/years excluded)
    numeric_cols <-
      names(dplyr::select(select_if(data, is.numeric),
                          -dplyr::matches("^id|^year")))
    needs_suffix <- numeric_cols[!str_detect(numeric_cols, "Team")]
    to_fix <- names(data) %in% needs_suffix
    names(data)[to_fix] <- str_c(names(data)[to_fix], "Team")
    if (return_message) {
      cat(glue::glue("Acquired {unique(data$nameTeam)} {slug_season} team information"),
          fill = T)
    }
    data
  }
#' NBA teams seasons information
#'
#' Acquires information for a teams season
#'
#'
#' @param teams vector of team names
#' @param team_ids vector of team ids
#' @param all_active_teams if \code{TRUE} returns all active teams
#' @param seasons vector of seasons
#' @param season_types type of season options include \itemize{
#' \item Regular Season
#' \item Playoffs
#' \item Pre Season
#' }
#' @param nest_data if `TRUE` nests data
#' @param return_message if `TRUE` returns a message
#'
#' @return a `tibble`
#' @export
#'
#' @examples
#' teams_seasons_info(teams = "Brooklyn Nets", seasons = c(1984, 1990, 1995, 2018), season_types = "Regular Season")
# Acquire season information for one or more teams across one or more seasons.
# Iterates over every team/season/season-type combination, sleeping between
# API calls to avoid rate limiting. Defaults modernized to TRUE/FALSE (T/F are
# reassignable) and iteration made safe for empty inputs via seq_len().
teams_seasons_info <-
  function(teams = NULL,
           team_ids = NULL,
           all_active_teams = TRUE,
           seasons = 2019,
           season_types = "Regular Season",
           nest_data = FALSE,
           return_message = TRUE) {
    assign_nba_teams()
    # resolve team names/ids into the canonical id vector
    team_ids <-
      nba_teams_ids(teams = teams,
                    team_ids = team_ids,
                    all_active_teams = all_active_teams)
    # a failing call yields an empty tibble rather than aborting the pull
    .get_team_season_info_safe <-
      purrr::possibly(.get_team_season_info, tibble())
    # one row per team / season-type / season combination
    df_input <-
      expand.grid(
        team_id = team_ids,
        season_type = season_types,
        season = seasons,
        stringsAsFactors = FALSE
      ) %>%
      as_tibble()
    all_data <-
      seq_len(nrow(df_input)) %>%
      future_map_dfr(function(x) {
        df_row <- df_input %>% slice(x)
        data <-
          df_row %$%
          .get_team_season_info_safe(
            season = season,
            team_id = team_id,
            season_type = season_type,
            return_message = return_message
          )
        # brief pause between calls to avoid rate limiting by stats.nba.com
        Sys.sleep(time = 3)
        data
      })
    if (nest_data) {
      all_data <-
        all_data %>%
        nest(-c(slugSeason), .key = dataTeamSeasonPerformance)
    }
    all_data
  }
# Memoised lookup of friendly team-table names to their stats.nba.com
# endpoint slugs. Returns a two-column tibble (nameTable, slugTable).
.dictionary_team_tables <-
  memoise::memoise(function() {
    slugs <- c(
      "passes" = "teamdashptpass",
      "clutch" = "teamdashboardbyclutch",
      "splits" = "teamdashboardbygeneralsplits",
      "lineup" = "teamdashlineups",
      "opponent" = "teamdashboardbyopponent",
      "performance" = "teamdashboardbyteamperformance",
      "player on off details" = "teamplayeronoffdetails",
      "player on off summary" = "teamplayeronoffsummary",
      "player" = "teamplayerdashboard",
      "rebounding" = "teamdashptreb",
      "shooting" = "teamdashboardbyshootingsplits",
      "shot chart detail" = "shotchartlineupdetail",
      "shots" = "teamdashptshots",
      "team vs player" = "teamvsplayer",
      "year over year" = "teamdashboardbyyearoveryear"
    )
    tibble(
      nameTable = names(slugs),
      slugTable = unname(slugs)
    )
  })
# general -----------------------------------------------------------------
# Fetch a single team table from the NBA Stats API for one team and season.
# The friendly table name is resolved to an endpoint slug via
# .dictionary_team_tables(); optional filters are converted to query
# parameters with generate_call_slug(). The response is parsed by
# .parse_team_json() and returned with search metadata columns first.
.get_team_table_data <-
  function(team_id = 1610612751,
           table = "year over year",
           measure = "Base",
           season = 2018,
           mode = "PerGame",
           season_type = "Regular Season",
           game_id = NA,
           vs_player_id = NA,
           context_measure = "FGM",
           playoff_round = NA,
           is_plus_minus = FALSE,
           is_rank = FALSE,
           is_pace_adjusted = FALSE,
           outcome = NA,
           location = NA,
           month = NA,
           season_segment = NA,
           date_from = NA,
           date_to = NA,
           opponent_id = NA,
           vs_conf = NA,
           vs_division = NA,
           game_segment = NA,
           period = NA,
           shot_clock = NA,
           last_n_games = NA,
           return_message = TRUE) {
    df_team_slug_tables <-
      .dictionary_team_tables()
    if (return_message) {
      glue::glue("Acquiring {team_id} {season} {season_type} {measure} {table} {mode} data") %>% cat(fill = T)
    }
    # resolve the (lower-cased) table name to its endpoint slug
    table_slug <-
      df_team_slug_tables %>%
      filter(nameTable == (str_to_lower(table))) %>%
      pull(slugTable)
    URL <- gen_url(table_slug)
    # convert each filter to its API slug, falling back to sensible defaults
    measure_slug <-
      generate_call_slug(x = str_to_title(measure), default_value = "Base")
    mode_slug <-
      generate_call_slug(x = mode, default_value = "PerGame")
    context_measure_slug <-
      generate_call_slug(x = context_measure, default_value = "")
    season_slug <- generate_season_slug(season = season)
    game_id_slug <-
      generate_call_slug(x = game_id, default_value = 0)
    vs_player_id_slug <-
      generate_call_slug(x = vs_player_id, default_value = 0)
    # NOTE(review): season_type_slug is computed but the raw season_type is
    # sent in `params` below; presumably .generate_param_slug() handles the
    # encoding -- confirm before removing either.
    season_type_slug <-
      generate_call_slug(x = season_type, default_value = "Regular+Season")
    playoff_round_slug <-
      generate_call_slug(x = playoff_round, default_value = 0)
    plus_minus_slug <-
      generate_call_slug(x = is_plus_minus, default_value = "N")
    rank_slug <-
      generate_call_slug(x = is_rank, default_value = "N")
    pace_slug <-
      generate_call_slug(x = is_pace_adjusted, default_value = "N")
    outcome_slug <-
      generate_call_slug(x = outcome, default_value = "")
    location_slug <-
      generate_call_slug(x = location, default_value = "")
    month_slug <- generate_call_slug(x = month, default_value = 0)
    season_segment_slug <-
      generate_call_slug(x = season_segment, default_value = "")
    date_from_slug <-
      generate_call_slug(x = date_from, default_value = "")
    date_to_slug <-
      generate_call_slug(x = date_to, default_value = "")
    opponent_id_slug <-
      generate_call_slug(x = opponent_id, default_value = 0)
    # BUG FIX: this was generate_call_slug(x = season_segment, ...), a
    # copy-paste error that silently ignored the vs_conf argument.
    vs_conf_slug <-
      generate_call_slug(x = vs_conf, default_value = "")
    vs_division_slug <-
      generate_call_slug(x = vs_division, default_value = "")
    game_segment_slug <-
      generate_call_slug(x = game_segment, default_value = "")
    period_slug <-
      generate_call_slug(x = period, default_value = 0)
    shot_clock_slug <-
      generate_call_slug(x = shot_clock, default_value = "")
    last_n_games_slug <-
      generate_call_slug(x = last_n_games, default_value = 0)
    # assemble the query parameters in the order the API expects
    params <-
      list(
        measureType = measure_slug,
        perMode = mode_slug,
        plusMinus = plus_minus_slug,
        contextMeasure = context_measure_slug,
        paceAdjust = pace_slug,
        rank = rank_slug,
        leagueId = "00",
        VsPlayerID = vs_player_id_slug,
        season = season_slug,
        seasonType = season_type,
        GameID = game_id_slug,
        GROUP_ID = 0,
        poRound = playoff_round_slug,
        teamId = team_id,
        outcome = outcome_slug,
        location = location_slug,
        month = month_slug,
        seasonSegment = season_segment_slug,
        dateFrom = date_from_slug,
        dateTo = date_to_slug,
        opponentTeamId = opponent_id_slug,
        vsConference = vs_conf_slug,
        vsDivision = vs_division_slug,
        gameSegment = game_segment_slug,
        period = period_slug,
        shotClockRange = shot_clock_slug,
        lastNGames = last_n_games_slug
      )
    # the teamvsplayer endpoint takes the team id under `playerId`
    if (table_slug == "teamvsplayer") {
      names(params)[names(params) %>% str_detect("teamId")] <-
        "playerId"
    }
    slug_param <-
      .generate_param_slug(params = params)
    url <-
      glue::glue("{URL}?{slug_param}") %>% as.character()
    resp <-
      url %>%
      curl() %>%
      readr::read_lines()
    json <-
      resp %>% jsonlite::fromJSON(simplifyVector = T)
    # parse the response, attach search metadata, and put key columns first
    all_data <-
      .parse_team_json(
        json = json,
        team_id = team_id,
        season = season,
        season_type = season_type
      ) %>%
      mutate(idTeam = team_id,
             typeMeasure = measure,
             modeSearch = mode,
             slugSeason = season_slug,
             yearSeason = season) %>%
      dplyr::select(one_of(c("nameTable", "typeMeasure", "modeSearch", "slugSeason", "yearSeason",
                             "typeSeason", "slugSeasonSearch",
                             "idTeam", "nameTeam", "nameConference", "nameDivision", "slugTable",
                             "urlTeamSeasonLogo",
                             "dataTable")
      ), everything()) %>%
      suppressWarnings()
    all_data
  }
#' NBA Team table data by season
#'
#' Returns NBA team data for specified teams
#' and parameters by seasons
#'
#' @param teams vector of NBA team names
#' @param team_ids vector of team ids
#' @param all_active_teams if \code{TRUE} returns data for all active teams
#' @param tables vector of table names options include \itemize{
#' \item splits
#' \item passes
#' \item clutch
#' \item lineup
#' \item opponent
#' \item performance
#' \item player on off details
#' \item player on off summary
#' \item player
#' \item rebounding
#' \item shooting
#' \item shots
#' \item team vs player
#' \item year over year
#' }
#' @param seasons vector of seasons
#' @param modes vector of modes options include \itemize{
#' \item PerGame
#' \item Totals
#' \item MinutesPer
#' \item Per48
#' \item Per40
#' \item Per36
#' \item PerMinute
#' \item PerPossession
#' \item PerPlay
#' \item Per100Possessions
#' \item Per100Plays
#' }#'
#' @param measures vector of measure types options include \itemize{
#' \item Base
#' \item Advanced
#' \item Misc
#' \item Scoring
#' \item Four Factors
#' \item Opponent
#' \item Usage
#' \item Defense
#' }
#' @param season_types vector of season types options include \itemize{
#' \item Regular Season
#' \item Pre Season
#' \item Playoffs
#' \item All Star
#' }
#' @param playoff_rounds vector of playoff rounds options include \code{0:4}
#' @param is_plus_minus \code{TRUE} returns plus minus
#' @param is_rank if \code{TRUE} returns rank
#' @param is_pace_adjusted if \code{TRUE} adjusts for pace
#' @param outcomes vector of outcomes options include \itemize{
#' \item NA
#' \item Wins
#' \item Losses
#' }
#' @param locations vector of locations options include \itemize{
#' \item NA
#' \item Home
#' \item Road
#' }
#' @param months vector of game months options include \code{0:12}
#' @param season_segments vector of season segments, options include \itemize{
#' \item NA
#' \item Post All-Star
#' \item Pre All-Star
#' }
#' @param date_from \code{NA} or date from
#' @param date_to \code{NA} or date to
#' @param opponent_ids vector of opponent ids
#' @param vs_confs vector of conferences against options include \itemize{
#' \item NA
#' \item East
#' \item West
#' }
#' @param vs_divisions vector of divisions against options include \itemize{
#' \item NA
#' \item Atlantic
#' \item Central
#' \item Northwest
#' \item Pacific
#' \item Southeast
#' \item Southwest
#' }
#' @param game_segments vector of game segments options include \itemize{
#' \item NA
#' \item First Half
#' \item Second Half
#' \item Overtime
#' }
#' @param periods vector of periods \code{0:12}
#' @param shot_clocks vector of shot clock ranges options include \itemize{
#' \item NA,
#' \item 24-22
#' \item 22-18 Very Early
#' \item 18-15 Early
#' \item 15-7 Average
#' \item 7-4 Late
#' \item 4-0 Very Late
#' \item ShotClock Off
#' }
#' @param last_n_games vector of last_n games \code{0:82}
#' @param assign_to_environment if \code{TRUE} assigns data to environment
#' @param return_message if \code{TRUE} returns message
#'
#' @return a \code{tibble}
#' @export
#'
#' @examples
#' teams_tables(teams = c("Brooklyn Nets", "New York Knicks"),
#' seasons = 2017:2018, tables = c("splits", "shooting"), measures = "Base", modes = c("PerGame", "Totals"))
#'
# Acquire one or more NBA Stats team tables for every combination of the
# supplied teams, seasons, measures, modes and filters. Optionally assigns
# each unnested table into the global environment under a generated name.
teams_tables <-
  function(teams = NULL,
           team_ids = NULL,
           all_active_teams = F,
           seasons = NULL,
           tables = NULL,
           measures = NULL,
           modes = NULL,
           season_types = "Regular Season",
           playoff_rounds = NA,
           is_plus_minus = F,
           is_rank = F,
           is_pace_adjusted = F,
           outcomes = NA,
           locations = NA,
           months = NA,
           season_segments = NA,
           date_from = NA,
           date_to = NA,
           opponent_ids = NA,
           vs_confs = NA,
           vs_divisions = NA,
           game_segments = NA,
           periods = NA,
           shot_clocks = NA,
           last_n_games = NA,
           assign_to_environment = TRUE,
           return_message = TRUE) {
    # tables, modes, seasons and measures are all required inputs
    if (tables %>% purrr::is_null()) {
      stop("Please enter tables")
    }
    if (modes %>% purrr::is_null()) {
      stop("Please enter modes")
    }
    if (seasons %>% purrr::is_null()) {
      stop("Enter seasons")
    }
    if (measures %>% purrr::is_null()) {
      stop("Please enter measures")
    }
    # resolve team names/ids into the canonical id vector
    team_ids <-
      nba_teams_ids(teams = teams,
                    team_ids = team_ids,
                    all_active_teams = all_active_teams)
    # one row per combination of every search parameter
    input_df <-
      expand.grid(
        team_id = team_ids,
        table = tables,
        measure = measures,
        season = seasons,
        mode = modes,
        season_type = season_types,
        playoff_round = playoff_rounds,
        is_plus_minus = is_plus_minus,
        is_rank = is_rank,
        is_pace_adjusted = is_pace_adjusted,
        outcome = outcomes,
        location = locations,
        month = months,
        season_segment = season_segments,
        date_from = date_from,
        date_to = date_to,
        opponent_id = opponent_ids,
        vs_conf = vs_confs,
        vs_division = vs_divisions,
        game_segment = game_segments,
        period = periods,
        shot_clock = shot_clocks,
        last_n_games = last_n_games,
        stringsAsFactors = F
      ) %>%
      dplyr::as_tibble()
    # a failing API call returns an empty tibble instead of aborting the pull
    .get_team_table_data_safe <-
      purrr::possibly(.get_team_table_data, tibble())
    all_data <-
      1:nrow(input_df) %>%
      future_map_dfr(function(x) {
        df_row <-
          input_df %>% slice(x)
        # %$% exposes the row's columns as arguments to the safe getter
        df_row %$%
          .get_team_table_data_safe(
            team_id = team_id,
            table = table,
            measure = measure,
            season = season,
            mode = mode,
            season_type = season_type,
            game_id = NA,
            vs_player_id = NA,
            context_measure = NA,
            playoff_round = playoff_round,
            is_plus_minus = is_plus_minus,
            is_rank = is_rank,
            is_pace_adjusted = is_pace_adjusted,
            outcome = outcome,
            location = location,
            month = month,
            season_segment = season_segment,
            date_from = date_from,
            date_to = date_to,
            opponent_id = opponent_id,
            vs_conf = vs_conf,
            vs_division = vs_division,
            game_segment = game_segment,
            period = period,
            shot_clock = shot_clock,
            last_n_games = last_n_games,
            return_message = return_message
          )
      })
    df_dict_table_names <-
      .dictionary_team_tables()
    # build environment-friendly names for each table (e.g. "dataSplitsTeam")
    table_names <-
      df_dict_table_names$nameTable %>% map_chr(function(x) {
        generate_data_name(x = x, result = "Team")
      })
    df_dict_table_names <-
      df_dict_table_names %>%
      mutate(tableName = table_names) %>%
      select(-nameTable) %>%
      dplyr::rename(tableSlugName = tableName)
    all_data <-
      all_data %>%
      left_join(df_dict_table_names) %>%
      select(tableSlugName, nameTable, everything()) %>%
      suppressMessages() %>%
      unique()
    if (assign_to_environment) {
      # unnest each table (optionally split by measure type) and assign it
      # into the global environment under its generated name
      all_tables <-
        all_data$tableSlugName %>%
        unique()
      all_tables %>%
        walk(function(table) {
          df_tables <-
            all_data %>%
            filter(tableSlugName == table) %>%
            select(-one_of(c("slugTable", "tableSlugName"))) %>%
            unnest_legacy() %>%
            remove_na_columns()
          has_measure <- df_tables %>% tibble::has_name("typeMeasure")
          if (has_measure) {
            # one assigned object per measure type, suffixed by the measure
            measures <-
              df_tables$typeMeasure %>% unique()
            measures %>%
              walk(function(measure) {
                table_name <-
                  table %>%
                  str_c(measure)
                df_table <-
                  df_tables %>%
                  filter(typeMeasure == measure) %>%
                  unnest_legacy() %>%
                  remove_na_columns() %>%
                  distinct()
                assign(x = table_name,
                       value = df_table,
                       envir = .GlobalEnv)
              })
          } else{
            df_table <-
              df_tables %>%
              unnest_legacy() %>%
              remove_na_columns() %>%
              distinct()
            assign(x = table,
                   value = df_table,
                   envir = .GlobalEnv)
          }
        })
    }
    all_data %>%
      remove_na_columns()
  }
# Shot Chart --------------------------------------------------------------
# Fetch shot-chart detail for one team and season from the `shotchartdetail`
# endpoint and return a cleaned tibble of shot locations and zones.
.get_team_shot_chart <-
  function(season = 2018,
           team_id = 1610612739,
           season_type = "Regular Season",
           game_id = NA,
           opponent_id = NULL,
           measure = "FGA",
           period = 0,
           month = 0,
           date_from = NA,
           date_to = NA,
           show_shots = TRUE,
           return_message = TRUE) {
    assign_nba_teams()
    # resolve team id to name for the status message and parameter table
    team <- df_dict_nba_teams %>%
      filter(idTeam == team_id) %>%
      pull(nameTeam)
    table_id <- 1
    player_id <- 0
    slugSeason <- generate_season_slug(season = season)
    if (return_message) {
      glue::glue("{team} {slugSeason} shot data") %>% cat(fill = T)
    }
    URL <- gen_url("shotchartdetail")
    show_shot_slug <- case_when(show_shots ~ 1,
                                TRUE ~ 0)
    # NA filters become empty-string query values
    game_id_slug <-
      ifelse(game_id %>% is.na(), "",
             game_id)
    date_from_slug <-
      ifelse(date_from %>% is.na(), "",
             date_from %>% clean_to_stem())
    date_to_slug <-
      ifelse(date_to %>% is.na(), "",
             date_to %>% clean_to_stem())
    # BUG FIX: honor the opponent_id argument (previously hard-coded to "0")
    opponent_id_slug <-
      if (is.null(opponent_id) || is.na(opponent_id)) "0" else opponent_id
    params <- list(
      SeasonType = season_type,
      LeagueID = "00",
      Season = slugSeason,
      PlayerID = player_id,
      TeamID = team_id,
      GameID = game_id_slug,
      ContextMeasure = measure,
      PlayerPosition = "",
      # BUG FIX: DateFrom/DateTo were swapped (DateFrom received date_to_slug
      # and vice versa), silently inverting any date-window filter
      DateFrom = date_from_slug,
      DateTo = date_to_slug,
      GameSegment = "",
      LastNGames = "0",
      Location = "",
      # BUG FIX: honor the month argument (previously hard-coded to "0")
      Month = month,
      OpponentTeamID = opponent_id_slug,
      Outcome = "",
      SeasonSegment = "",
      VSConference = "",
      VSDivision = "",
      RookieYear = "",
      Period = period,
      StartPeriod = "",
      EndPeriod = "",
      showShots = show_shot_slug
    )
    slug_param <-
      .generate_param_slug(params = params)
    url <-
      glue::glue("{URL}?{slug_param}") %>% as.character()
    resp <-
      url %>%
      curl() %>%
      readr::read_lines()
    json <-
      resp %>% jsonlite::fromJSON(simplifyVector = T)
    # echo of the request parameters; joined onto the shot rows below
    df_params <- json$parameters %>% flatten_df() %>% as_tibble()
    param_names <- names(df_params) %>% resolve_nba_names()
    df_params <-
      df_params %>%
      purrr::set_names(param_names) %>%
      mutate(numberTable = table_id,
             nameTeam = team)
    data <-
      json %>%
      nba_json_to_df(table_id = table_id) %>%
      mutate(numberTable = table_id,
             slugSeason = slugSeason,
             yearSeason = season) %>%
      munge_nba_data()
    data <-
      data %>%
      left_join(df_params) %>%
      select(one_of(param_names), everything()) %>%
      remove_zero_sum_cols() %>%
      select(-one_of(c("numberTable", "idLeague"))) %>%
      mutate_if(is.character,
                funs(ifelse(. == "", NA, .))) %>%
      remove_na_columns() %>%
      mutate_at(c("locationX", "locationY"),
                funs(. %>% as.character() %>% readr::parse_number())) %>%
      suppressWarnings() %>%
      suppressMessages() %>%
      # BUG FIX: matches() takes a single regex; the extra strings were being
      # passed to its ignore.case/perl arguments rather than selecting columns
      select(dplyr::matches("yearSeason|slugSeason|nameTeam"), everything()) %>%
      # split "Zone Name (SLUG)" into separate name/slug columns
      tidyr::separate(zoneArea, into = c("nameZone", "slugZone"), sep = "\\(") %>%
      mutate(slugZone = slugZone %>% str_replace_all("\\)", ""))
    data
  }
#' Get teams seasons shot charts
#'
#' @param teams vector of team names
#' @param team_ids vector of team ids
#' @param all_active_teams if `TRUE` returns all active teams
#' @param season_types vector of season types options \itemize{
#' \item Pre Season
#' \item Regular Season
#' \item Playoffs
#' \item All Star
#' }
#' @param seasons vector of seasons
#' @param measures vector of measures
#' @param periods vector of periods
#' @param months vector of months
#' @param date_from date from
#' @param date_to date to
#' @param nest_data if `TRUE` nests data
#' @param return_message if `TRUE` returns a message
#'
#' @return a \code{tibble}
#' @export
#'
#' @examples
#' teams_shots(teams = "Brooklyn Nets",
#' seasons = 2018)
# Acquire shot charts for one or more teams across one or more seasons,
# expanding every combination of the supplied filters.
teams_shots <-
  function(teams = NULL ,
           team_ids = NULL,
           all_active_teams = FALSE,
           season_types = "Regular Season",
           seasons = 2018,
           measures = "FGA",
           periods = 0,
           months = 0,
           date_from = NA,
           date_to = NA,
           nest_data = FALSE,
           return_message = TRUE
  ){
    # resolve team names/ids into the canonical id vector
    team_ids <-
      nba_teams_ids(teams = teams,
                    team_ids = team_ids,
                    all_active_teams = all_active_teams)
    # one row per team/season/filter combination.
    # BUG FIX: date_from/date_to were hard-coded to NA here, silently
    # ignoring the user-supplied date window.
    input_df <-
      expand.grid(
        team_id = team_ids,
        season_type = season_types,
        season = seasons,
        measure = measures,
        period = periods,
        month = months,
        date_from = date_from,
        date_to = date_to,
        stringsAsFactors = FALSE
      ) %>%
      as_tibble()
    # a failing API call returns an empty tibble instead of aborting the pull
    .get_team_shot_chart_safe <-
      purrr::possibly(.get_team_shot_chart, tibble())
    all_data <-
      seq_len(nrow(input_df)) %>%
      future_map_dfr(function(x) {
        df_row <-
          input_df %>% slice(x)
        # BUG FIX: the call previously passed `season = seasons` (the whole
        # input vector) instead of the row's `season`, and never forwarded
        # measure/period/month/date filters to the getter.
        df_row %$%
          .get_team_shot_chart_safe(
            team_id = team_id,
            season_type = season_type,
            season = season,
            measure = measure,
            period = period,
            month = month,
            date_from = date_from,
            date_to = date_to,
            return_message = return_message
          )
      })
    if (nest_data) {
      all_data <-
        all_data %>%
        nest(-c('yearSeason', "slugSeason", "idTeam", "nameTeam"), .key = dataShotChart)
    }
    all_data
  }
| /R/team.R | no_license | Ollstar/nbastatR | R | false | false | 28,795 | r |
# Parse a raw stats.nba.com team JSON response into a nested tibble, one row
# per non-empty result table. Relies on package helpers visible elsewhere in
# the package (resolve_nba_names, munge_nba_data, remove_zero_sum_cols,
# assign_nba_players, .get_team_season_info).
#
# json        - parsed JSON list as returned by the endpoint
# team_id     - NBA team id, used to look up team/season metadata
# season      - season the request was made for (e.g. 2018)
# season_type - e.g. "Regular Season"
# Returns a tibble whose per-table rows carry a `dataTable` list-column.
.parse_team_json <- function(json, team_id, season, season_type) {
  # number of result tables returned by the endpoint
  table_length <-
    json$resultSets$rowSet %>% length()
  table_slug <- json$resource
  # team/season metadata, joined onto every parsed table below
  df_team_season <-
    .get_team_season_info(season = season,
                          team_id = team_id,
                          season_type = season_type) %>%
    select(one_of(
      c(
        "yearSeason",
        "idTeam",
        "nameTeam",
        "nameConference",
        "nameDivision",
        "urlTeamSeasonLogo"
      )
    ))
  all_data <-
    1:table_length %>%
    future_map_dfr(function(x) {
      table_name <-
        json$resultSets$name[x]
      # request parameters echoed back by the API; cleaned and attached to rows
      df_parameters <- json$parameters %>% flatten_df()
      df_parameters <-
        df_parameters %>%
        purrr::set_names(names(df_parameters) %>% resolve_nba_names()) %>%
        munge_nba_data()
      # convert "Y"/"N" flag columns (names like isX) to logicals
      df_parameters <-
        df_parameters %>%
        mutate_at(
          df_parameters %>% dplyr::select(dplyr::matches("is[A-Z]")) %>% names(),
          funs(ifelse(. == "Y", 1, 0) %>% as.logical())
        ) %>%
        mutate(numberTable = x) %>%
        select(numberTable, everything())
      json_names <-
        json$resultSets$headers[[x]]
      actual_names <-
        json_names %>%
        resolve_nba_names()
      data <-
        json$resultSets$rowSet[[x]] %>%
        data.frame(stringsAsFactors = F) %>%
        dplyr::as_tibble()
      # skip empty result sets
      if (data %>% nrow() == 0) {
        return(invisible())
      }
      data <-
        data %>%
        purrr::set_names(actual_names) %>%
        munge_nba_data() %>%
        mutate(numberTable = x)
      # normalize the per-table split column to a common `typeFilter` name
      if (data %>% tibble::has_name("typeShot")) {
        data <-
          data %>%
          dplyr::rename(typeFilter = typeShot)
      }
      if (table_name == "PlayersSeasonTotals") {
        if (data %>% has_name("namePlayer")) {
          data <-
            data %>%
            dplyr::rename(typeFilter = namePlayer)
        }
      }
      if (table_name == "TeamOverall") {
        data <-
          data %>%
          mutate(nameGroup = "Players")
      }
      # attach request parameters and team/season metadata; drop helper cols
      data <-
        data %>%
        left_join(df_parameters) %>%
        dplyr::select(one_of(names(df_parameters)), everything()) %>%
        suppressMessages() %>%
        select(-numberTable) %>%
        mutate(nameTable = table_name) %>%
        select(nameTable, everything()) %>%
        dplyr::select(-one_of("idLeague")) %>%
        remove_zero_sum_cols() %>%
        left_join(df_team_season) %>%
        mutate(slugTable = table_slug,
               yearSeason = season) %>%
        suppressWarnings() %>%
        suppressMessages()
      data <-
        data %>%
        dplyr::select(nameTable:slugSeason, yearSeason, everything())
      # drop split columns that are redundant for team-level output
      data <-
        data %>%
        dplyr::select(-one_of(
          c(
            "resultGame",
            "locationGame",
            "nameSeasonMonth",
            "segmentSeason",
            "rangeDaysRest"
          )
        )) %>%
        suppressWarnings()
      if (table_name == "ByYearTeamDashboard") {
        # the by-year table reports its own season column; keep the searched
        # season (slugSeasonSearch) distinct from each row's season
        if (data %>% tibble::has_name("slugSeason")) {
          data <-
            data %>%
            dplyr::rename(slugSeasonSearch = slugSeason)
        }
        if (data %>% tibble::has_name("groupByYear")) {
          data <-
            data %>%
            dplyr::rename(slugSeason = groupByYear)
        }
      }
      # summary tables duplicate information available elsewhere; drop them
      if (table_name %in% c("OverallTeamDashboard", "OverallTeamPlayerOnOffSummary")) {
        return(invisible())
      }
      if (table_name == "AssistedBy") {
        # resolve assisting-player ids to names via the package dictionary
        assign_nba_players()
        data <-
          data %>%
          dplyr::rename(idPlayerAssistedBy = idPlayer) %>%
          dplyr::select(-one_of("namePlayer")) %>%
          left_join(
            df_dict_nba_players %>% select(
              idPlayerAssistedBy = idPlayer,
              namePlayerAssistedBy = namePlayer
            )
          ) %>%
          suppressMessages()
        data <-
          data %>%
          dplyr::select(dplyr::matches("type|mode|^is|^id|^name"),
                        everything())
      }
      # key columns stay at the top level; everything else nests in dataTable
      key_cols <-
        c(
          "slugTable",
          "nameTable",
          "yearSeason",
          "slugSeasonSearch",
          names(df_parameters),
          names(df_team_season)
        ) %>% unique()
      nest_cols <-
        names(data)[!names(data) %in% key_cols]
      data %>%
        nest_(key_col = 'dataTable', nest_cols = nest_cols)
    })
  all_data
}
.get_team_season_info <-
  function(season = 2019,
           team_id = 1610612751,
           season_type = "Regular Season",
           return_message = T) {
    # Fetch one team's season information (name, conference, division, ranks,
    # logo URL) from the stats.nba.com "teaminfocommon" endpoint as a tibble.
    season_slug <-
      generate_season_slug(season)
    season_type_slug <-
      season_type %>%
      clean_to_stem()
    url <-
      glue::glue(
        "https://stats.nba.com/stats/teaminfocommon/?leagueId=00&season={season_slug}&seasonType={season_type_slug}&teamId={team_id}"
      ) %>%
      as.character()
    json <-
      url %>%
      curl_json_to_vector()
    # First result set: team metadata.
    names_md <-
      json$resultSets$headers[[1]] %>% resolve_nba_names()
    df_md <-
      json$resultSets$rowSet[[1]] %>%
      data.frame(stringsAsFactors = F) %>%
      dplyr::as_tibble() %>%
      purrr::set_names(names_md) %>%
      munge_nba_data()
    # Second result set: season ranks. Its slugSeason column holds a numeric
    # season id, so it is renamed and coerced here.
    names_md <-
      json$resultSets$headers[[2]] %>% resolve_nba_names()
    df_md2 <-
      json$resultSets$rowSet[[2]] %>%
      data.frame(stringsAsFactors = F) %>%
      dplyr::as_tibble() %>%
      purrr::set_names(names_md) %>%
      munge_nba_data() %>%
      dplyr::rename(idSeason = slugSeason) %>%
      mutate(idSeason = idSeason %>% as.numeric())
    # Combine both sets, build the full team name, and attach the logo URL.
    data <-
      df_md %>%
      left_join(df_md2) %>%
      mutate(yearSeason = season) %>%
      tidyr::unite(nameTeam,
                   cityTeam,
                   teamName,
                   sep = " ",
                   remove = F) %>%
      suppressMessages() %>%
      mutate(urlTeamSeasonLogo = generate_team_season_logo(season = yearSeason, slug_team = slugTeam))
    # Suffix non-id numeric columns with "Team" so they do not collide with
    # similarly named player-level columns elsewhere in the package.
    num_names <-
      data %>% select_if(is.numeric) %>% dplyr::select(-dplyr::matches("^id|^year")) %>% names()
    no_teams <-
      num_names[!num_names %>% str_detect("Team")]
    names(data)[names(data) %in% no_teams] <-
      str_c(names(data)[names(data) %in% no_teams], "Team")
    if (return_message) {
      glue::glue("Acquired {data$nameTeam %>% unique()} {season_slug} team information") %>% cat(fill = T)
    }
    data
  }
#' NBA teams seasons information
#'
#' Acquires information for a teams season
#'
#'
#' @param teams vector of team names
#' @param team_ids vector of team ids
#' @param all_active_teams if \code{TRUE} returns all active teams
#' @param seasons vector of seasons
#' @param season_types type of season options include \itemize{
#' \item Regular Season
#' \item Playoffs
#' \item Pre Season
#' }
#' @param nest_data if `TRUE` nests data
#' @param return_message if `TRUE` returns a message
#'
#' @return a `tibble`
#' @export
#'
#' @examples
#' teams_seasons_info(teams = "Brooklyn Nets", seasons = c(1984, 1990, 1995, 2018), season_types = "Regular Season")
teams_seasons_info <-
  function(teams = NULL,
           team_ids = NULL,
           all_active_teams = T,
           seasons = 2019,
           season_types = "Regular Season",
           nest_data = F,
           return_message = T) {
    # Resolve the requested teams to ids, then fetch season information for
    # every (team, season type, season) combination, pausing between calls.
    assign_nba_teams()
    team_ids <- nba_teams_ids(
      teams = teams,
      team_ids = team_ids,
      all_active_teams = all_active_teams
    )
    # Fall back to an empty tibble whenever a single request fails.
    fetch_season_info <- purrr::possibly(.get_team_season_info, tibble())
    param_grid <- expand.grid(
      team_id = team_ids,
      season_type = season_types,
      season = seasons,
      stringsAsFactors = F
    ) %>%
      as_tibble()
    all_data <- future_map_dfr(1:nrow(param_grid), function(row_idx) {
      current <- param_grid %>% slice(row_idx)
      result <- with(
        current,
        fetch_season_info(
          season = season,
          team_id = team_id,
          season_type = season_type,
          return_message = return_message
        )
      )
      # Be polite to the stats.nba.com API between requests.
      Sys.sleep(time = 3)
      result
    })
    if (nest_data) {
      all_data <- all_data %>%
        nest(-c(slugSeason), .key = dataTeamSeasonPerformance)
    }
    all_data
  }
# Static lookup mapping human-readable team table names to the stats.nba.com
# endpoint slugs they correspond to. Memoised because the mapping never changes.
.dictionary_team_tables <-
  memoise::memoise(function() {
    table_names <- c(
      "passes",
      "clutch",
      "splits",
      "lineup",
      "opponent",
      "performance",
      "player on off details",
      "player on off summary",
      "player",
      "rebounding",
      "shooting",
      "shot chart detail",
      "shots",
      "team vs player",
      "year over year"
    )
    endpoint_slugs <- c(
      "teamdashptpass",
      "teamdashboardbyclutch",
      "teamdashboardbygeneralsplits",
      "teamdashlineups",
      "teamdashboardbyopponent",
      "teamdashboardbyteamperformance",
      "teamplayeronoffdetails",
      "teamplayeronoffsummary",
      "teamplayerdashboard",
      "teamdashptreb",
      "teamdashboardbyshootingsplits",
      "shotchartlineupdetail",
      "teamdashptshots",
      "teamvsplayer",
      "teamdashboardbyyearoveryear"
    )
    tibble(nameTable = table_names, slugTable = endpoint_slugs)
  })
# general -----------------------------------------------------------------
.get_team_table_data <-
  function(team_id = 1610612751,
           table = "year over year",
           measure = "Base",
           season = 2018,
           mode = "PerGame",
           season_type = "Regular Season",
           game_id = NA,
           vs_player_id = NA,
           context_measure = "FGM",
           playoff_round = NA,
           is_plus_minus = F,
           is_rank = F,
           is_pace_adjusted = F,
           outcome = NA,
           location = NA,
           month = NA,
           season_segment = NA,
           date_from = NA,
           date_to = NA,
           opponent_id = NA,
           vs_conf = NA,
           vs_division = NA,
           game_segment = NA,
           period = NA,
           shot_clock = NA,
           last_n_games = NA,
           return_message = TRUE) {
    # Fetch one team table (e.g. "year over year") from stats.nba.com for a
    # single team/season/measure/mode combination and parse it into a tibble
    # of nested tables via .parse_team_json().
    df_team_slug_tables <-
      .dictionary_team_tables()
    if (return_message) {
      glue::glue("Acquiring {team_id} {season} {season_type} {measure} {table} {mode} data") %>% cat(fill = T)
    }
    # Translate the human-readable table name into the endpoint slug.
    table_slug <-
      df_team_slug_tables %>%
      filter(nameTable == (str_to_lower(table))) %>%
      pull(slugTable)
    URL <- gen_url(table_slug)
    # Build each query-string fragment, substituting defaults for NA inputs.
    measure_slug <-
      generate_call_slug(x = str_to_title(measure), default_value = "Base")
    mode_slug <-
      generate_call_slug(x = mode, default_value = "PerGame")
    context_measure_slug <-
      generate_call_slug(x = context_measure, default_value = "")
    season_slug <- generate_season_slug(season = season)
    game_id_slug <-
      generate_call_slug(x = game_id, default_value = 0)
    vs_player_id_slug <-
      generate_call_slug(x = vs_player_id, default_value = 0)
    # NOTE(review): season_type_slug is computed but the params list below
    # sends the raw season_type, relying on .generate_param_slug for encoding
    # — confirm before removing either.
    season_type_slug <-
      generate_call_slug(x = season_type, default_value = "Regular+Season")
    playoff_round_slug <-
      generate_call_slug(x = playoff_round, default_value = 0)
    plus_minus_slug <-
      generate_call_slug(x = is_plus_minus, default_value = "N")
    rank_slug <-
      generate_call_slug(x = is_rank, default_value = "N")
    pace_slug <-
      generate_call_slug(x = is_pace_adjusted, default_value = "N")
    outcome_slug <-
      generate_call_slug(x = outcome, default_value = "")
    location_slug <-
      generate_call_slug(x = location, default_value = "")
    month_slug <- generate_call_slug(x = month, default_value = 0)
    season_segment_slug <-
      generate_call_slug(x = season_segment, default_value = "")
    date_from_slug <-
      generate_call_slug(x = date_from, default_value = "")
    date_to_slug <-
      generate_call_slug(x = date_to, default_value = "")
    opponent_id_slug <-
      generate_call_slug(x = opponent_id, default_value = 0)
    # BUG FIX: vs_conf_slug was built from season_segment (copy-paste error),
    # so the vs_conf argument was silently ignored.
    vs_conf_slug <-
      generate_call_slug(x = vs_conf, default_value = "")
    vs_division_slug <-
      generate_call_slug(x = vs_division, default_value = "")
    game_segment_slug <-
      generate_call_slug(x = game_segment, default_value = "")
    period_slug <-
      generate_call_slug(x = period, default_value = 0)
    shot_clock_slug <-
      generate_call_slug(x = shot_clock, default_value = "")
    last_n_games_slug <-
      generate_call_slug(x = last_n_games, default_value = 0)
    params <-
      list(
        measureType = measure_slug,
        perMode = mode_slug,
        plusMinus = plus_minus_slug,
        contextMeasure = context_measure_slug,
        paceAdjust = pace_slug,
        rank = rank_slug,
        leagueId = "00",
        VsPlayerID = vs_player_id_slug,
        season = season_slug,
        seasonType = season_type,
        GameID = game_id_slug,
        GROUP_ID = 0,
        poRound = playoff_round_slug,
        teamId = team_id,
        outcome = outcome_slug,
        location = location_slug,
        month = month_slug,
        seasonSegment = season_segment_slug,
        dateFrom = date_from_slug,
        dateTo = date_to_slug,
        opponentTeamId = opponent_id_slug,
        vsConference = vs_conf_slug,
        vsDivision = vs_division_slug,
        gameSegment = game_segment_slug,
        period = period_slug,
        shotClockRange = shot_clock_slug,
        lastNGames = last_n_games_slug
      )
    # The teamvsplayer endpoint expects playerId where the others take teamId.
    if (table_slug == "teamvsplayer") {
      names(params)[names(params) %>% str_detect("teamId")] <-
        "playerId"
    }
    slug_param <-
      .generate_param_slug(params = params)
    url <-
      glue::glue("{URL}?{slug_param}") %>% as.character()
    resp <-
      url %>%
      curl() %>%
      readr::read_lines()
    json <-
      resp %>% jsonlite::fromJSON(simplifyVector = T)
    # Parse the raw JSON into nested tables, then attach search metadata.
    all_data <-
      .parse_team_json(
        json = json,
        team_id = team_id,
        season = season,
        season_type = season_type
      ) %>%
      mutate(idTeam = team_id,
             typeMeasure = measure,
             modeSearch = mode,
             slugSeason = season_slug,
             yearSeason = season) %>%
      dplyr::select(one_of(c("nameTable", "typeMeasure", "modeSearch", "slugSeason", "yearSeason",
                             "typeSeason", "slugSeasonSearch",
                             "idTeam", "nameTeam", "nameConference", "nameDivision", "slugTable",
                             "urlTeamSeasonLogo",
                             "dataTable")
      ), everything()) %>%
      suppressWarnings()
    all_data
  }
#' NBA Team table data by season
#'
#' Returns NBA team data for specified teams
#' and parameters by seasons
#'
#' @param teams vector of NBA team names
#' @param team_ids vector of team ids
#' @param all_active_teams if \code{TRUE} returns data for all active teams
#' @param tables vector of table names options include \itemize{
#' \item splits
#' \item passes
#' \item clutch
#' \item lineup
#' \item opponent
#' \item performance
#' \item player on off details
#' \item player on off summary
#' \item player
#' \item rebounding
#' \item shooting
#' \item shots
#' \item team vs player
#' \item year over year
#' }
#' @param seasons vector of seasons
#' @param modes vector of modes options include \itemize{
#' \item PerGame
#' \item Totals
#' \item MinutesPer
#' \item Per48
#' \item Per40
#' \item Per36
#' \item PerMinute
#' \item PerPossession
#' \item PerPlay
#' \item Per100Possessions
#' \item Per100Plays
#' }
#' @param measures vector of measure types options include \itemize{
#' \item Base
#' \item Advanced
#' \item Misc
#' \item Scoring
#' \item Four Factors
#' \item Opponent
#' \item Usage
#' \item Defense
#' }
#' @param season_types vector of season types options include \itemize{
#' \item Regular Season
#' \item Pre Season
#' \item Playoffs
#' \item All Star
#' }
#' @param playoff_rounds vector of playoff rounds, options include \code{0:4}
#' @param is_plus_minus \code{TRUE} returns plus minus
#' @param is_rank if \code{TRUE} returns rank
#' @param is_pace_adjusted if \code{TRUE} adjusts for pace
#' @param outcomes vector of outcomes options include \itemize{
#' \item NA
#' \item Wins
#' \item Losses
#' }
#' @param locations vector of locations options include \itemize{
#' \item NA
#' \item Home
#' \item Road
#' }
#' @param months vector of game months options include \code{0:12}
#' @param season_segments vector of season segments, options include \itemize{
#' \item NA
#' \item Post All-Star
#' \item Pre All-Star
#' }
#' @param date_from \code{NA} or date from
#' @param date_to \code{NA} or date to
#' @param opponent_ids vector of opponent ids
#' @param vs_confs vector of conferences against options include \itemize{
#' \item NA
#' \item East
#' \item West
#' }
#' @param vs_divisions vector of divisions against options include \itemize{
#' \item NA
#' \item Atlantic
#' \item Central
#' \item Northwest
#' \item Pacific
#' \item Southeast
#' \item Southwest
#' }
#' @param game_segments vector of game segments options include \itemize{
#' \item NA
#' \item First Half
#' \item Second Half
#' \item Overtime
#' }
#' @param periods vector of periods \code{0:12}
#' @param shot_clocks vector of shot clock ranges options include \itemize{
#' \item NA,
#' \item 24-22
#' \item 22-18 Very Early
#' \item 18-15 Early
#' \item 15-7 Average
#' \item 7-4 Late
#' \item 4-0 Very Late
#' \item ShotClock Off
#' }
#' @param last_n_games vector of last_n games \code{0:82}
#' @param assign_to_environment if \code{TRUE} assigns data to environment
#' @param return_message if \code{TRUE} returns message
#'
#' @return a \code{tibble}
#' @export
#'
#' @examples
#' teams_tables(teams = c("Brooklyn Nets", "New York Knicks"),
#' seasons = 2017:2018, tables = c("splits", "shooting"), measures = "Base", modes = c("PerGame", "Totals"))
#'
teams_tables <-
  function(teams = NULL,
           team_ids = NULL,
           all_active_teams = F,
           seasons = NULL,
           tables = NULL,
           measures = NULL,
           modes = NULL,
           season_types = "Regular Season",
           playoff_rounds = NA,
           is_plus_minus = F,
           is_rank = F,
           is_pace_adjusted = F,
           outcomes = NA,
           locations = NA,
           months = NA,
           season_segments = NA,
           date_from = NA,
           date_to = NA,
           opponent_ids = NA,
           vs_confs = NA,
           vs_divisions = NA,
           game_segments = NA,
           periods = NA,
           shot_clocks = NA,
           last_n_games = NA,
           assign_to_environment = TRUE,
           return_message = TRUE) {
    # Acquire one or more stats.nba.com team tables for every combination of
    # the supplied parameters, optionally assigning one unnested data frame
    # per table (and per measure type) into the global environment.
    # Validate the required inputs up front.
    if (tables %>% purrr::is_null()) {
      stop("Please enter tables")
    }
    if (modes %>% purrr::is_null()) {
      stop("Please enter modes")
    }
    if (seasons %>% purrr::is_null()) {
      stop("Enter seasons")
    }
    if (measures %>% purrr::is_null()) {
      stop("Please enter measures")
    }
    team_ids <-
      nba_teams_ids(teams = teams,
                    team_ids = team_ids,
                    all_active_teams = all_active_teams)
    # Full cross product of every parameter; one API call per row.
    input_df <-
      expand.grid(
        team_id = team_ids,
        table = tables,
        measure = measures,
        season = seasons,
        mode = modes,
        season_type = season_types,
        playoff_round = playoff_rounds,
        is_plus_minus = is_plus_minus,
        is_rank = is_rank,
        is_pace_adjusted = is_pace_adjusted,
        outcome = outcomes,
        location = locations,
        month = months,
        season_segment = season_segments,
        date_from = date_from,
        date_to = date_to,
        opponent_id = opponent_ids,
        vs_conf = vs_confs,
        vs_division = vs_divisions,
        game_segment = game_segments,
        period = periods,
        shot_clock = shot_clocks,
        last_n_games = last_n_games,
        stringsAsFactors = F
      ) %>%
      dplyr::as_tibble()
    # Failed requests yield an empty tibble instead of aborting the whole run.
    .get_team_table_data_safe <-
      purrr::possibly(.get_team_table_data, tibble())
    all_data <-
      1:nrow(input_df) %>%
      future_map_dfr(function(x) {
        df_row <-
          input_df %>% slice(x)
        # %$% exposes the row's columns as the call's arguments.
        df_row %$%
          .get_team_table_data_safe(
            team_id = team_id,
            table = table,
            measure = measure,
            season = season,
            mode = mode,
            season_type = season_type,
            game_id = NA,
            vs_player_id = NA,
            context_measure = NA,
            playoff_round = playoff_round,
            is_plus_minus = is_plus_minus,
            is_rank = is_rank,
            is_pace_adjusted = is_pace_adjusted,
            outcome = outcome,
            location = location,
            month = month,
            season_segment = season_segment,
            date_from = date_from,
            date_to = date_to,
            opponent_id = opponent_id,
            vs_conf = vs_conf,
            vs_division = vs_division,
            game_segment = game_segment,
            period = period,
            shot_clock = shot_clock,
            last_n_games = last_n_games,
            return_message = return_message
          )
      })
    # Map each table name to the environment-friendly data name it will be
    # assigned under (e.g. "dataSplitsTeam"-style names from generate_data_name).
    df_dict_table_names <-
      .dictionary_team_tables()
    table_names <-
      df_dict_table_names$nameTable %>% map_chr(function(x) {
        generate_data_name(x = x, result = "Team")
      })
    df_dict_table_names <-
      df_dict_table_names %>%
      mutate(tableName = table_names) %>%
      select(-nameTable) %>%
      dplyr::rename(tableSlugName = tableName)
    all_data <-
      all_data %>%
      left_join(df_dict_table_names) %>%
      select(tableSlugName, nameTable, everything()) %>%
      suppressMessages() %>%
      unique()
    if (assign_to_environment) {
      all_tables <-
        all_data$tableSlugName %>%
        unique()
      # One global-environment object per table; tables that carry a
      # typeMeasure column get one object per measure type instead.
      all_tables %>%
        walk(function(table) {
          df_tables <-
            all_data %>%
            filter(tableSlugName == table) %>%
            select(-one_of(c("slugTable", "tableSlugName"))) %>%
            unnest_legacy() %>%
            remove_na_columns()
          has_measure <- df_tables %>% tibble::has_name("typeMeasure")
          if (has_measure) {
            measures <-
              df_tables$typeMeasure %>% unique()
            measures %>%
              walk(function(measure) {
                table_name <-
                  table %>%
                  str_c(measure)
                df_table <-
                  df_tables %>%
                  filter(typeMeasure == measure) %>%
                  unnest_legacy() %>%
                  remove_na_columns() %>%
                  distinct()
                assign(x = table_name,
                       value = df_table,
                       envir = .GlobalEnv)
              })
          } else{
            df_table <-
              df_tables %>%
              unnest_legacy() %>%
              remove_na_columns() %>%
              distinct()
            assign(x = table,
                   value = df_table,
                   envir = .GlobalEnv)
          }
        })
    }
    all_data %>%
      remove_na_columns()
  }
# Shot Chart --------------------------------------------------------------
.get_team_shot_chart <-
  function(season = 2018,
           team_id = 1610612739,
           season_type = "Regular Season",
           game_id = NA,
           opponent_id = NULL,
           measure = "FGA",
           period = 0,
           month = 0,
           date_from = NA,
           date_to = NA,
           show_shots = TRUE,
           return_message = T) {
    # Pull one team's shot-chart detail for a single season from the
    # stats.nba.com "shotchartdetail" endpoint and return a munged tibble.
    assign_nba_teams()
    team <- df_dict_nba_teams %>%
      filter(idTeam == team_id) %>%
      pull(nameTeam)
    table_id <- 1
    player_id <- 0
    slugSeason <- generate_season_slug(season = season)
    if (return_message) {
      glue::glue("{team} {slugSeason} shot data") %>% cat(fill = T)
    }
    URL <- gen_url("shotchartdetail")
    show_shot_slug <- case_when(show_shots ~ 1,
                                TRUE ~ 0)
    # NA arguments become empty query-string values.
    game_id_slug <-
      ifelse(game_id %>% is.na(), "",
             game_id)
    date_from_slug <-
      ifelse(date_from %>% is.na(), "",
             date_from %>% clean_to_stem())
    date_to_slug <-
      ifelse(date_to %>% is.na(), "",
             date_to %>% clean_to_stem())
    # BUG FIX: opponent_id was accepted but OpponentTeamID was hard-coded to
    # "0"; wire the argument through, keeping "0" (all opponents) as default.
    opponent_id_slug <-
      if (is.null(opponent_id) || is.na(opponent_id)) "0" else opponent_id
    params <- list(
      SeasonType = season_type,
      LeagueID = "00",
      Season = slugSeason,
      PlayerID = player_id,
      TeamID = team_id,
      GameID = game_id_slug,
      ContextMeasure = measure,
      PlayerPosition = "",
      # BUG FIX: DateFrom/DateTo were swapped (DateFrom received date_to_slug
      # and vice versa).
      DateFrom = date_from_slug,
      DateTo = date_to_slug,
      GameSegment = "",
      LastNGames = "0",
      Location = "",
      # BUG FIX: month was accepted but Month was hard-coded to "0".
      Month = month,
      OpponentTeamID = opponent_id_slug,
      Outcome = "",
      SeasonSegment = "",
      VSConference = "",
      VSDivision = "",
      RookieYear = "",
      Period = period,
      StartPeriod = "",
      EndPeriod = "",
      showShots = show_shot_slug
    )
    #params <- utils::modifyList(params, list(...))
    slug_param <-
      .generate_param_slug(params = params)
    url <-
      glue::glue("{URL}?{slug_param}") %>% as.character()
    resp <-
      url %>%
      curl() %>%
      readr::read_lines()
    json <-
      resp %>% jsonlite::fromJSON(simplifyVector = T)
    # Echoed request parameters, joined back onto the shot rows below.
    df_params <- json$parameters %>% flatten_df() %>% as_tibble()
    param_names <- names(df_params) %>% resolve_nba_names()
    df_params <-
      df_params %>%
      purrr::set_names(param_names) %>%
      mutate(numberTable = table_id,
             nameTeam = team)
    data <-
      json %>%
      nba_json_to_df(table_id = table_id) %>%
      mutate(numberTable = table_id,
             slugSeason = slugSeason,
             yearSeason = season) %>%
      munge_nba_data()
    data <-
      data %>%
      left_join(df_params) %>%
      select(one_of(param_names), everything()) %>%
      remove_zero_sum_cols() %>%
      select(-one_of(c("numberTable", "idLeague"))) %>%
      mutate_if(is.character,
                funs(ifelse(. == "", NA, .))) %>%
      remove_na_columns() %>%
      mutate_at(c("locationX", "locationY"),
                funs(. %>% as.character() %>% readr::parse_number())) %>%
      suppressWarnings() %>%
      suppressMessages() %>%
      # BUG FIX: matches() takes a single regex; the extra strings were being
      # passed to its ignore.case/perl arguments.
      select(dplyr::matches("yearSeason|slugSeason|nameTeam"), everything()) %>%
      # Split "Zone Name(Slug)" into nameZone / slugZone columns.
      tidyr::separate(zoneArea, into = c("nameZone", "slugZone"), sep = "\\(") %>%
      mutate(slugZone = slugZone %>% str_replace_all("\\)", ""))
    data
  }
#' Get teams seasons shot charts
#'
#' @param teams vector of team names
#' @param team_ids vector of team ids
#' @param all_active_teams if `TRUE` returns all active teams
#' @param season_types vector of season types options \itemize{
#' \item Pre Season
#' \item Regular Season
#' \item Playoffs
#' \item All Star
#' }
#' @param seasons vector of seasons
#' @param measures vector of measures
#' @param periods vector of periods
#' @param months vector of months
#' @param date_from date from
#' @param date_to date to
#' @param nest_data if `TRUE` nests data
#' @param return_message if `TRUE` returns a message
#'
#' @return a \code{tibble}
#' @export
#'
#' @examples
#' teams_shots(teams = "Brooklyn Nets",
#' seasons = 2018)
teams_shots <-
  function(teams = NULL ,
           team_ids = NULL,
           all_active_teams = F,
           season_types = "Regular Season",
           seasons = 2018,
           measures = "FGA",
           periods = 0,
           months = 0,
           date_from = NA,
           date_to = NA,
           nest_data = F,
           return_message = T
  ){
    # Resolve the requested teams to ids, build the parameter grid, and fetch
    # one shot chart per (team, season type, season, measure, period, month)
    # combination.
    team_ids <-
      nba_teams_ids(teams = teams,
                    team_ids = team_ids,
                    all_active_teams = all_active_teams)
    input_df <-
      expand.grid(
        team_id = team_ids,
        season_type = season_types,
        season = seasons,
        measure = measures,
        period = periods,
        month = months,
        # BUG FIX: previously hard-coded to NA, silently ignoring the
        # date_from/date_to arguments.
        date_from = date_from,
        date_to = date_to,
        stringsAsFactors = F
      ) %>%
      as_tibble()
    # Failed requests yield an empty tibble instead of aborting the whole run.
    .get_team_shot_chart_safe <-
      purrr::possibly(.get_team_shot_chart, tibble())
    all_data <-
      1:nrow(input_df) %>%
      future_map_dfr(function(x) {
        df_row <-
          input_df %>% slice(x)
        # BUG FIX: the call previously passed `season = seasons` (the whole
        # input vector) instead of the row's season, and dropped the
        # measure/period/month/date_from/date_to grid columns entirely.
        df_row %$%
          .get_team_shot_chart_safe(
            team_id = team_id,
            season_type = season_type,
            season = season,
            measure = measure,
            period = period,
            month = month,
            date_from = date_from,
            date_to = date_to,
            return_message = return_message
          )
      })
    if (nest_data) {
      all_data <-
        all_data %>%
        nest(-c('yearSeason', "slugSeason", "idTeam", "nameTeam"), .key = dataShotChart)
    }
    all_data
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateApprovalMemo.R
\name{generateMemoTemplate}
\alias{generateMemoTemplate}
\title{Generate Memo Template File}
\usage{
generateMemoTemplate(draft_memo = TRUE)
}
\arguments{
\item{draft_memo}{Boolean indicating whether the memo being written is a
draft or final memo.}
}
\value{
Officer docx object
}
\description{
Generate Memo Template File
}
| /man/generateMemoTemplate.Rd | permissive | pepfar-datim/datapackr | R | false | true | 427 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateApprovalMemo.R
\name{generateMemoTemplate}
\alias{generateMemoTemplate}
\title{Generate Memo Template File}
\usage{
generateMemoTemplate(draft_memo = TRUE)
}
\arguments{
\item{draft_memo}{Boolean indicating whether the memo being written is a
draft or final memo.}
}
\value{
Officer docx object
}
\description{
Generate Memo Template File
}
|
#' calc_score_stats.R
#'
#' Starting with individual-level data on p factors, generate score test statistics for each
#' factor for input into GBJ/GHC/HC/BJ/minP. Also get the correlations between these test statistics.
#' Designed to be used with linear or logistic or log-linear regression null models.
#'
#' @param null_model An R regression model fitted using glm(). Do not use lm(), even for linear regression!
#' @param factor_matrix An n*p matrix with each factor as one column. There should be no missing data.
#' @param link_function Either "linear" or "logit" or "log"
#' @param P_mat The projection matrix used in calculation may be passed in to speed up the calculation.
#' See paper for details. Default is null.
#'
#' @return A list with the elements:
#' \item{test_stats}{The p score test statistics.}
#' \item{cor_mat}{The p*p matrix giving the pairwise correlation of every two test statistics.
#' In the covariate-adjusted case the diagonal is left as NA.}
#'
#' @export
#' @examples
#' set.seed(0)
#' Y <- rbinom(n=100, size=1, prob=0.5)
#' null_mod <- glm(Y~1, family=binomial(link="logit"))
#' factor_mat <- matrix(data=rnorm(n=100*5), nrow=100)
#' calc_score_stats(null_mod, factor_mat, "logit")
calc_score_stats <- function(null_model, factor_matrix, link_function, P_mat=NULL) {
	X_mat <- model.matrix(null_model)
	d <- ncol(factor_matrix)
	fitted_Y <- null_model$fitted.values
	actual_Y <- null_model$y

	# The null-model variance weights W are the only link-specific ingredient.
	if (link_function == 'logit') {
		W_vec <- fitted_Y * (1-fitted_Y)
	} else if (link_function == 'linear') {
		W_vec <- rep(summary(null_model)$dispersion, nrow(X_mat))
	} else if (link_function == 'log') {
		W_vec <- fitted_Y
	} else {
		stop("Invalid model type")
	}

	########################
	# EZ Mode if linear regression, no additional covariates except for intercept
	# (BUG FIX: use scalar && rather than elementwise & in this if condition)
	if (link_function == 'linear' && ncol(model.matrix(null_model)) == 1) {
		num_sub <- nrow(X_mat)
		sig_sq_hat <- sum( (actual_Y - fitted_Y)^2 ) / (num_sub-1)

		test_stats <- rep(NA, d)
		denominators <- rep(NA, d)
		for (kkk in seq_len(d))
		{
			tempF <- factor_matrix[,kkk]
			score_num <- t(tempF) %*% (actual_Y-fitted_Y)
			score_denom <- tryCatch(sqrt(sig_sq_hat * (sum(tempF^2) - mean(tempF)^2*num_sub)),
									warning=function(w) w, error=function(e) e)
			# We've been getting negative denominators with, for example, very rare SNPs;
			# sqrt() then warns/errors and tryCatch hands back the condition object.
			if (inherits(score_denom, "condition")) {
				err_msg <- paste('Error in calculating test statistic for factor ', kkk,
								 ' possibly it is constant? Try removing and rerunning.', sep='')
				stop(err_msg)
			}
			denominators[kkk] <- score_denom
			test_stats[kkk] <- score_num / score_denom
		}

		# With no covariates, the test statistics' correlation is just the
		# factors' correlation.
		est_cor <- cor(factor_matrix)
		return ( list(test_stats=test_stats, cor_mat=est_cor) )
	}

	########################
	# Regular mode: P = W - WX(X'WX)^{-1}X'W unless the caller supplied it.
	if (is.null(P_mat)) {
		W_mat <- diag(W_vec)
		P_mat <- W_mat - W_mat%*%X_mat %*% solve(t(X_mat)%*%W_mat%*%X_mat) %*% t(X_mat)%*%W_mat
	} else {
		# If they provided a P_mat, make sure it's the correct dimensions
		if (nrow(P_mat) != ncol(P_mat) | nrow(P_mat) != nrow(factor_matrix)) {
			stop('Your P_mat does not have the correct dimensions (n*n).')
		}
	}

	# Now our score test
	test_stats <- rep(NA, d)
	denominators <- rep(NA, d)
	for (kkk in seq_len(d))
	{
		# Pick out next SNP, conduct score test (no additional covariates).
		tempF <- factor_matrix[,kkk]
		score_num <- t(tempF) %*% (actual_Y-fitted_Y)
		score_denom <- tryCatch(sqrt(tempF %*% P_mat %*% tempF), warning=function(w) w,
								error=function(e) e)
		# Negative denominators (e.g. very rare SNPs) surface as a caught condition.
		if (inherits(score_denom, "condition")) {
			err_msg <- paste('Error in calculating test statistic for factor ', kkk,
							 ' - possibly it is constant? Try removing and rerunning.', sep='')
			stop(err_msg)
		}
		test_stats[kkk] <- score_num / score_denom
		denominators[kkk] <- score_denom
	}

	# Estimate the correlation matrix for the test statistics; the diagonal is
	# left as NA (downstream code uses only the off-diagonal entries).
	est_cor <- matrix(data=NA, nrow=d, ncol=d)
	# BUG FIX: guard the 2:d loop — with d == 1 it would run backwards (2, 1)
	# and index out of bounds.
	if (d >= 2) {
		for (temp_row in 2:d)
		{
			for (temp_col in 1:(temp_row-1))
			{
				est_cor[temp_row, temp_col] <- t(factor_matrix[,temp_row]) %*% P_mat %*% factor_matrix[,temp_col] / (denominators[temp_row] * denominators[temp_col])
				est_cor[temp_col, temp_row] <- est_cor[temp_row, temp_col]
			}
		}
	}

	return ( list(test_stats=test_stats, cor_mat=est_cor) )
}
| /GBJ/R/calc_score_stats.R | no_license | akhikolla/InformationHouse | R | false | false | 4,595 | r | #' calc_score_stats.R
#'
#' Starting with individual-level data on p factors, generate score test statistics for each
#' factor for input into GBJ/GHC/HC/BJ/minP. Also get the correlations between these test statistics.
#' Designed to be used with linear or logistic or log-linear regression null models.
#'
#' @param null_model An R regression model fitted using glm(). Do not use lm(), even for linear regression!
#' @param factor_matrix An n*p matrix with each factor as one column. There should be no missing data.
#' @param link_function Either "linear" or "logit" or "log"
#' @param P_mat The projection matrix used in calculation may be passed in to speed up the calculation.
#' See paper for details. Default is null.
#'
#' @return A list with the elements:
#' \item{test_stats}{The p score test statistics.}
#' \item{cor_mat}{The p*p matrix giving the pairwise correlation of every two test statistics.}
#'
#' @export
#' @examples
#' set.seed(0)
#' Y <- rbinom(n=100, size=1, prob=0.5)
#' null_mod <- glm(Y~1, family=binomial(link="logit"))
#' factor_mat <- matrix(data=rnorm(n=100*5), nrow=100)
#' calc_score_stats(null_mod, factor_mat, "logit")
calc_score_stats <- function(null_model, factor_matrix, link_function, P_mat=NULL) {
X_mat <- model.matrix(null_model)
d <- ncol(factor_matrix)
fitted_Y <- null_model$fitted.values
actual_Y <- null_model$y
# Only difference between linear and logistic procedure
if (link_function == 'logit') {
W_vec <- fitted_Y * (1-fitted_Y)
} else if (link_function == 'linear') {
W_vec <- rep(summary(null_model)$dispersion, nrow(X_mat))
} else if (link_function == 'log') {
W_vec <- fitted_Y
} else {
stop("Invalid model type")
}
########################
# EZ Mode if linear regression, no additional covariates except for intercept
if (link_function == 'linear' & ncol(model.matrix(null_model)) == 1) {
num_sub <- nrow(X_mat)
sig_sq_hat <- sum( (actual_Y - fitted_Y)^2 ) / (num_sub-1)
test_stats <- rep(NA, d)
denominators <- rep(NA, d)
for(kkk in 1:d)
{
tempF<- factor_matrix[,kkk]
score_num <- t(tempF) %*% (actual_Y-fitted_Y)
score_denom <- tryCatch(sqrt(sig_sq_hat * (sum(tempF^2) - mean(tempF)^2*num_sub)),
warning=function(w) w, error=function(e) e)
# We've been getting negative denominators with, for example, very rare SNPs
if (!(class(score_denom)[1] %in% c("matrix", "array", "numeric"))) {
err_msg <- paste('Error in calculating test statistic for factor ', kkk,
' possibly it is constant? Try removing and rerunning.', sep='')
stop(err_msg)
}
denominators[kkk] <- score_denom
test_stats[kkk] <- score_num / score_denom
}
est_cor <- cor(factor_matrix)
# Return from here
return ( list(test_stats=test_stats, cor_mat=est_cor) )
}
########################
# Regular mode
if (is.null(P_mat)) {
W_mat <- diag(W_vec)
P_mat <- W_mat - W_mat%*%X_mat %*% solve(t(X_mat)%*%W_mat%*%X_mat) %*% t(X_mat)%*%W_mat
} else {
# If they provided a P_mat, make sure it's the correct dimensions
if (nrow(P_mat) != ncol(P_mat) | nrow(P_mat) != nrow(factor_matrix)) {
stop('Your P_mat does not have the correct dimensions (n*n).')
}
}
# Now our score test
test_stats <- rep(NA, d)
denominators <- rep(NA, d)
for(kkk in 1:d)
{
# Pick out next SNP, conduct score test (no additional covariates).
tempF <- factor_matrix[,kkk]
score_num <- t(tempF) %*% (actual_Y-fitted_Y)
score_denom <- tryCatch(sqrt(tempF %*% P_mat %*% tempF), warning=function(w) w,
error=function(e) e)
# We've been getting negative denominators with, for example, very rare SNPs
if (!(class(score_denom)[1] %in% c("matrix", "array", "numeric"))) {
err_msg <- paste('Error in calculating test statistic for factor ', kkk,
' - possibly it is constant? Try removing and rerunning.', sep='')
stop(err_msg)
}
test_stats[kkk] <- score_num / score_denom
denominators[kkk] <- score_denom
}
# Estimate the correlation matrix for the test statistics.
# Same as the correlation matrix of the SNPs if linear regression and no additional covariates.
est_cor <- matrix(data=NA, nrow=d, ncol=d)
for (temp_row in 2:d)
{
for (temp_col in 1:(temp_row-1))
{
est_cor[temp_row, temp_col] <- t(factor_matrix[,temp_row]) %*% P_mat %*% factor_matrix[,temp_col] / (denominators[temp_row] * denominators[temp_col])
est_cor[temp_col, temp_row] <- est_cor[temp_row, temp_col]
}
}
return ( list(test_stats=test_stats, cor_mat=est_cor) )
}
|
library(easyPubMed)
library(tidyverse)
library(data.table)
library(ggplot2)
library(dplyr)
#search performed August 15, 2020
pubmed_query_string="(methylomics OR epigenomics OR NGS OR \"next generation sequencing\" OR RNA-Seq OR \"mRNA sequencing\" OR \"RNA sequencing\" OR \"RNA-sequencing\" OR \"transcriptome sequencing\" OR \"whole exome sequencing\" OR \"whole-exome sequencing\" OR \"high throughput sequencing\" OR \"high-throughput sequencing\" OR \"DNA sequencing\" OR \"RNA sequencing\" OR \"RNA-sequencing\" OR \"DNA-sequencing\" OR WXS OR WGS OR \"whole-genome sequencing\" OR \"whole genome sequencing\") AND (rheumatology OR \"rheumatologic disease\" OR \"rheumatologic disease\" ))"
pubmed_ids <- get_pubmed_ids(pubmed_query_string)
#1097 entries
abstracts_xml <- fetch_pubmed_data(pubmed_ids,retmax = 5000)
abstracts_list <- articles_to_list(abstracts_xml)
saveRDS(abstracts_list,file="/Users/sebastian/pubmed_rheuma_HTS/abstracts_list_for_keywords.RDS")
final=c("")
for (key in abstracts_list){
keywords = custom_grep(key,"KeywordList","char")
if(!is.null(keywords)){
out=str_replace_all(keywords,"<Keyword MajorTopicYN=\"N\">","")
out=str_replace_all(out,"<Keyword MajorTopicYN=\"Y\">","")
out=strsplit(out,"</Keyword>")[[1]]
final=c(final,out)
}
}
final=final[final!=" "]
final=tolower(final)
finalICD11_filtered=c("")
icd11 <- read.delim("~/icd11.txt", stringsAsFactors=FALSE)
icd11$Title=tolower(icd11$Title)
for (key in final){
retrows=icd11[icd11$Title %like% key,]
if (length(retrows) >= 1){
finalICD11_filtered=c(finalICD11_filtered,key)}}
finalTable_freq=table(finalICD11_filtered)
View(finalTable_freq)
write.csv2(finalTable_freq,"/Users/sebastian/pubmed_rheuma_HTS/finalTable_freq.csv")
#326 entries in finalTable_freq
#search performed Sep,4, 2020
# Round 2: same sequencing terms, now crossed with the specific disease names
# distilled from the round-1 keyword / ICD-11 screen above.
pubmed_query_string="(methylomics OR epigenomics OR NGS OR \"next generation sequencing\" OR RNA-Seq OR \"mRNA sequencing\" OR \"RNA sequencing\" OR \"RNA-sequencing\" OR \"transcriptome sequencing\" OR \"whole exome sequencing\" OR \"whole-exome sequencing\" OR \"high throughput sequencing\" OR \"high-throughput sequencing\" OR WXS OR WGS OR \"whole-genome sequencing\" OR \"whole genome sequencing\") AND (\"autoinflammatory syndrome\" OR dermatomyositis OR enthesitis OR \"familial mediterranean fever\" OR \"granulomatosis with polyangiitis\" OR \"juvenile idiopathic arthritis\" OR myositis OR osteoarthritis OR polymyositis OR \"psoriatic arthritis\" OR \"rheumatoid arthritis\" OR sacroiliitis OR \"sjögren syndrome\" OR \"sjögren's syndrome\" OR spondyloarthritis OR synovitis OR \"systemic lupus erythematosus\" OR \"systemic sclerosis\" OR vasculitis OR uveitis OR gout OR polychondritis)"
pubmed_ids <- get_pubmed_ids(pubmed_query_string)
pubmed_ids$Count
#1162 hits
abstracts_xml <- fetch_pubmed_data(pubmed_ids,retmax = 5000)
abstracts_list <- articles_to_list(abstracts_xml)
# Controlled vocabularies used to classify each abstract. Element ORDER of
# `diseases` matters: the disease loop below appends rows in this order.
diseases <- c(
  "autoinflammatory syndrome", "dermatomyositis", "enthesitis",
  "familial mediterranean fever", "granulomatosis with polyangiitis",
  "juvenile idiopathic arthritis", "myositis", "osteoarthritis",
  "polymyositis", "psoriatic arthritis", "rheumatoid arthritis",
  "sacroiliitis", "sjögren syndrome", "sjögren's syndrome",
  "spondyloarthritis", "synovitis", "systemic lupus erythematosus",
  "systemic sclerosis", "vasculitis", "uveitis", "gout", "polychondritis"
)
# Search phrases (lower-case, matched against title+abstract+keywords) that
# flag each assay category:
RNA <- c(
  "rna sequencing", "transcriptome sequencing", "rna-seq",
  "rna-based next-generation sequencing", "mrna profiles", "rna-sequencing"
)
WXS <- c(
  "whole exome sequencing", "whole-exome sequencing",
  "whole-exome-sequencing", "amplicon sequencing"
)
WGS <- c(
  "whole-genome sequencing", "whole genome sequencing",
  "whole-genome shotgun sequencing"
)
Bacteria <- c("16s", "metagenomics")
Epigenomic <- c(
  "atac-seq", "chip-seq", "wgbs", "methylomics", "methylomic", "epigenomics"
)
single <- c("single cell", "single-cell")
# Classify every non-review record: detect which assay phrases occur in the
# combined title+abstract+keywords text and record one row per matching
# disease (disease, pmid, assay, journal, year) in `result`.
result=NULL
for (key in abstracts_list){
  review=custom_grep(key,"PublicationType","char")
  # BUG FIX: the replacement was a no-op ("&" -> "&"); the intent is to decode
  # the XML entity "&amp;" so journal names read e.g. "Arthritis research &
  # therapy" (matching the manually added journal strings further below).
  journal=str_replace(custom_grep(custom_grep(key,"Journal","char"),"Title","char"),"&amp;","&")
  year=custom_grep(custom_grep(key,"PubDate","char"),"Year","char")
  title=custom_grep(key,"ArticleTitle","char")
  title=tolower(title)
  abstract = custom_grep(key,"Abstract","char")
  abstract=tolower(abstract)
  keywords=custom_grep(key,"KeywordList","char")
  keywords=tolower(keywords)
  # Search text: title, abstract and keywords concatenated (lower-cased).
  title_abstract_key=paste(title,abstract,sep=",")
  title_abstract_key=paste(title_abstract_key,keywords,sep=",")
  pubmedid=custom_grep(key,"PMID","char")
  # Skip review articles. (Scalar condition, so use the short-circuit &&.)
  if(!is.null(title_abstract_key) && length(grep("Review",review))==0){
    assay=""
    for (epi in Epigenomic){
      if (length(grep(epi,title_abstract_key))>0){assay=paste(assay,"Epigenomics",sep=",")}
    }
    for (bac in Bacteria){
      if (length(grep(bac,title_abstract_key))>0){assay=paste(assay,"Metagenomics",sep=",")}
    }
    for (rna in RNA){
      if (length(grep(rna,title_abstract_key))>0){
        # Add "RNA-Seq" at most once, regardless of how many phrases matched.
        if(!grepl("RNA",assay)){
          assay=paste(assay,"RNA-Seq",sep=",")}}
    }
    for (wxs in WXS){
      if (length(grep(wxs,title_abstract_key))>0){
        if(!grepl("WES",assay)){
          assay=paste(assay,"WES",sep=",")}}
    }
    for (wgs in WGS){
      if (length(grep(wgs,title_abstract_key))>0){assay=paste(assay,"WGS",sep=",")}
    }
    for (sin in single){
      if (length(grep(sin,title_abstract_key))>0){assay=paste(assay,"single",sep=",")}
    }
    for (disease in diseases){
      if (length(title_abstract_key)>0){
        # NOTE(review): title_abstract_key can have length > 1 when the
        # abstract has multiple sections; `if` then only tests the first
        # element (and errors in R >= 4.2) — confirm against the data.
        if(title_abstract_key %like% disease){
          # substr(assay, 2, ...) drops the leading "," separator.
          result=rbind(result,c(disease,pubmedid[1],substr(assay,2,nchar(assay)),journal,year))
        }
      }
    }
  }
}
# Assemble the per-hit matrix into a data.frame with readable column names.
result_df <- as.data.frame(result)
colnames(result_df) <- c("disease", "pmid", "assay", "journal", "year")
# Columns may arrive as factors (pre-R-4.0 default); force plain character.
for (column in c("assay", "journal", "disease", "pmid")) {
  result_df[[column]] <- as.character(result_df[[column]])
}
result_df$disease <- stringr::str_to_title(result_df$disease)
# manually adding missing data by looking at each publication using the PMID
# 995 entries, 847 unique Pubmed-IDs
# --- Manual assay curation (part 1) ----------------------------------------
# Each line overrides the auto-detected assay (or year) for one PMID, based
# on manually reading the publication. Where a PMID appears more than once,
# the LAST assignment wins.
result_df[result_df$pmid=="30944248",]$assay="RNA-Seq,single"
result_df[result_df$pmid=="31428656",]$assay="RNA-Seq,single"
result_df[result_df$pmid=="31370803",]$assay="Metagenomics"
result_df[result_df$pmid=="31360262",]$assay="RNA-Seq"
result_df[result_df$pmid=="31344123",]$assay="RNA-Seq"
result_df[result_df$pmid=="31337345",]$assay="DNA gene panel"
result_df[result_df$pmid=="31101814",]$assay="WGS,WES"
result_df[result_df$pmid=="30783801",]$assay="DNA gene panel"
result_df[result_df$pmid=="30513227",]$assay="DNA gene panel"
result_df[result_df$pmid=="29892977",]$assay="RNA-Seq"
result_df[result_df$pmid=="29891556",]$assay="RNA-Seq"
result_df[result_df$pmid=="29735907",]$assay="DNA gene panel"
result_df[result_df$pmid=="29681619",]$assay="DNA gene panel"
result_df[result_df$pmid=="29659823",]$assay="WES"
result_df[result_df$pmid=="29657145",]$assay="RNA-Seq"
result_df[result_df$pmid=="29432186",]$assay="RNA-Seq"
result_df[result_df$pmid=="28791018",]$assay="DNA gene panel"
result_df[result_df$pmid=="28750028",]$assay="DNA gene panel"
result_df[result_df$pmid=="28728565",]$assay="RNA-Seq"
result_df[result_df$pmid=="28523199",]$assay="DNA gene panel"
result_df[result_df$pmid=="31856934",]$assay="WES"
result_df[result_df$pmid=="31848804",]$assay="DNA gene panel"
result_df[result_df$pmid=="31993940",]$assay="WES"
result_df[result_df$pmid=="31988805",]$assay="RNA-Seq"
result_df[result_df$pmid=="31598594",]$assay="DNA gene panel"
# Publication-year corrections (mis-parsed from the PubDate XML):
result_df[result_df$pmid=="31856934",]$year=2019
result_df[result_df$pmid=="29739689",]$year=2018
result_df[result_df$pmid=="31003835",]$year=2019
result_df[result_df$pmid=="31538826",]$year=2019
result_df[result_df$pmid=="31003835",]$assay="Metagenomics"
result_df[result_df$pmid=="23666743",]$assay="WES"
result_df[result_df$pmid=="25285625",]$assay="DNA gene panel"
result_df[result_df$pmid=="25091625",]$assay="Epigenomics,RNA-Seq"
result_df[result_df$pmid=="24795478",]$assay="WGS,RNA-Seq"
result_df[result_df$pmid=="23606709",]$assay="RNA-Seq"
result_df[result_df$pmid=="32042094",]$assay="DNA gene panel"
result_df[result_df$pmid=="31926670",]$assay="Metagenomics"
result_df[result_df$pmid=="31921732",]$assay="Metagenomics"
result_df[result_df$pmid=="31898522",]$assay="RNA-Seq"
result_df[result_df$pmid=="31880128",]$assay="Metagenomics"
result_df[result_df$pmid=="31874111",]$assay="WGS,WES"
result_df[result_df$pmid=="31779271",]$assay="RNA-Seq"
result_df[result_df$pmid=="31832070",]$assay="DNA gene panel"
result_df[result_df$pmid=="31538826",]$assay="DNA gene panel"
result_df[result_df$pmid=="31523167",]$assay="RNA-Seq"
result_df[result_df$pmid=="31443670",]$assay="DNA gene panel"
result_df[result_df$pmid=="31412876",]$assay="DNA gene panel"
result_df[result_df$pmid=="31237906",]$assay="RNA-Seq"
result_df[result_df$pmid=="30987788",]$assay="DNA gene panel"
result_df[result_df$pmid=="30850477",]$assay="RNA-Seq"
# --- Manual assay curation (part 2) ----------------------------------------
result_df[result_df$pmid=="30836987",]$assay="RNA-Seq"
result_df[result_df$pmid=="30724444",]$assay="RNA-Seq"
result_df[result_df$pmid=="30700427",]$assay="Metagenomics"
result_df[result_df$pmid=="30544699",]$assay="RNA-Seq"
result_df[result_df$pmid=="30185675",]$assay="RNA-Seq"
result_df[result_df$pmid=="30123050",]$assay="RNA-Seq"
result_df[result_df$pmid=="29683194",]$assay="DNA gene panel"
result_df[result_df$pmid=="29614084",]$assay="DNA gene panel"
result_df[result_df$pmid=="29490685",]$assay="DNA gene panel"
result_df[result_df$pmid=="29465611",]$assay="DNA gene panel"
result_df[result_df$pmid=="29381936",]$assay="DNA gene panel"
result_df[result_df$pmid=="29371932",]$assay="RNA-Seq"
result_df[result_df$pmid=="29247798",]$assay="RNA-Seq"
result_df[result_df$pmid=="29137139",]$assay="RNA-Seq"
result_df[result_df$pmid=="29129473",]$assay="DNA gene panel"
result_df[result_df$pmid=="29123165",]$assay="RNA-Seq"
result_df[result_df$pmid=="28808260",]$assay="RNA-Seq"
result_df[result_df$pmid=="28597968",]$assay="WGS,WES"
result_df[result_df$pmid=="28495399",]$assay="DNA gene panel"
result_df[result_df$pmid=="28348750",]$assay="Metagenomics"
result_df[result_df$pmid=="28339495",]$assay="RNA-Seq"
result_df[result_df$pmid=="28179509",]$assay="Metagenomics"
result_df[result_df$pmid=="28115215",]$assay="WGS"
result_df[result_df$pmid=="27835701",]$assay="RNA-Seq"
result_df[result_df$pmid=="27821747",]$assay="PhIP-Seq"
result_df[result_df$pmid=="27571913",]$assay="Metagenomics"
result_df[result_df$pmid=="27542282",]$assay="RNA-Seq"
result_df[result_df$pmid=="27420474",]$assay="RNA-Seq"
result_df[result_df$pmid=="27390188",]$assay="DNA gene panel"
result_df[result_df$pmid=="29635721",]$assay="DNA gene panel"
result_df[result_df$pmid=="29200130",]$assay="DNA gene panel"
result_df[result_df$pmid=="28043923",]$assay="WES"
result_df[result_df$pmid=="27183593",]$assay="RNA-Seq"
result_df[result_df$pmid=="26815131",]$assay="DNA gene panel"
result_df[result_df$pmid=="26713667",]$assay="RNA-Seq"
result_df[result_df$pmid=="26603474",]$assay="DNA gene panel"
result_df[result_df$pmid=="26227771",]$assay="DNA gene panel"
result_df[result_df$pmid=="26001779",]$assay="RNA-Seq"
result_df[result_df$pmid=="25946710",]$assay="RNA-Seq"
result_df[result_df$pmid=="25700344",]$assay="DNA gene panel,RNA-Seq"
# NOTE(review): 25638290 is assigned twice; the second line ("WES") is the
# effective value — confirm which one was intended.
result_df[result_df$pmid=="25638290",]$assay="DNA gene panel"
result_df[result_df$pmid=="25638290",]$assay="WES"
result_df[result_df$pmid=="25498120",]$assay="DNA gene panel"
result_df[result_df$pmid=="25335895",]$assay="RNA-Seq"
result_df[result_df$pmid=="30481710",]$assay="Metagenomics"
# NOTE(review): 29465611 repeats the assignment made above (same value).
result_df[result_df$pmid=="29465611",]$assay="DNA gene panel"
result_df[result_df$pmid=="29997562",]$assay="DNA gene panel"
result_df[result_df$pmid=="22472776",]$assay="DNA gene panel"
result_df[result_df$pmid=="22294635",]$assay="RNA-Seq"
result_df[result_df$pmid=="30746468",]$assay="RNA-Seq"
# --- Manual assay curation (part 3) ----------------------------------------
result_df[result_df$pmid=="31682074",]$assay="RNA-Seq"
result_df[result_df$pmid=="32265907",]$assay="RNA-Seq"
result_df[result_df$pmid=="32237059",]$assay="RNA-Seq"
result_df[result_df$pmid=="32199921",]$assay="DNA gene panel"
result_df[result_df$pmid=="32159782",]$assay="WGS"
result_df[result_df$pmid=="32115259",]$assay="RNA-Seq"
result_df[result_df$pmid=="24136464",]$assay="DNA gene panel"
result_df[result_df$pmid=="27402083",]$assay="RNA-Seq"
result_df[result_df$pmid=="28053302",]$assay="DNA gene panel"
result_df[result_df$pmid=="28053320",]$assay="RNA-Seq"
result_df[result_df$pmid=="28532706",]$assay="WES"
result_df[result_df$pmid=="29472286",]$assay="WES" #case report
result_df[result_df$pmid=="29500522",]$assay="DNA gene panel"
result_df[result_df$pmid=="29040051",]$assay="DNA gene panel" #case reports
result_df[result_df$pmid=="29774027",]$assay="Epigenomics" #bisulfite amplicon seq using NGS
result_df[result_df$pmid=="30962246",]$assay="RNA-Seq" #PRJNA427177
result_df[result_df$pmid=="31101603",]$assay="DNA gene panel" #TCR receptor seq
result_df[result_df$pmid=="31882654",]$assay="RNA-Seq" #BCR repertoire RA
result_df[result_df$pmid=="31926583",]$assay="RNA-Seq" #no making data available
result_df[result_df$pmid=="32191636",]$assay="RNA-Seq" #GEO but embargo
result_df[result_df$pmid=="32196497",]$assay="RNA-Seq" #re-used RNA-Seq data from e.g. SRA
result_df[result_df$pmid=="32332704",]$assay="RNA-Seq" #no making data available
result_df[result_df$pmid=="32365362",]$assay="RNA-Seq" #no making data available
result_df[result_df$pmid=="32403239",]$assay="RNA-Seq" #no making data available
result_df[result_df$pmid=="32471379",]$assay="DNA gene panel" #case reports
result_df[result_df$pmid=="32510848",]$assay="DNA gene panel"
result_df[result_df$pmid=="32518584",]$assay="RNA-Seq" #RA, datasets available on request
result_df[result_df$pmid=="32552384",]$assay="DNA gene panel" #case reports
result_df[result_df$pmid=="32560314",]$assay="RNA-Seq" #PsA, no making data available
result_df[result_df$pmid=="32565918",]$assay="Metagenomics"
result_df[result_df$pmid=="32607330",]$assay="DNA gene panel"
result_df[result_df$pmid=="32659156",]$assay="WES" #case report
# NOTE(review): 32714036 is assigned twice with the same value (duplicate line).
result_df[result_df$pmid=="32714036",]$assay="Metagenomics"
result_df[result_df$pmid=="32714036",]$assay="Metagenomics" #case report Virus identification
result_df[result_df$pmid=="32735477",]$assay="Metagenomics" #case report Virus identification
result_df[result_df$pmid=="32745911",]$assay="RNA-Seq" #OA, no making data available
result_df[result_df$pmid=="32807082",]$assay="Metagenomics" #case report Virus identification
result_df[result_df$pmid=="32818496",]$assay="RNA-Seq" #SLE (LN), https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE155405 (Mouse)
result_df[result_df$pmid=="31651337",]$assay="Epigenomics" #whole genome bisulfite seq using HiSeq4000
result_df[result_df$pmid=="32438470",]$assay="Epigenomics" #bioinformatics whole genome bisulfite seq
# PMID correction: 31926583 was the wrong accession for this record.
result_df[result_df$pmid=="31926583",]$pmid=31465725
# --- Category definitions and manual exclusions ----------------------------
#Epigenomics=bisulfite seq using NGS, ATAC-Seq
#DNA gene panel=targeted re-sequencing
#delete entries
# Rationale for each excluded PMID (kept for traceability):
#no information about NGS assay: 24065968,29607230,32264838,23942639
#no NGS: 21262369,22046267 (GWAS),20514598, 29257335 microarray, 28686088,22402741,22275833,22627728,18037627,18390570,20842748,16227418,15022312,11317015,10750629,26705675,30679579,27421624,
#26984631,25990289 microarray,25828588,28417081,31347786,30906878,12522564,21192791,21436623,21865261,22014533 ChIP microarray,22300536, 23340290,23353785,23950730 (Methylation400 assay),
#24812288, 25261579 (HumanMethylation450 BeadChip arrays), 25537884,25661834 (microarray), 25789080, 26079624, 26492816, 26556652(HumanMethylation450 BeadChips), 26628598(Microarray), 27325182, 27818202
#27876072, 27885849(HumanMethylation450 BeadChips),28081217,28430662,28475762(HumanMethylation450 BeadChips), 28785300(GWAS), 29018507(BS-Seq),26111028,26787370(BS-Seq),29422534,30385847,
#30598166,30779748,31467281(llumina MethylationEPIC BeadChip),31513634(Infinium MethylationEPIC BeadChip array),32509015(microarray),25414732,29854846,32641481
#32581359:GWAS,24445253, 24223582 GWAS,24013639 GWAS
#not disease defined in 1st round:
#29769526, 31021403 periodontal diagnosis, 30100989,31033783 oncology, 31745718, 30337930,#26182405 Aicardi-Goutières-Syndrom,29710329 meningitis,31006511 not using Rheuma seq data, 29434599(keratinocyes)
#32489448 glioma,31091335,31081255,30974145,30313048(myocard infarct),29608426(zika virus infection),29184082(cervical cancer),28920417
#no access and no info of assay in abstract: #30441946,31494232,30946936,30054207,30774014,31586650,25799352,25541309,25373782,25791244,25791245,29113828,30523707,31693422,31755746,30304699,31883828,32157911
#29674726 astrovirus in goslings, 28012117, 31744152 in chicken, #29920970 goose-origin astrovirus, 32532177 (pig)
#not english: 30463656,23981988,30124204 article in chinese
#retracted: 30643044
#not primary research article:#31652165,25598697,24070009,22933063,30632919,31858722 review, 26284111 PERSPECTIVE ARTICLE,28286571 Commentary about NGS and gut microbiome,21273237,
#23025638 & 23244304 Editorial,23275983, 25167330 Commentary,25366188,25557657, 26231343 (editorial), 27028588(meeting report),27482953(editorial),27607472(review),28447857(editorial),
#25165988(Commentary),28770613(review),29644081(review&meta-analysis),31699217,31876195,31902241(review&meta-analysis),32203190(review),32475529(book chapter, review),32611772(editorial),32746644(review),32806879(review)
#30399325(review)
# PMIDs to drop, combining all exclusion reasons listed above:
deletepmids=c("32264838","31494232","31744152","30632919","22933063","25373782","25541309","25799352","26182405","26705675","28012117","28286571","28417081",
"28686088","29257335","29674726","29769526","29920970","30100989","30441946","30463656","30643044","30946936","31021403","31033783","31652165",
"31745718","26284111","22402741","25598697","24065968","24070009","22275833","22627728","18037627","18390570","20842748","16227418","15022312",
"11317015","10750629","30054207","30774014","31586650","30337930","30679579","31858722","29607230","29710329","27421624","26984631","25990289",
"25828588","31347786","31006511","30906878","12522564","21192791","21273237","21436623","21865261","22014533","22300536","23025638","23244304",
"23275983","23340290","23353785","23942639","23950730","23981988","24812288","25167330","25261579","25366188","25537884","25557657","25661834",
"25789080","25791244","25791245","26079624","26231343","26492816","26556652","26628598","27028588","27325182","27482953","27607472","27818202",
"27876072","27885849","28081217","28430662","28447857","25165988","28475762","28770613","28785300","29018507","26787370","26111028","29113828",
"29422534","29644081","30523707","31693422","31755746","30124204","30304699","30385847","30598166","30779748","31467281","31513634","31699217",
"31876195","31883828","31902241","32157911","32203190","32475529","32509015","32532177","32611772","32746644","32806879","25414732","29854846",
"32641481","29434599","32581359","32489448","31091335","31081255","30974145","30399325","30313048","29608426","29184082","28920417","24445253",
"24223582","24013639","20514598","21262369","22046267")
result_df=result_df[(!result_df$pmid %in% deletepmids),]
# Remove individual false-positive (pmid, disease) pairs by NA-ing the row
# and dropping it with na.omit():
#Aicardi-Goutières syndrome–like
result_df[result_df$pmid=="31874111" & result_df$disease=="Gout",]=NA
result_df <- na.omit(result_df)
#no NGS on SLE samples
result_df[result_df$pmid=="29720240" & result_df$disease=="SLE",]=NA
result_df <- na.omit(result_df)
#manually add studies identified via SRA project search
# BUG FIX: the original appended 6-element vectors to the 5-column result_df
# (disease, pmid, assay, journal, year); rbind.data.frame stops with
# "numbers of columns of arguments do not match" on that mismatch. The
# trailing element duplicated the assay_main category, which is only derived
# further below (result_df$assay_main = result_df$assay plus the "Other"
# recoding, which already maps "Epigenomics,RNA-Seq" to "Other"), so it is
# redundant and dropped here.
result_df=rbind(result_df,c("SLE",24645875,"RNA-Seq","Connective tissue research",2014))
result_df=rbind(result_df,c("SLE",31263277,"Epigenomics,RNA-Seq","Nature Immunology",2019))
result_df=rbind(result_df,c("SLE",31890206,"RNA-Seq","Clinical & translational immunology",2019))
result_df=rbind(result_df,c("SLE",29717110,"RNA-Seq","Nature communications",2018))
result_df=rbind(result_df,c("SLE",28420548,"RNA-Seq","Journal of autoimmunity",2017))
result_df=rbind(result_df,c("SLE",30130253,"RNA-Seq","The Journal of clinical investigation",2018))
result_df=rbind(result_df,c("SLE",31200750,"RNA-Seq","Arthritis research & therapy",2019))
result_df=rbind(result_df,c("SLE",30478422,"Epigenomics,RNA-Seq","Nature immunology",2019))
# PMID 31616406 covers three diseases, so it is added once per disease:
result_df=rbind(result_df,c("SLE",31616406,"RNA-Seq","Frontiers in immunology",2019))
result_df=rbind(result_df,c("RA",31616406,"RNA-Seq","Frontiers in immunology",2019))
result_df=rbind(result_df,c("Sjögren's Syndrome",31616406,"RNA-Seq","Frontiers in immunology",2019))
#Rename categories
# Normalise combined-assay labels to one canonical ordering/spelling:
result_df[result_df$assay=="RNA-Seq,WGS",]$assay="WGS,RNA-Seq"
result_df[result_df$assay=="Epigenomics,Epigenomics",]$assay="Epigenomics"
result_df[result_df$assay=="Epigenomics,Epigenomics,RNA-Seq",]$assay="Epigenomics,RNA-Seq"
result_df[result_df$assay=="WES,WGS",]$assay="WGS,WES"
# Any "single"(-cell) flag collapses the label to scRNA-Seq:
result_df[result_df$assay=="single",]$assay="scRNA-Seq"
result_df[result_df$assay=="single,single",]$assay="scRNA-Seq"
result_df[result_df$assay=="RNA-Seq,single",]$assay="scRNA-Seq"
result_df[result_df$assay=="WES,single",]$assay="WES,scRNA-Seq"
result_df[result_df$assay=="RNA-Seq,single,single",]$assay="scRNA-Seq"
result_df[result_df$assay=="DNA gene panel",]$assay="Targeted DNA Seq"
result_df[result_df$assay=="DNA gene panel,RNA-Seq",]$assay="Targeted DNA Seq,RNA-Seq"
# assay_main: coarse category used for plot fills; low-frequency categories
# are pooled into "Other".
result_df$assay_main=result_df$assay
result_df[result_df$assay=="Metagenomics",]$assay_main="Other"
result_df[result_df$assay=="Metagenomics,RNA-Seq,WES",]$assay_main="Other"
result_df[result_df$assay=="Epigenomics",]$assay_main="Other"
result_df[result_df$assay=="Epigenomics,RNA-Seq",]$assay_main="Other"
result_df[result_df$assay=="Epigenomics,RNA-Seq,WES",]$assay_main="Other"
result_df[result_df$assay=="Epigenomics,WGS",]$assay_main="Other"
result_df[result_df$assay=="Metagenomics,RNA-Seq",]$assay_main="Other"
result_df[result_df$assay=="PhIP-Seq",]$assay_main="Other"
# Abbreviate long disease names for plotting:
result_df[result_df$disease=="Systemic Lupus Erythematosus",]$disease="SLE"
result_df[result_df$disease=="Familial Mediterranean Fever",]$disease="FMF"
result_df[result_df$disease=="Rheumatoid Arthritis",]$disease="RA"
result_df[result_df$disease=="Juvenile Idiopathic Arthritis",]$disease="JIA"
result_df[result_df$disease=="Autoinflammatory Syndrome",]$disease="AutoSyn"
result_df[result_df$disease=="Granulomatosis With Polyangiitis",]$disease="GPA"
# Snapshot of the fully curated table:
saveRDS(result_df,file="/Users/sebastian/pubmed_rheuma_HTS/result_df_11Sep2020.RDS")
#-------redo all figures from paper--------------------------------
#Figure 3: diseases vs assay
# df2: per-disease publication counts, used for the count labels at y=200.
df2=result_df%>%
group_by(disease)%>%
count(disease,name="number")
# Stacked bar chart: publications per disease, filled by main assay category,
# diseases ordered by frequency.
ggplot(data=result_df, aes(x=fct_infreq(disease),fill=assay_main)) + geom_bar(position="stack",stat="count") +
labs(y="# publications on pubmed") +
geom_text(data = df2, aes(x=disease,y=200,label=number,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,200,10)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S3. disease vs other_assays
# Breakdown of the pooled "Other" category by detailed assay label.
ggplot(data=subset(result_df,assay_main=="Other"), aes(x=fct_infreq(disease),fill=assay)) + geom_bar(position="stack",stat="count") +
labs(y="# publications on pubmed") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
# Deduplicate by publication: dropping the disease column and calling unique()
# keeps one row per (pmid, assay, journal, year) combination, so multi-disease
# papers are counted once in the per-year figures.
result_df_unique=result_df
result_df_unique$disease=NULL
result_df_unique=unique(result_df_unique)
#S2: scRNA-Seq vs year
ggplot(data=subset(result_df_unique, result_df_unique$assay %in% c("scRNA-Seq","WES,scRNA-Seq")), aes(x=year,fill=assay)) + geom_bar(position="stack",stat="count") +
labs(y="# unique publications on pubmed") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,25,5)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure 2: publications vs year
# df2: per-year counts for the labels at y=200.
df2=result_df_unique%>%
group_by(year)%>%
count(year,name="number")
abundance_plot=ggplot(data=result_df_unique, aes(x=year,fill=assay_main)) + geom_bar(position="stack",stat="count") +
labs(y="# unique publications on pubmed") +
geom_text(data = df2, aes(x=year,y=200,label=number,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,400,10)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
abundance_plot
#FigureS1: exponential growth
# Fit log-linear growth of publications per year and overlay the fitted
# exponential curve on the Figure 2 plot.
occurences=table(unlist(result_df_unique$year))
# NOTE(review): indices 9:18 hard-code a 10-year window of the year table
# (presumably 2011-2020) — confirm against the actual table entries.
year_abundance=as.data.frame(occurences[9:18])
names(year_abundance)=c("year","abundance")
df <- cbind(year_abundance, index = 1:nrow(year_abundance))
fit <- lm(log(abundance) ~ index, data = df)
abundance_plot + stat_function(fun = function(x) exp(fit$coefficients[1] + x*fit$coefficients[2]))
#------journals vs assays
# Count unique publications per journal; plot only journals with >= 8 papers.
journal_freq=table(result_df_unique$journal)
journal_freq=as.data.frame(journal_freq)
names(journal_freq)=c("journal","freq")
journal_freq$freq=as.numeric(journal_freq$freq)
ggplot(data=subset(result_df_unique,journal %in% journal_freq[journal_freq$freq>=8,]$journal), aes(x=fct_infreq(journal),fill=assay_main)) + geom_bar(position="stack",stat="count") +
labs(y="# unique publications on pubmed") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,60,10)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
# Build a (pmid, title, abstract) table for every publication that survived
# curation, by re-scanning the raw PubMed records.
TableForPub=NULL
for (key in abstracts_list){
pubmedid=custom_grep(key,"PMID","char")
if (pubmedid[1] %in% result_df$pmid){
title=custom_grep(key,"ArticleTitle","char")
abstract = custom_grep(key,"Abstract","char")
TableForPub=rbind(TableForPub,c(pubmedid[1],title,abstract))
}
}
#create table with title and abstract of all identified publications
TableForPub=as.data.frame(TableForPub)
names(TableForPub)=c("pmid","title","abstract")
#export both result tables
write.csv2(result_df,"~/pubmed_rheuma_HTS/main_results.csv")
write.csv2(TableForPub,"~/pubmed_rheuma_HTS/title_and_abstracts.csv")
#number of assays in unique publications
# Unique PMIDs whose assay label includes RNA-Seq (any combination):
length(unique(result_df[result_df$assay %in% c("RNA-Seq","Epigenomics,RNA-Seq","Epigenomics,RNA-Seq,WES","Metagenomics,RNA-Seq","Metagenomics,RNA-Seq,WES","RNA-Seq,WES","scRNA-Seq","Targeted DNA Seq,RNA-Seq","WES,scRNA-Seq","WGS,RNA-Seq"),]$pmid))
# ...single-cell RNA-Seq:
length(unique(result_df[result_df$assay %in% c("scRNA-Seq","WES,scRNA-Seq"),]$pmid))
# ...WES or targeted DNA sequencing:
length(unique(result_df[result_df$assay %in% c("WES","Epigenomics,RNA-Seq,WES","Metagenomics,RNA-Seq,WES","RNA-Seq,WES","WES,scRNA-Seq","WGS,WES","Targeted DNA Seq","Targeted DNA Seq,RNA-Seq"),]$pmid))
# ...metagenomics:
length(unique(result_df[result_df$assay %in% c("Metagenomics","Metagenomics,RNA-Seq,WES","Metagenomics,RNA-Seq"),]$pmid))
# ...whole-genome sequencing:
length(unique(result_df[result_df$assay %in% c("WGS","Epigenomics,WGS","WGS,RNA-Seq","WGS,WES"),]$pmid))
# ...epigenomics:
length(unique(result_df[result_df$assay %in% c("Epigenomics","Epigenomics,WGS","Epigenomics,RNA-Seq","Epigenomics,RNA-Seq,WES"),]$pmid))
#2nd analysis----------------------------------------------------
#no datasets: FMF, enthesitis, polychondritis
#gout: only cancer datasets
#-----------Figure 4 -------SRA datasets
# Per-disease SRA sample counts, stacked by assay type.
sra_datasets <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_assays.tsv")
sra_datasets=as.data.frame(sra_datasets)
sra_datasets$assay=as.character(sra_datasets$assay)
# Recode the raw SRA library-strategy labels into display categories:
sra_datasets[sra_datasets$assay=="chip",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="chip-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="mirna-seq",]$assay="miRNA/ncRNA-Seq"
sra_datasets[sra_datasets$assay=="ncrna-seq",]$assay="miRNA/ncRNA-Seq"
#Methylation=Bisulfite-Seq, MEDIP-Seq/MRE-Seq
sra_datasets[sra_datasets$assay=="bisulfite-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="atac-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="dnase-hypersensitivity",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="medip-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="mre-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="amplicon",]$assay="Targeted-capture"
sra_datasets[sra_datasets$assay=="targeted-capture",]$assay="Targeted-capture"
sra_datasets[sra_datasets$assay=="rna-seq",]$assay="RNA-Seq"
sra_datasets[sra_datasets$assay=="wgs",]$assay="WGS"
sra_datasets[sra_datasets$assay=="wxs",]$assay="WXS"
sra_datasets[sra_datasets$assay=="tn-seq",]$assay="TN-Seq"
sra_datasets[sra_datasets$assay=="mbd-seq",]$assay="MBD-Seq"
sra_datasets[sra_datasets$assay=="hi-c",]$assay="Hi-C"
sra_datasets[sra_datasets$assay=="other",]$assay="Other"
sra_datasets$disease=as.character(sra_datasets$disease)
sra_datasets$samples=as.integer(sra_datasets$samples)
# Human-readable disease names for the axis labels:
sra_datasets[sra_datasets$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_datasets[sra_datasets$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_datasets[sra_datasets$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# df2: total samples per disease, used for ordering and the y=9000 labels.
df2=sra_datasets%>%
group_by(disease)%>%
summarise(samples=sum(samples))
sra_datasets$disease=factor(sra_datasets$disease,levels=df2[order(df2$samples,decreasing = TRUE),]$disease)
ggplot(data=sra_datasets, aes(x=disease,y=samples,fill=assay)) + geom_bar(position="stack",stat="identity") +
geom_text(data = df2, aes(x=disease,y=9000,label=samples,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S5 ------------samples per study----------------------------
# Jitter + boxplot (log scale) of samples per SRA study, per disease, with
# study counts at y=8000 and the per-disease median printed above each box.
sra_studies <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_studies.tsv")
sra_studies=as.data.frame(sra_studies)
sra_studies$number=as.integer(sra_studies$number)
sra_studies$disease=as.character(sra_studies$disease)
sra_studies[sra_studies$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_studies[sra_studies$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
# CONSISTENCY FIX: the other SRA figures label this disease
# "Sjögren's Syndrome"; this one used the ASCII variant "Sjoegren's
# Syndrome", giving a mismatched axis label across figures.
sra_studies[sra_studies$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# df2: number of studies per disease (ordering + labels).
df2=sra_studies%>%
group_by(disease)%>%
count(disease,name="number")
sra_studies$disease=factor(sra_studies$disease,levels=df2[order(df2$number,decreasing = TRUE),]$disease)
dataMedian <- summarise(group_by(sra_studies, disease), MD = median(number))
ggplot(data=sra_studies, aes(x=disease,y=number)) + geom_jitter() + geom_boxplot(alpha = 0.2,outlier.shape = NA) + scale_y_log10() +
geom_text(data = df2, aes(x=disease,y=8000,label=number,angle=60),inherit.aes = FALSE) +
geom_text(data = dataMedian, aes(disease, MD, label = MD), position = position_dodge(width = 0.8), size = 3, vjust = -0.5) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) + ylab("number of samples per study") +
theme(legend.title = element_blank()) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#FigureS9-------sequencing_instrument
# Per-disease sample counts, stacked by sequencing instrument.
sra_instruments <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_instrument.tsv")
sra_instruments=as.data.frame(sra_instruments)
sra_instruments$instrument=as.character(sra_instruments$instrument)
sra_instruments$disease=as.character(sra_instruments$disease)
sra_instruments$samples=as.integer(sra_instruments$samples)
# Drop zero-count rows (zeros anywhere in a row mark it for removal):
sra_instruments[sra_instruments==0] <- NA
sra_instruments=sra_instruments[complete.cases(sra_instruments),]
sra_instruments[sra_instruments$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_instruments[sra_instruments$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_instruments[sra_instruments$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# Total samples per disease for bar ordering and the y=9000 labels.
df_instrument=sra_instruments%>%
group_by(disease)%>%
summarise(samples=sum(samples))
sra_instruments$disease=factor(sra_instruments$disease,levels=df_instrument[order(df_instrument$samples,decreasing = TRUE),]$disease)
ggplot(data=sra_instruments, aes(x=disease,y=samples,fill=instrument)) + geom_bar(position="stack",stat="identity") +
geom_text(data = df_instrument, aes(x=disease,y=9000,label=samples,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S10-------layout
# SRA samples per disease, stacked by library layout (single/paired).
sra_layout <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_layout.tsv")
sra_layout <- as.data.frame(sra_layout)
sra_layout$layout <- as.character(sra_layout$layout)
sra_layout$disease <- as.character(sra_layout$disease)
sra_layout$samples <- as.integer(sra_layout$samples)
# Unlike the sibling sections, zero rows are deliberately kept here:
#sra_layout[sra_layout==0] <- NA
#sra_layout=sra_layout[complete.cases(sra_layout),]
# Human-readable disease labels.
sra_layout[sra_layout$disease=="MyoPolyDerma",]$disease <- "(poly/derma)myositis"
sra_layout[sra_layout$disease=="SysSclerosis",]$disease <- "Systemic Sclerosis"
sra_layout[sra_layout$disease=="Sjoegren",]$disease <- "Sjögren's Syndrome"
# Per-disease totals used for axis ordering and bar-top labels.
df2 <- sra_layout %>%
  group_by(disease) %>%
  summarise(samples = sum(samples))
# BUG FIX: level order was computed with order(sra_layout$samples, ...), i.e.
# row indices from the full table were used to index df2 (one row per disease),
# yielding out-of-range/NA and duplicated levels. Order df2 by its own totals,
# exactly as every other figure section does.
sra_layout$disease <- factor(sra_layout$disease,
                             levels = df2[order(df2$samples, decreasing = TRUE),]$disease)
ggplot(data=sra_layout, aes(x=disease,y=samples,fill=layout)) +
  geom_bar(position="stack",stat="identity") +
  geom_text(data = df2, aes(x=disease,y=9000,label=samples, angle=60),inherit.aes = FALSE) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
        axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13),
        axis.title.y=element_text(colour="black", size = 14)) +
  theme(legend.title = element_blank()) +
  scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
  theme(axis.line = element_line(size=1, colour = "black"),
        panel.grid.major = element_line(colour = "#d3d3d3"),
        panel.grid.minor = element_blank(), panel.background = element_blank())
#----------source------
#SRA datasets
# Samples per disease, stacked by sample source material.
sra_source <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_source.tsv")
sra_source <- as.data.frame(sra_source)
sra_source$source <- as.character(sra_source$source)
sra_source$disease <- as.character(sra_source$disease)
sra_source$samples <- as.integer(sra_source$samples)
# Human-readable disease labels.
sra_source[sra_source$disease=="MyoPolyDerma",]$disease <- "(poly/derma)myositis"
sra_source[sra_source$disease=="SysSclerosis",]$disease <- "Systemic Sclerosis"
sra_source[sra_source$disease=="Sjoegren",]$disease <- "Sjögren's Syndrome"
# Per-disease totals used for axis ordering and bar-top labels.
df2 <- sra_source %>%
  group_by(disease) %>%
  summarise(samples = sum(samples))
sra_source$disease <- factor(sra_source$disease,
                             levels = df2[order(df2$samples, decreasing = TRUE),]$disease)
ggplot(data=sra_source, aes(x=disease,y=samples,fill=source)) +
  geom_bar(position="stack",stat="identity") +
  # BUG FIX: removed the stray trailing comma in aes(..., label=samples,),
  # which passes an empty argument to aes() and is rejected by newer ggplot2.
  # NOTE(review): sibling sections also pass angle=60 here -- confirm whether
  # the horizontal labels in this plot are intentional.
  geom_text(data = df2, aes(x=disease,y=9000,label=samples),inherit.aes = FALSE) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
        axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13),
        axis.title.y=element_text(colour="black", size = 14)) +
  theme(legend.title = element_blank()) +
  scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
  theme(axis.line = element_line(size=1, colour = "black"),
        panel.grid.major = element_line(colour = "#d3d3d3"),
        panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure 5-------tissues
# Samples per disease, stacked by tissue; rare tissues collapsed into "other",
# with a separate zoom-in plot on the "other" bucket.
sra_tissue <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_tissue.tsv")
sra_tissue <- as.data.frame(sra_tissue)
sra_tissue$tissue <- as.character(sra_tissue$tissue)
sra_tissue$disease <- as.character(sra_tissue$disease)
sra_tissue$samples <- as.integer(sra_tissue$samples)
# Treat zero counts as missing and drop incomplete rows.
sra_tissue[sra_tissue==0] <- NA
sra_tissue <- sra_tissue[complete.cases(sra_tissue),]
# Human-readable disease labels.
sra_tissue[sra_tissue$disease=="MyoPolyDerma",]$disease <- "(poly/derma)myositis"
sra_tissue[sra_tissue$disease=="SysSclerosis",]$disease <- "Systemic Sclerosis"
sra_tissue[sra_tissue$disease=="Sjoegren",]$disease <- "Sjögren's Syndrome"
# Per-disease totals used for axis ordering.
df2 <- sra_tissue %>%
  group_by(disease) %>%
  summarise(samples = sum(samples))
sra_tissue$disease <- factor(sra_tissue$disease,
                             levels = df2[order(df2$samples, decreasing = TRUE),]$disease)
# Collapse rare tissues into "other" (was 11 repeated subset-assignments).
rare_tissues <- c("fibroblast", "hip", "inner ear", "joint", "knee",
                  "lymph node", "muscle", "peritoneal lavage", "retina",
                  "salivary gland", "spleen")
sra_tissue$tissue_main <- sra_tissue$tissue
sra_tissue$tissue_main[sra_tissue$tissue %in% rare_tissues] <- "other"
# Main plot: all diseases, major tissues plus the "other" bucket.
ggplot(data=sra_tissue, aes(x=disease,y=samples,fill=tissue_main)) +
  geom_bar(position="stack",stat="identity") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
        axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13),
        axis.title.y=element_text(colour="black", size = 14)) +
  theme(legend.title = element_blank()) +
  scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
  theme(axis.line = element_line(size=1, colour = "black"),
        panel.grid.major = element_line(colour = "#d3d3d3"),
        panel.grid.minor = element_blank(), panel.background = element_blank())
# Zoom-in on the "other" bucket, broken down by individual tissue.
ggplot(data=subset(sra_tissue,tissue_main=="other"), aes(x=disease,y=samples,fill=tissue)) +
  geom_bar(position="stack",stat="identity") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
        axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13),
        axis.title.y=element_text(colour="black", size = 14)) +
  theme(legend.title = element_blank()) +
  scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,250,10)) +
  theme(axis.line = element_line(size=1, colour = "black"),
        panel.grid.major = element_line(colour = "#d3d3d3"),
        panel.grid.minor = element_blank(), panel.background = element_blank())
# Per-tissue totals (inspected interactively; note this overwrites df2).
df2 <- sra_tissue %>%
  group_by(tissue) %>%
  summarise(samples = sum(samples))
#Figure S6----------organism------
# Samples per disease, stacked by organism; all individual bacterial species
# are collapsed into a single "bacteria" category.
sra_organism <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_organism.tsv")
sra_organism <- as.data.frame(sra_organism)
sra_organism$organism <- as.character(sra_organism$organism)
sra_organism$disease <- as.character(sra_organism$disease)
sra_organism$samples <- as.integer(sra_organism$samples)
# Collapse species-level labels into "bacteria" (was 7 repeated subset-assignments).
bacteria_names <- c("klebsiella pneumoniae", "pseudomonas aeruginosa",
                    "staphylococcus aureus", "staphylococcus pseudintermedius",
                    "streptococcus pyogenes", "uncultured bacterium",
                    "flavobacterium psychrophilum")
sra_organism$organism[sra_organism$organism %in% bacteria_names] <- "bacteria"
# Human-readable disease labels.
sra_organism[sra_organism$disease=="MyoPolyDerma",]$disease <- "(poly/derma)myositis"
sra_organism[sra_organism$disease=="SysSclerosis",]$disease <- "Systemic Sclerosis"
sra_organism[sra_organism$disease=="Sjoegren",]$disease <- "Sjögren's Syndrome"
# Per-disease totals used for axis ordering and bar-top labels.
df2 <- sra_organism %>%
  group_by(disease) %>%
  summarise(samples = sum(samples))
sra_organism$disease <- factor(sra_organism$disease,
                               levels = df2[order(df2$samples, decreasing = TRUE),]$disease)
ggplot(data=sra_organism, aes(x=disease,y=samples,fill=organism)) +
  geom_bar(position="stack",stat="identity") +
  geom_text(data = df2, aes(x=disease,y=9000,label=samples,angle=60),inherit.aes = FALSE) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
        axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13),
        axis.title.y=element_text(colour="black", size = 14)) +
  theme(legend.title = element_blank()) +
  scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
  theme(axis.line = element_line(size=1, colour = "black"),
        panel.grid.major = element_line(colour = "#d3d3d3"),
        panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S7--------phenotype--------
# Samples per disease, stacked by recorded phenotype.
sra_phenotype <- as.data.frame(
  read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_phenotype.tsv")
)
sra_phenotype$phenotype <- as.character(sra_phenotype$phenotype)
sra_phenotype$disease <- as.character(sra_phenotype$disease)
sra_phenotype$samples <- as.integer(sra_phenotype$samples)
# Human-readable disease labels.
sra_phenotype$disease[sra_phenotype$disease == "MyoPolyDerma"] <- "(poly/derma)myositis"
sra_phenotype$disease[sra_phenotype$disease == "SysSclerosis"] <- "Systemic Sclerosis"
sra_phenotype$disease[sra_phenotype$disease == "Sjoegren"] <- "Sjögren's Syndrome"
# Per-disease totals: axis ordering plus bar-top labels.
df2 <- sra_phenotype %>%
  group_by(disease) %>%
  summarise(samples = sum(samples))
pheno_order <- df2$disease[order(df2$samples, decreasing = TRUE)]
sra_phenotype$disease <- factor(sra_phenotype$disease, levels = pheno_order)
ggplot(sra_phenotype, aes(x = disease, y = samples, fill = phenotype)) +
  geom_bar(position = "stack", stat = "identity") +
  geom_text(data = df2, aes(x = disease, y = 9000, label = samples, angle = 60),
            inherit.aes = FALSE) +
  scale_fill_brewer(palette = "Paired") +
  scale_y_continuous(breaks = seq(0, 9000, 1000)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 13, colour = "black"),
        axis.title.x = element_blank(),
        axis.text.y = element_text(colour = "black", size = 13),
        axis.title.y = element_text(colour = "black", size = 14),
        legend.title = element_blank(),
        axis.line = element_line(size = 1, colour = "black"),
        panel.grid.major = element_line(colour = "#d3d3d3"),
        panel.grid.minor = element_blank(),
        panel.background = element_blank())
#------sra vs pubmed------
# Cross-check: which SLE RNA-seq publications found by PubMed mining are also
# present in the SRA-derived table?
sra_pubmed <- read.delim("~/pubmed_rheuma_HTS/sra_pubmed_copy.tsv")
sra_pubmed <- as.data.frame(sra_pubmed)
for (col in c("assay", "disease", "patient.information")) {
  sra_pubmed[[col]] <- as.character(sra_pubmed[[col]])
}
# RNA-based assay labels as they appear in the SRA table (lower case) and in
# result_df (PubMed mining), respectively.
sra_rna_assays <- c("rna-seq", "ncrna-seq", "mirna-seq")
pubmed_rna_assays <- c("RNA-Seq", "scRNA-Seq", "Targeted DNA Seq,RNA-Seq", "Epigenomics,RNA-Seq")
SLE_pubmed <- sra_pubmed[sra_pubmed$disease == "SLE", ]
rna_rows <- SLE_pubmed$assay %in% sra_rna_assays
SLE_pubmed_rna_patient_info <- SLE_pubmed[rna_rows, ]
SLE_pubmed_rna <- SLE_pubmed$pubmed[rna_rows]
SLE_result_df <- result_df[result_df$disease == "SLE", ]
SLE_result_df_rna <- SLE_result_df$pmid[SLE_result_df$assay %in% pubmed_rna_assays]
# Studies found by both routes (printed interactively).
intersect(SLE_result_df_rna, SLE_pubmed_rna)
# PubMed-mined RNA studies with no matching SRA entry.
diff_pubmed_result_df <- setdiff(SLE_result_df_rna, SLE_pubmed_rna)
sra_pubmed_missing <- read.delim("~/pubmed_rheuma_HTS/data_availability_sle.txt")
# Of those, which are not covered by the manual data-availability review?
setdiff(diff_pubmed_result_df, sra_pubmed_missing$pmid)
#export results
write.csv2(SLE_pubmed_rna_patient_info,"~/pubmed_rheuma_HTS/SLE_pubmed_rna_patient_info.csv")
#Figure S12-----plot patient information---------------------------
# Bar chart: SLE RNA-seq publications per patient-information category.
df2 <- SLE_pubmed_rna_patient_info %>%
  count(patient.information, name = "number")
info_order <- df2$patient.information[order(df2$number, decreasing = TRUE)]
SLE_pubmed_rna_patient_info$patient.information <-
  factor(SLE_pubmed_rna_patient_info$patient.information, levels = info_order)
ggplot(SLE_pubmed_rna_patient_info, aes(x = patient.information)) +
  geom_bar() +
  geom_text(data = df2, aes(x = patient.information, y = 10, label = number),
            inherit.aes = FALSE) +
  # No fill aesthetic is mapped above, so this scale has no visible effect;
  # kept for parity with the sibling plots.
  scale_fill_brewer(palette = "Paired") +
  scale_y_continuous(breaks = seq(0, 15, 5)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 13, colour = "black"),
        axis.title.x = element_blank(),
        axis.text.y = element_text(colour = "black", size = 13),
        axis.title.y = element_text(colour = "black", size = 14),
        legend.title = element_blank(),
        axis.line = element_line(size = 1, colour = "black"),
        panel.grid.major = element_line(colour = "#d3d3d3"),
        panel.grid.minor = element_blank(),
        panel.background = element_blank())
#Figure S8-----plot raw data availability---------------------------
# Bar chart: raw-data availability categories for the unmatched SLE studies.
df2 <- sra_pubmed_missing %>%
  count(availability, name = "number")
avail_order <- df2$availability[order(df2$number, decreasing = TRUE)]
sra_pubmed_missing$availability <-
  factor(sra_pubmed_missing$availability, levels = avail_order)
ggplot(sra_pubmed_missing, aes(x = availability)) +
  geom_bar() +
  geom_text(data = df2, aes(x = availability, y = 35, label = number, angle = 60),
            inherit.aes = FALSE) +
  # No fill aesthetic is mapped above; kept for parity with the sibling plots.
  scale_fill_brewer(palette = "Paired") +
  #scale_y_continuous(breaks=seq(0,9000,1000)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 13, colour = "black"),
        axis.title.x = element_blank(),
        axis.text.y = element_text(colour = "black", size = 13),
        axis.title.y = element_text(colour = "black", size = 14),
        legend.title = element_blank(),
        axis.line = element_line(size = 1, colour = "black"),
        panel.grid.major = element_line(colour = "#d3d3d3"),
        panel.grid.minor = element_blank(),
        panel.background = element_blank())
| /pubmed_search.R | no_license | sebboegel/pubmed_rheuma_HTS | R | false | false | 48,281 | r | library(easyPubMed)
library(tidyverse)
library(data.table)
library(ggplot2)
library(dplyr)
#search performed August 15, 2020
# First, broad PubMed query: any high-throughput-sequencing term AND any
# general rheumatology term. Its hits are only used to harvest author keywords
# (next section), from which the disease-specific second query was built.
pubmed_query_string="(methylomics OR epigenomics OR NGS OR \"next generation sequencing\" OR RNA-Seq OR \"mRNA sequencing\" OR \"RNA sequencing\" OR \"RNA-sequencing\" OR \"transcriptome sequencing\" OR \"whole exome sequencing\" OR \"whole-exome sequencing\" OR \"high throughput sequencing\" OR \"high-throughput sequencing\" OR \"DNA sequencing\" OR \"RNA sequencing\" OR \"RNA-sequencing\" OR \"DNA-sequencing\" OR WXS OR WGS OR \"whole-genome sequencing\" OR \"whole genome sequencing\") AND (rheumatology OR \"rheumatologic disease\" OR \"rheumatologic disease\" ))"
pubmed_ids <- get_pubmed_ids(pubmed_query_string)
#1097 entries
# Fetch all matching records as one XML blob and split into one string per article.
abstracts_xml <- fetch_pubmed_data(pubmed_ids,retmax = 5000)
abstracts_list <- articles_to_list(abstracts_xml)
# Cache the raw article list so the keyword analysis can be re-run offline.
saveRDS(abstracts_list,file="/Users/sebastian/pubmed_rheuma_HTS/abstracts_list_for_keywords.RDS")
# ---- Harvest author keywords and keep those matching an ICD-11 title ----
# Extract every <Keyword> entry from each article's KeywordList XML block.
# BUG FIX: both accumulators were seeded with c(""), which injected a bogus
# empty-string entry into the final frequency table; seed with character(0).
keyword_chunks <- vector("list", length(abstracts_list))
for (i in seq_along(abstracts_list)) {
  keywords <- custom_grep(abstracts_list[[i]], "KeywordList", "char")
  if (!is.null(keywords)) {
    # Strip the opening <Keyword ...> tags, then split on the closing tag.
    out <- str_replace_all(keywords, "<Keyword MajorTopicYN=\"N\">", "")
    out <- str_replace_all(out, "<Keyword MajorTopicYN=\"Y\">", "")
    keyword_chunks[[i]] <- strsplit(out, "</Keyword>")[[1]]
  }
}
# Collect once instead of growing a vector with c() inside the loop.
final <- unlist(keyword_chunks)
final <- final[final != " "]
final <- tolower(final)
# Keep only keywords that occur verbatim in an ICD-11 disease title.
icd11 <- read.delim("~/icd11.txt", stringsAsFactors = FALSE)
icd11$Title <- tolower(icd11$Title)
# BUG FIX: the original test was length(retrows) >= 1, but length() of a
# data.frame is its column count (always >= 1), so the ICD-11 filter was a
# no-op. Also match literally (fixed = TRUE): keywords may contain regex
# metacharacters such as "(", which %like% would treat as a pattern.
keep <- vapply(final,
               function(key) any(grepl(key, icd11$Title, fixed = TRUE)),
               logical(1))
finalICD11_filtered <- final[keep]
finalTable_freq <- table(finalICD11_filtered)
View(finalTable_freq)
write.csv2(finalTable_freq,"/Users/sebastian/pubmed_rheuma_HTS/finalTable_freq.csv")
#326 entries in finalTable_freq (count from the original run, before the fixes above)
#search performed Sep,4, 2020
# Second, disease-specific search round: HTS terms AND the rheumatic-disease
# names distilled from the ICD-11-filtered keyword table above.
pubmed_query_string="(methylomics OR epigenomics OR NGS OR \"next generation sequencing\" OR RNA-Seq OR \"mRNA sequencing\" OR \"RNA sequencing\" OR \"RNA-sequencing\" OR \"transcriptome sequencing\" OR \"whole exome sequencing\" OR \"whole-exome sequencing\" OR \"high throughput sequencing\" OR \"high-throughput sequencing\" OR WXS OR WGS OR \"whole-genome sequencing\" OR \"whole genome sequencing\") AND (\"autoinflammatory syndrome\" OR dermatomyositis OR enthesitis OR \"familial mediterranean fever\" OR \"granulomatosis with polyangiitis\" OR \"juvenile idiopathic arthritis\" OR myositis OR osteoarthritis OR polymyositis OR \"psoriatic arthritis\" OR \"rheumatoid arthritis\" OR sacroiliitis OR \"sjögren syndrome\" OR \"sjögren's syndrome\" OR spondyloarthritis OR synovitis OR \"systemic lupus erythematosus\" OR \"systemic sclerosis\" OR vasculitis OR uveitis OR gout OR polychondritis)"
pubmed_ids <- get_pubmed_ids(pubmed_query_string)
pubmed_ids$Count
#1162 hits
abstracts_xml <- fetch_pubmed_data(pubmed_ids,retmax = 5000)
abstracts_list <- articles_to_list(abstracts_xml)
# Disease names searched for in each article's title/abstract/keywords.
diseases=c("autoinflammatory syndrome","dermatomyositis","enthesitis","familial mediterranean fever","granulomatosis with polyangiitis","juvenile idiopathic arthritis","myositis","osteoarthritis","polymyositis","psoriatic arthritis","rheumatoid arthritis","sacroiliitis","sjögren syndrome","sjögren's syndrome","spondyloarthritis","synovitis","systemic lupus erythematosus","systemic sclerosis","vasculitis","uveitis","gout","polychondritis")
# Lower-cased term lists used to assign assay categories in the loop below.
RNA=c("rna sequencing", "transcriptome sequencing", "rna-seq","rna-based next-generation sequencing","mrna profiles","rna-sequencing")
WXS=c("whole exome sequencing","whole-exome sequencing","whole-exome-sequencing","amplicon sequencing")
WGS=c("whole-genome sequencing","whole genome sequencing","whole-genome shotgun sequencing")
Bacteria=c("16s","metagenomics")
Epigenomic=c("atac-seq","chip-seq","wgbs","methylomics","methylomic","epigenomics")
single=c("single cell","single-cell")
# Accumulator: one row per (disease, article) match, filled by the loop below.
result=NULL
# ---- Classification loop ----
# For every article: pull title, abstract and keyword list, lower-case and
# concatenate them, tag the article with assay categories (term lists defined
# above) and with every disease name that occurs in the text. Reviews are
# excluded. One row per (disease, article) match is appended to `result`
# (grows via rbind -- quadratic, but n is only ~1000 articles here).
for (key in abstracts_list){
review=custom_grep(key,"PublicationType","char")
# NOTE(review): str_replace(.., "&", "&") is a no-op as written; it was very
# likely str_replace(.., "&amp;", "&") before an HTML-unescaping pass mangled
# this file -- confirm against the original script.
journal=str_replace(custom_grep(custom_grep(key,"Journal","char"),"Title","char"),"&","&")
year=custom_grep(custom_grep(key,"PubDate","char"),"Year","char")
title=custom_grep(key,"ArticleTitle","char")
title=tolower(title)
abstract = custom_grep(key,"Abstract","char")
abstract=tolower(abstract)
keywords=custom_grep(key,"KeywordList","char")
keywords=tolower(keywords)
# Single lower-cased haystack searched for all assay/disease terms below.
title_abstract_key=paste(title,abstract,sep=",")
title_abstract_key=paste(title_abstract_key,keywords,sep=",")
pubmedid=custom_grep(key,"PMID","char")
# Skip review articles; keep only articles with some text to search.
if(!is.null(title_abstract_key) & length(grep("Review",review))==0){
assay=""
for (epi in Epigenomic){
if (length(grep(epi,title_abstract_key))>0){assay=paste(assay,"Epigenomics",sep=",")}
}
for (bac in Bacteria){
if (length(grep(bac,title_abstract_key))>0){assay=paste(assay,"Metagenomics",sep=",")}
}
# The inner !grepl() guards keep RNA-Seq / WES from being appended twice
# when several of their synonyms match the same article.
for (rna in RNA){
if (length(grep(rna,title_abstract_key))>0){
if(!grepl("RNA",assay)){
assay=paste(assay,"RNA-Seq",sep=",")}}
}
for (wxs in WXS){
if (length(grep(wxs,title_abstract_key))>0){
if(!grepl("WES",assay)){
assay=paste(assay,"WES",sep=",")}}
}
for (wgs in WGS){
if (length(grep(wgs,title_abstract_key))>0){assay=paste(assay,"WGS",sep=",")}
}
for (sin in single){
if (length(grep(sin,title_abstract_key))>0){assay=paste(assay,"single",sep=",")}
}
# substr(assay, 2, ...) drops the leading comma left by the paste chain.
for (disease in diseases){
if (length(title_abstract_key)>0){
if(title_abstract_key %like% disease){
result=rbind(result,c(disease,pubmedid[1],substr(assay,2,nchar(assay)),journal,year))
}
}
}
}
}
# Assemble the result matrix into a data.frame with character columns.
result_df=as.data.frame(result)
names(result_df)=c("disease","pmid","assay","journal","year")
result_df$assay=as.character(result_df$assay)
result_df$journal=as.character(result_df$journal)
result_df$disease=as.character(result_df$disease)
# Title-case disease names for display ("rheumatoid arthritis" -> "Rheumatoid Arthritis").
result_df$disease=stringr::str_to_title(result_df$disease)
#manually adding missing data by looking at each publication using the PMID
#995 entries, 847 unique Pubmed-IDs
# Manual curation: for articles whose assay could not be derived from the text,
# the assay (and occasionally the year) was looked up by hand via the PMID.
result_df$pmid=as.character(result_df$pmid)
result_df[result_df$pmid=="30944248",]$assay="RNA-Seq,single"
result_df[result_df$pmid=="31428656",]$assay="RNA-Seq,single"
result_df[result_df$pmid=="31370803",]$assay="Metagenomics"
result_df[result_df$pmid=="31360262",]$assay="RNA-Seq"
result_df[result_df$pmid=="31344123",]$assay="RNA-Seq"
result_df[result_df$pmid=="31337345",]$assay="DNA gene panel"
result_df[result_df$pmid=="31101814",]$assay="WGS,WES"
result_df[result_df$pmid=="30783801",]$assay="DNA gene panel"
result_df[result_df$pmid=="30513227",]$assay="DNA gene panel"
result_df[result_df$pmid=="29892977",]$assay="RNA-Seq"
result_df[result_df$pmid=="29891556",]$assay="RNA-Seq"
result_df[result_df$pmid=="29735907",]$assay="DNA gene panel"
result_df[result_df$pmid=="29681619",]$assay="DNA gene panel"
result_df[result_df$pmid=="29659823",]$assay="WES"
result_df[result_df$pmid=="29657145",]$assay="RNA-Seq"
result_df[result_df$pmid=="29432186",]$assay="RNA-Seq"
result_df[result_df$pmid=="28791018",]$assay="DNA gene panel"
result_df[result_df$pmid=="28750028",]$assay="DNA gene panel"
result_df[result_df$pmid=="28728565",]$assay="RNA-Seq"
result_df[result_df$pmid=="28523199",]$assay="DNA gene panel"
result_df[result_df$pmid=="31856934",]$assay="WES"
result_df[result_df$pmid=="31848804",]$assay="DNA gene panel"
result_df[result_df$pmid=="31993940",]$assay="WES"
result_df[result_df$pmid=="31988805",]$assay="RNA-Seq"
result_df[result_df$pmid=="31598594",]$assay="DNA gene panel"
# Publication-year corrections (wrong/missing in the parsed XML).
result_df[result_df$pmid=="31856934",]$year=2019
result_df[result_df$pmid=="29739689",]$year=2018
result_df[result_df$pmid=="31003835",]$year=2019
result_df[result_df$pmid=="31538826",]$year=2019
result_df[result_df$pmid=="31003835",]$assay="Metagenomics"
result_df[result_df$pmid=="23666743",]$assay="WES"
result_df[result_df$pmid=="25285625",]$assay="DNA gene panel"
result_df[result_df$pmid=="25091625",]$assay="Epigenomics,RNA-Seq"
result_df[result_df$pmid=="24795478",]$assay="WGS,RNA-Seq"
result_df[result_df$pmid=="23606709",]$assay="RNA-Seq"
result_df[result_df$pmid=="32042094",]$assay="DNA gene panel"
result_df[result_df$pmid=="31926670",]$assay="Metagenomics"
result_df[result_df$pmid=="31921732",]$assay="Metagenomics"
result_df[result_df$pmid=="31898522",]$assay="RNA-Seq"
result_df[result_df$pmid=="31880128",]$assay="Metagenomics"
result_df[result_df$pmid=="31874111",]$assay="WGS,WES"
result_df[result_df$pmid=="31779271",]$assay="RNA-Seq"
result_df[result_df$pmid=="31832070",]$assay="DNA gene panel"
result_df[result_df$pmid=="31538826",]$assay="DNA gene panel"
result_df[result_df$pmid=="31523167",]$assay="RNA-Seq"
result_df[result_df$pmid=="31443670",]$assay="DNA gene panel"
result_df[result_df$pmid=="31412876",]$assay="DNA gene panel"
result_df[result_df$pmid=="31237906",]$assay="RNA-Seq"
result_df[result_df$pmid=="30987788",]$assay="DNA gene panel"
result_df[result_df$pmid=="30850477",]$assay="RNA-Seq"
result_df[result_df$pmid=="30836987",]$assay="RNA-Seq"
result_df[result_df$pmid=="30724444",]$assay="RNA-Seq"
result_df[result_df$pmid=="30700427",]$assay="Metagenomics"
result_df[result_df$pmid=="30544699",]$assay="RNA-Seq"
result_df[result_df$pmid=="30185675",]$assay="RNA-Seq"
result_df[result_df$pmid=="30123050",]$assay="RNA-Seq"
result_df[result_df$pmid=="29683194",]$assay="DNA gene panel"
result_df[result_df$pmid=="29614084",]$assay="DNA gene panel"
result_df[result_df$pmid=="29490685",]$assay="DNA gene panel"
result_df[result_df$pmid=="29465611",]$assay="DNA gene panel"
result_df[result_df$pmid=="29381936",]$assay="DNA gene panel"
result_df[result_df$pmid=="29371932",]$assay="RNA-Seq"
result_df[result_df$pmid=="29247798",]$assay="RNA-Seq"
result_df[result_df$pmid=="29137139",]$assay="RNA-Seq"
result_df[result_df$pmid=="29129473",]$assay="DNA gene panel"
result_df[result_df$pmid=="29123165",]$assay="RNA-Seq"
result_df[result_df$pmid=="28808260",]$assay="RNA-Seq"
result_df[result_df$pmid=="28597968",]$assay="WGS,WES"
result_df[result_df$pmid=="28495399",]$assay="DNA gene panel"
result_df[result_df$pmid=="28348750",]$assay="Metagenomics"
result_df[result_df$pmid=="28339495",]$assay="RNA-Seq"
result_df[result_df$pmid=="28179509",]$assay="Metagenomics"
result_df[result_df$pmid=="28115215",]$assay="WGS"
result_df[result_df$pmid=="27835701",]$assay="RNA-Seq"
result_df[result_df$pmid=="27821747",]$assay="PhIP-Seq"
result_df[result_df$pmid=="27571913",]$assay="Metagenomics"
result_df[result_df$pmid=="27542282",]$assay="RNA-Seq"
result_df[result_df$pmid=="27420474",]$assay="RNA-Seq"
result_df[result_df$pmid=="27390188",]$assay="DNA gene panel"
result_df[result_df$pmid=="29635721",]$assay="DNA gene panel"
result_df[result_df$pmid=="29200130",]$assay="DNA gene panel"
result_df[result_df$pmid=="28043923",]$assay="WES"
result_df[result_df$pmid=="27183593",]$assay="RNA-Seq"
result_df[result_df$pmid=="26815131",]$assay="DNA gene panel"
result_df[result_df$pmid=="26713667",]$assay="RNA-Seq"
result_df[result_df$pmid=="26603474",]$assay="DNA gene panel"
result_df[result_df$pmid=="26227771",]$assay="DNA gene panel"
result_df[result_df$pmid=="26001779",]$assay="RNA-Seq"
result_df[result_df$pmid=="25946710",]$assay="RNA-Seq"
result_df[result_df$pmid=="25700344",]$assay="DNA gene panel,RNA-Seq"
# PMID 25638290 is classified as WES. (A dead assignment that first set it to
# "DNA gene panel" and was immediately overwritten on the next line has been
# removed.)
result_df[result_df$pmid=="25638290",]$assay="WES"
result_df[result_df$pmid=="25498120",]$assay="DNA gene panel"
# Manual curation continued; trailing comments record the reason/evidence
# found during the per-PMID review.
result_df[result_df$pmid=="25335895",]$assay="RNA-Seq"
result_df[result_df$pmid=="30481710",]$assay="Metagenomics"
# NOTE(review): PMID 29465611 is already set to the identical value earlier in
# this script; this repeat is redundant but harmless.
result_df[result_df$pmid=="29465611",]$assay="DNA gene panel"
result_df[result_df$pmid=="29997562",]$assay="DNA gene panel"
result_df[result_df$pmid=="22472776",]$assay="DNA gene panel"
result_df[result_df$pmid=="22294635",]$assay="RNA-Seq"
result_df[result_df$pmid=="30746468",]$assay="RNA-Seq"
result_df[result_df$pmid=="31682074",]$assay="RNA-Seq"
result_df[result_df$pmid=="32265907",]$assay="RNA-Seq"
result_df[result_df$pmid=="32237059",]$assay="RNA-Seq"
result_df[result_df$pmid=="32199921",]$assay="DNA gene panel"
result_df[result_df$pmid=="32159782",]$assay="WGS"
result_df[result_df$pmid=="32115259",]$assay="RNA-Seq"
result_df[result_df$pmid=="24136464",]$assay="DNA gene panel"
result_df[result_df$pmid=="27402083",]$assay="RNA-Seq"
result_df[result_df$pmid=="28053302",]$assay="DNA gene panel"
result_df[result_df$pmid=="28053320",]$assay="RNA-Seq"
result_df[result_df$pmid=="28532706",]$assay="WES"
result_df[result_df$pmid=="29472286",]$assay="WES" #case report
result_df[result_df$pmid=="29500522",]$assay="DNA gene panel"
result_df[result_df$pmid=="29040051",]$assay="DNA gene panel" #case reports
result_df[result_df$pmid=="29774027",]$assay="Epigenomics" #bisulfite amplicon seq using NGS
result_df[result_df$pmid=="30962246",]$assay="RNA-Seq" #PRJNA427177
result_df[result_df$pmid=="31101603",]$assay="DNA gene panel" #TCR receptor seq
result_df[result_df$pmid=="31882654",]$assay="RNA-Seq" #BCR repertoire RA
result_df[result_df$pmid=="31926583",]$assay="RNA-Seq" #no making data available
result_df[result_df$pmid=="32191636",]$assay="RNA-Seq" #GEO but embargo
result_df[result_df$pmid=="32196497",]$assay="RNA-Seq" #re-used RNA-Seq data from e.g. SRA
result_df[result_df$pmid=="32332704",]$assay="RNA-Seq" #no making data available
result_df[result_df$pmid=="32365362",]$assay="RNA-Seq" #no making data available
result_df[result_df$pmid=="32403239",]$assay="RNA-Seq" #no making data available
result_df[result_df$pmid=="32471379",]$assay="DNA gene panel" #case reports
result_df[result_df$pmid=="32510848",]$assay="DNA gene panel"
result_df[result_df$pmid=="32518584",]$assay="RNA-Seq" #RA, datasets available on request
result_df[result_df$pmid=="32552384",]$assay="DNA gene panel" #case reports
result_df[result_df$pmid=="32560314",]$assay="RNA-Seq" #PsA, no making data available
result_df[result_df$pmid=="32565918",]$assay="Metagenomics"
result_df[result_df$pmid=="32607330",]$assay="DNA gene panel"
result_df[result_df$pmid=="32659156",]$assay="WES" #case report
# PMID 32714036: duplicate, byte-identical assignment removed (it appeared twice).
result_df[result_df$pmid=="32714036",]$assay="Metagenomics" #case report Virus identification
result_df[result_df$pmid=="32735477",]$assay="Metagenomics" #case report Virus identification
result_df[result_df$pmid=="32745911",]$assay="RNA-Seq" #OA, no making data available
result_df[result_df$pmid=="32807082",]$assay="Metagenomics" #case report Virus identification
result_df[result_df$pmid=="32818496",]$assay="RNA-Seq" #SLE (LN), https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE155405 (Mouse)
result_df[result_df$pmid=="31651337",]$assay="Epigenomics" #whole genome bisulfite seq using HiSeq4000
result_df[result_df$pmid=="32438470",]$assay="Epigenomics" #bioinformatics whole genome bisulfite seq
# PMID correction; the numeric RHS is coerced to character by the pmid column.
result_df[result_df$pmid=="31926583",]$pmid=31465725
#Epigenomics=bisulfite seq using NGS, ATAC-Seq
#DNA gene panel=targeted re-sequencing
#delete entries
# The PMIDs below were excluded after manual review; the comments document why
# (no NGS, wrong organism/disease, not a primary research article, etc.).
#no information about NGS assay: 24065968,29607230,32264838,23942639
#no NGS: 21262369,22046267 (GWAS),20514598, 29257335 microarray, 28686088,22402741,22275833,22627728,18037627,18390570,20842748,16227418,15022312,11317015,10750629,26705675,30679579,27421624,
#26984631,25990289 microarray,25828588,28417081,31347786,30906878,12522564,21192791,21436623,21865261,22014533 ChIP microarray,22300536, 23340290,23353785,23950730 (Methylation400 assay),
#24812288, 25261579 (HumanMethylation450 BeadChip arrays), 25537884,25661834 (microarray), 25789080, 26079624, 26492816, 26556652(HumanMethylation450 BeadChips), 26628598(Microarray), 27325182, 27818202
#27876072, 27885849(HumanMethylation450 BeadChips),28081217,28430662,28475762(HumanMethylation450 BeadChips), 28785300(GWAS), 29018507(BS-Seq),26111028,26787370(BS-Seq),29422534,30385847,
#30598166,30779748,31467281(llumina MethylationEPIC BeadChip),31513634(Infinium MethylationEPIC BeadChip array),32509015(microarray),25414732,29854846,32641481
#32581359:GWAS,24445253, 24223582 GWAS,24013639 GWAS
#not disease defined in 1st round:
#29769526, 31021403 periodontal diagnosis, 30100989,31033783 oncology, 31745718, 30337930,#26182405 Aicardi-Goutières-Syndrom,29710329 meninigitis,31006511 not using Rheuma seq data, 29434599(keratinocyes)
#32489448 glioma,31091335,31081255,30974145,30313048(myocard infarct),29608426(zika virus infection),29184082(cervical cancer),28920417
#no access and no info of assay in abstract: #30441946,31494232,30946936,30054207,30774014,31586650,25799352,25541309,25373782,25791244,25791245,29113828,30523707,31693422,31755746,30304699,31883828,32157911
#29674726 astrovirus in goslings, 28012117, 31744152 in chicken, #29920970 goose-origin astrovirus, 32532177 (pig)
#not english: 30463656,23981988,30124204 article in chinese
#retracted: 30643044
#not primary research article:#31652165,25598697,24070009,22933063,30632919,31858722 review, 26284111 PERSPECTIVE ARTICLE,28286571 Commentary about NGS and gut microbiome,21273237,
#23025638 & 23244304 Editorial,23275983, 25167330 Commentary,25366188,25557657, 26231343 (editorial), 27028588(meeting report),27482953(editorial),27607472(review),28447857(editorial),
#25165988(Commetary),28770613(review),29644081(review&meta-analysis),31699217,31876195,31902241(review&meta-analysis),32203190(review),32475529(book chapter, review),32611772(editorial),32746644(review),32806879(review)
#30399325(review)
deletepmids=c("32264838","31494232","31744152","30632919","22933063","25373782","25541309","25799352","26182405","26705675","28012117","28286571","28417081",
"28686088","29257335","29674726","29769526","29920970","30100989","30441946","30463656","30643044","30946936","31021403","31033783","31652165",
"31745718","26284111","22402741","25598697","24065968","24070009","22275833","22627728","18037627","18390570","20842748","16227418","15022312",
"11317015","10750629","30054207","30774014","31586650","30337930","30679579","31858722","29607230","29710329","27421624","26984631","25990289",
"25828588","31347786","31006511","30906878","12522564","21192791","21273237","21436623","21865261","22014533","22300536","23025638","23244304",
"23275983","23340290","23353785","23942639","23950730","23981988","24812288","25167330","25261579","25366188","25537884","25557657","25661834",
"25789080","25791244","25791245","26079624","26231343","26492816","26556652","26628598","27028588","27325182","27482953","27607472","27818202",
"27876072","27885849","28081217","28430662","28447857","25165988","28475762","28770613","28785300","29018507","26787370","26111028","29113828",
"29422534","29644081","30523707","31693422","31755746","30124204","30304699","30385847","30598166","30779748","31467281","31513634","31699217",
"31876195","31883828","31902241","32157911","32203190","32475529","32509015","32532177","32611772","32746644","32806879","25414732","29854846",
"32641481","29434599","32581359","32489448","31091335","31081255","30974145","30399325","30313048","29608426","29184082","28920417","24445253",
"24223582","24013639","20514598","21262369","22046267")
result_df=result_df[(!result_df$pmid %in% deletepmids),]
#Aicardi-Goutières syndrome–like
# Row removal pattern: blank the row with NA, then drop it via na.omit().
result_df[result_df$pmid=="31874111" & result_df$disease=="Gout",]=NA
result_df <- na.omit(result_df)
#no NGS on SLE samples
result_df[result_df$pmid=="29720240" & result_df$disease=="SLE",]=NA
result_df <- na.omit(result_df)
#manually add studies identified via SRA project search
result_df=rbind(result_df,c("SLE",24645875,"RNA-Seq","Connective tissue research",2014,"RNA-Seq"))
result_df=rbind(result_df,c("SLE",31263277,"Epigenomics,RNA-Seq","Nature Immunology",2019,"Other"))
result_df=rbind(result_df,c("SLE",31890206,"RNA-Seq","Clinical & translational immunology",2019,"RNA-Seq"))
result_df=rbind(result_df,c("SLE",29717110,"RNA-Seq","Nature communications",2018,"RNA-Seq"))
result_df=rbind(result_df,c("SLE",28420548,"RNA-Seq","Journal of autoimmunity",2017,"RNA-Seq"))
result_df=rbind(result_df,c("SLE",30130253,"RNA-Seq","The Journal of clinical investigation",2018,"RNA-Seq"))
result_df=rbind(result_df,c("SLE",31200750,"RNA-Seq","Arthritis research & therapy",2019,"RNA-Seq"))
result_df=rbind(result_df,c("SLE",30478422,"Epigenomics,RNA-Seq","Nature immunology",2019,"Other"))
result_df=rbind(result_df,c("SLE",31616406,"RNA-Seq","Frontiers in immunology",2019,"RNA-Seq"))
result_df=rbind(result_df,c("RA",31616406,"RNA-Seq","Frontiers in immunology",2019,"RNA-Seq"))
result_df=rbind(result_df,c("Sjögren's Syndrome",31616406,"RNA-Seq","Frontiers in immunology",2019,"RNA-Seq"))
#Rename categories
# ---- Harmonise assay labels ----
# Collapse equivalent / duplicated assay strings into one canonical label.
# NOTE(review): each `result_df[result_df$assay=="X",]$assay="Y"` assumes at
# least one matching row exists; a zero-row match would error — presumably
# every listed category occurs in the data, confirm if inputs change.
result_df[result_df$assay=="RNA-Seq,WGS",]$assay="WGS,RNA-Seq"
result_df[result_df$assay=="Epigenomics,Epigenomics",]$assay="Epigenomics"
result_df[result_df$assay=="Epigenomics,Epigenomics,RNA-Seq",]$assay="Epigenomics,RNA-Seq"
result_df[result_df$assay=="WES,WGS",]$assay="WGS,WES"
result_df[result_df$assay=="single",]$assay="scRNA-Seq"
result_df[result_df$assay=="single,single",]$assay="scRNA-Seq"
result_df[result_df$assay=="RNA-Seq,single",]$assay="scRNA-Seq"
result_df[result_df$assay=="WES,single",]$assay="WES,scRNA-Seq"
result_df[result_df$assay=="RNA-Seq,single,single",]$assay="scRNA-Seq"
result_df[result_df$assay=="DNA gene panel",]$assay="Targeted DNA Seq"
result_df[result_df$assay=="DNA gene panel,RNA-Seq",]$assay="Targeted DNA Seq,RNA-Seq"
# assay_main buckets rare assay types into "Other" for the main figures;
# the detailed `assay` column is kept for the supplementary breakdown.
result_df$assay_main=result_df$assay
result_df[result_df$assay=="Metagenomics",]$assay_main="Other"
result_df[result_df$assay=="Metagenomics,RNA-Seq,WES",]$assay_main="Other"
result_df[result_df$assay=="Epigenomics",]$assay_main="Other"
result_df[result_df$assay=="Epigenomics,RNA-Seq",]$assay_main="Other"
result_df[result_df$assay=="Epigenomics,RNA-Seq,WES",]$assay_main="Other"
result_df[result_df$assay=="Epigenomics,WGS",]$assay_main="Other"
result_df[result_df$assay=="Metagenomics,RNA-Seq",]$assay_main="Other"
result_df[result_df$assay=="PhIP-Seq",]$assay_main="Other"
# Abbreviate long disease names for compact axis labels.
result_df[result_df$disease=="Systemic Lupus Erythematosus",]$disease="SLE"
result_df[result_df$disease=="Familial Mediterranean Fever",]$disease="FMF"
result_df[result_df$disease=="Rheumatoid Arthritis",]$disease="RA"
result_df[result_df$disease=="Juvenile Idiopathic Arthritis",]$disease="JIA"
result_df[result_df$disease=="Autoinflammatory Syndrome",]$disease="AutoSyn"
result_df[result_df$disease=="Granulomatosis With Polyangiitis",]$disease="GPA"
# Persist the curated table (absolute, machine-specific path).
saveRDS(result_df,file="/Users/sebastian/pubmed_rheuma_HTS/result_df_11Sep2020.RDS")
#-------redo all figures from paper--------------------------------
#Figure 3: diseases vs assay
# df2: publication count per disease, drawn as text labels at y = 200.
df2=result_df%>%
group_by(disease)%>%
count(disease,name="number")
# Stacked bar chart: diseases ordered by frequency, filled by main assay type.
ggplot(data=result_df, aes(x=fct_infreq(disease),fill=assay_main)) + geom_bar(position="stack",stat="count") +
labs(y="# publications on pubmed") +
geom_text(data = df2, aes(x=disease,y=200,label=number,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,200,10)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S3. disease vs other_assays
# Detailed breakdown of the "Other" bucket from Figure 3: per-disease counts
# stacked by the full assay label.
ggplot(data=subset(result_df,assay_main=="Other"), aes(x=fct_infreq(disease),fill=assay)) + geom_bar(position="stack",stat="count") +
labs(y="# publications on pubmed") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
# result_df_unique: drop the disease column and deduplicate, so a publication
# covering several diseases is counted only once in the per-year figures.
result_df_unique=result_df
result_df_unique$disease=NULL
result_df_unique=unique(result_df_unique)
#S2: scRNA-Seq vs year
# Number of unique single-cell publications per year.
ggplot(data=subset(result_df_unique, result_df_unique$assay %in% c("scRNA-Seq","WES,scRNA-Seq")), aes(x=year,fill=assay)) + geom_bar(position="stack",stat="count") +
labs(y="# unique publications on pubmed") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,25,5)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure 2: publications vs year
# df2: unique-publication count per year, drawn as labels at y = 200.
df2=result_df_unique%>%
group_by(year)%>%
count(year,name="number")
# Kept in a variable so Figure S1 can overlay the exponential fit on it.
abundance_plot=ggplot(data=result_df_unique, aes(x=year,fill=assay_main)) + geom_bar(position="stack",stat="count") +
labs(y="# unique publications on pubmed") +
geom_text(data = df2, aes(x=year,y=200,label=number,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,400,10)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
abundance_plot
#FigureS1: exponential growth
# Fit log-linear model (i.e. exponential growth) to publication counts.
occurences=table(unlist(result_df_unique$year))
# NOTE(review): positions 9:18 of the year table are selected by index — this
# assumes a fixed set of years in the data; verify if the dataset changes.
year_abundance=as.data.frame(occurences[9:18])
names(year_abundance)=c("year","abundance")
df <- cbind(year_abundance, index = 1:nrow(year_abundance))
fit <- lm(log(abundance) ~ index, data = df)
# Overlay the back-transformed exponential fit on the Figure 2 bar chart.
abundance_plot + stat_function(fun = function(x) exp(fit$coefficients[1] + x*fit$coefficients[2]))
#------journals vs assays
# Frequency table of journals across unique publications.
journal_freq=table(result_df_unique$journal)
journal_freq=as.data.frame(journal_freq)
names(journal_freq)=c("journal","freq")
journal_freq$freq=as.numeric(journal_freq$freq)
# Show only journals with at least 8 unique publications, stacked by assay.
ggplot(data=subset(result_df_unique,journal %in% journal_freq[journal_freq$freq>=8,]$journal), aes(x=fct_infreq(journal),fill=assay_main)) + geom_bar(position="stack",stat="count") +
labs(y="# unique publications on pubmed") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,60,10)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
# ---- Build a title/abstract table for all retained publications ----
# `abstracts_list` and `custom_grep()` are defined earlier in the original
# session (not visible in this chunk). Rows are grown with rbind(), which is
# quadratic but acceptable for this number of publications.
TableForPub=NULL
for (key in abstracts_list){
pubmedid=custom_grep(key,"PMID","char")
if (pubmedid[1] %in% result_df$pmid){
title=custom_grep(key,"ArticleTitle","char")
abstract = custom_grep(key,"Abstract","char")
TableForPub=rbind(TableForPub,c(pubmedid[1],title,abstract))
}
}
#create table with title and abstract of all identified publications
TableForPub=as.data.frame(TableForPub)
names(TableForPub)=c("pmid","title","abstract")
#export both result tables
write.csv2(result_df,"~/pubmed_rheuma_HTS/main_results.csv")
write.csv2(TableForPub,"~/pubmed_rheuma_HTS/title_and_abstracts.csv")
#number of assays in unique publications
# Count unique publications per assay family: RNA-based, single-cell,
# WES/targeted, metagenomics, WGS, and epigenomics respectively.
length(unique(result_df[result_df$assay %in% c("RNA-Seq","Epigenomics,RNA-Seq","Epigenomics,RNA-Seq,WES","Metagenomics,RNA-Seq","Metagenomics,RNA-Seq,WES","RNA-Seq,WES","scRNA-Seq","Targeted DNA Seq,RNA-Seq","WES,scRNA-Seq","WGS,RNA-Seq"),]$pmid))
length(unique(result_df[result_df$assay %in% c("scRNA-Seq","WES,scRNA-Seq"),]$pmid))
length(unique(result_df[result_df$assay %in% c("WES","Epigenomics,RNA-Seq,WES","Metagenomics,RNA-Seq,WES","RNA-Seq,WES","WES,scRNA-Seq","WGS,WES","Targeted DNA Seq","Targeted DNA Seq,RNA-Seq"),]$pmid))
length(unique(result_df[result_df$assay %in% c("Metagenomics","Metagenomics,RNA-Seq,WES","Metagenomics,RNA-Seq"),]$pmid))
length(unique(result_df[result_df$assay %in% c("WGS","Epigenomics,WGS","WGS,RNA-Seq","WGS,WES"),]$pmid))
length(unique(result_df[result_df$assay %in% c("Epigenomics","Epigenomics,WGS","Epigenomics,RNA-Seq","Epigenomics,RNA-Seq,WES"),]$pmid))
#2nd analysis----------------------------------------------------
#no datasets: FMF, enthesitis, polychondritis
#gout: only cancer datasets
#-----------Figure 4 -------SRA datasets
# Sample counts of SRA datasets per disease, stacked by assay type.
sra_datasets <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_assays.tsv")
sra_datasets=as.data.frame(sra_datasets)
sra_datasets$assay=as.character(sra_datasets$assay)
# Map raw SRA library-strategy strings to the display labels used in figures.
sra_datasets[sra_datasets$assay=="chip",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="chip-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="mirna-seq",]$assay="miRNA/ncRNA-Seq"
sra_datasets[sra_datasets$assay=="ncrna-seq",]$assay="miRNA/ncRNA-Seq"
#Methylation=Bisulfite-Seq, MEDIP-Seq/MRE-Seq
sra_datasets[sra_datasets$assay=="bisulfite-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="atac-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="dnase-hypersensitivity",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="medip-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="mre-seq",]$assay="Epigenomics"
sra_datasets[sra_datasets$assay=="amplicon",]$assay="Targeted-capture"
sra_datasets[sra_datasets$assay=="targeted-capture",]$assay="Targeted-capture"
sra_datasets[sra_datasets$assay=="rna-seq",]$assay="RNA-Seq"
sra_datasets[sra_datasets$assay=="wgs",]$assay="WGS"
sra_datasets[sra_datasets$assay=="wxs",]$assay="WXS"
sra_datasets[sra_datasets$assay=="tn-seq",]$assay="TN-Seq"
sra_datasets[sra_datasets$assay=="mbd-seq",]$assay="MBD-Seq"
sra_datasets[sra_datasets$assay=="hi-c",]$assay="Hi-C"
sra_datasets[sra_datasets$assay=="other",]$assay="Other"
sra_datasets$disease=as.character(sra_datasets$disease)
sra_datasets$samples=as.integer(sra_datasets$samples)
# Normalise internal disease codes to display names.
sra_datasets[sra_datasets$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_datasets[sra_datasets$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_datasets[sra_datasets$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# df2: total sample count per disease; orders the x axis and labels the bars.
df2=sra_datasets%>%
group_by(disease)%>%
summarise(samples=sum(samples))
sra_datasets$disease=factor(sra_datasets$disease,levels=df2[order(df2$samples,decreasing = TRUE),]$disease)
ggplot(data=sra_datasets, aes(x=disease,y=samples,fill=assay)) + geom_bar(position="stack",stat="identity") +
geom_text(data = df2, aes(x=disease,y=9000,label=samples,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S5 ------------samples per study----------------------------
# Distribution of samples per SRA study, by disease: jitter + boxplot on a
# log10 y axis, annotated with the number of studies (top) and the median.
sra_studies <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_studies.tsv")
sra_studies=as.data.frame(sra_studies)
sra_studies$number=as.integer(sra_studies$number)
sra_studies$disease=as.character(sra_studies$disease)
sra_studies[sra_studies$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_studies[sra_studies$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
# FIX: use the same spelling as every other SRA section ("Sjögren's
# Syndrome", previously "Sjoegren's Syndrome") so axis labels are
# consistent across all figures.
sra_studies[sra_studies$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# df2: number of studies per disease (used for axis ordering and labels).
df2=sra_studies%>%
group_by(disease)%>%
count(disease,name="number")
sra_studies$disease=factor(sra_studies$disease,levels=df2[order(df2$number,decreasing = TRUE),]$disease)
# Per-disease median samples per study, printed above each box.
dataMedian <- summarise(group_by(sra_studies, disease), MD = median(number))
ggplot(data=sra_studies, aes(x=disease,y=number)) + geom_jitter() + geom_boxplot(alpha = 0.2,outlier.shape = NA) + scale_y_log10() +
geom_text(data = df2, aes(x=disease,y=8000,label=number,angle=60),inherit.aes = FALSE) +
geom_text(data = dataMedian, aes(disease, MD, label = MD), position = position_dodge(width = 0.8), size = 3, vjust = -0.5) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) + ylab("number of samples per study") +
theme(legend.title = element_blank()) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#FigureS9-------sequencing_instrument
# Sample counts per disease, stacked by sequencing instrument.
sra_instruments <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_instrument.tsv")
sra_instruments=as.data.frame(sra_instruments)
sra_instruments$instrument=as.character(sra_instruments$instrument)
sra_instruments$disease=as.character(sra_instruments$disease)
sra_instruments$samples=as.integer(sra_instruments$samples)
# Drop rows with zero samples: convert 0 to NA, then keep complete cases.
sra_instruments[sra_instruments==0] <- NA
sra_instruments=sra_instruments[complete.cases(sra_instruments),]
sra_instruments[sra_instruments$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_instruments[sra_instruments$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_instruments[sra_instruments$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# Total samples per disease, used for axis ordering and the bar labels.
df_instrument=sra_instruments%>%
group_by(disease)%>%
summarise(samples=sum(samples))
sra_instruments$disease=factor(sra_instruments$disease,levels=df_instrument[order(df_instrument$samples,decreasing = TRUE),]$disease)
ggplot(data=sra_instruments, aes(x=disease,y=samples,fill=instrument)) + geom_bar(position="stack",stat="identity") +
geom_text(data = df_instrument, aes(x=disease,y=9000,label=samples,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S10-------layout
# Sample counts per disease, stacked by library layout (single/paired).
sra_layout <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_layout.tsv")
sra_layout=as.data.frame(sra_layout)
sra_layout$layout=as.character(sra_layout$layout)
sra_layout$disease=as.character(sra_layout$disease)
sra_layout$samples=as.integer(sra_layout$samples)
#sra_layout[sra_layout==0] <- NA
#sra_layout=sra_layout[complete.cases(sra_layout),]
sra_layout[sra_layout$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_layout[sra_layout$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_layout[sra_layout$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# df2: total samples per disease, used for ordering and the bar labels.
df2=sra_layout%>%
group_by(disease)%>%
summarise(samples=sum(samples))
# FIX: order the factor levels by the aggregated totals in df2 (was
# `order(sra_layout$samples, ...)`, which indexes df2 with an ordering of the
# wrong, longer vector and produces an incorrect/NA level ordering). Every
# other SRA section orders by the df2 column.
sra_layout$disease=factor(sra_layout$disease,levels=df2[order(df2$samples,decreasing = TRUE),]$disease)
ggplot(data=sra_layout, aes(x=disease,y=samples,fill=layout)) + geom_bar(position="stack",stat="identity") +
geom_text(data = df2, aes(x=disease,y=9000,label=samples, angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
#scale_y_continuous(breaks=seq(0,9000,1000)) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#----------source------
#SRA datasets
# Sample counts per disease, stacked by sample source (library source).
sra_source <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_source.tsv")
sra_source=as.data.frame(sra_source)
sra_source$source=as.character(sra_source$source)
sra_source$disease=as.character(sra_source$disease)
sra_source$samples=as.integer(sra_source$samples)
sra_source[sra_source$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_source[sra_source$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_source[sra_source$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# df2: total samples per disease, used for ordering and the bar labels.
df2=sra_source%>%
group_by(disease)%>%
summarise(samples=sum(samples))
sra_source$disease=factor(sra_source$disease,levels=df2[order(df2$samples,decreasing = TRUE),]$disease)
ggplot(data=sra_source, aes(x=disease,y=samples,fill=source)) + geom_bar(position="stack",stat="identity") +
# FIX: the aes() call had a dangling comma leaving an empty argument
# (`label=samples,`); restore the `angle=60` label rotation used by every
# sibling figure in this script.
geom_text(data = df2, aes(x=disease,y=9000,label=samples,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure 5-------tissues
# Sample counts per disease, stacked by tissue of origin.
sra_tissue <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_tissue.tsv")
sra_tissue=as.data.frame(sra_tissue)
sra_tissue$tissue=as.character(sra_tissue$tissue)
sra_tissue$disease=as.character(sra_tissue$disease)
sra_tissue$samples=as.integer(sra_tissue$samples)
# Drop rows with zero samples: convert 0 to NA, then keep complete cases.
sra_tissue[sra_tissue==0] <- NA
sra_tissue=sra_tissue[complete.cases(sra_tissue),]
sra_tissue[sra_tissue$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_tissue[sra_tissue$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_tissue[sra_tissue$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# Total samples per disease, used for axis ordering.
df2=sra_tissue%>%
group_by(disease)%>%
summarise(samples=sum(samples))
sra_tissue$disease=factor(sra_tissue$disease,levels=df2[order(df2$samples,decreasing = TRUE),]$disease)
# tissue_main buckets rare tissues into "other" for the main figure; the
# second plot below breaks the "other" bucket down by full tissue label.
sra_tissue$tissue_main=sra_tissue$tissue
sra_tissue[sra_tissue$tissue=="fibroblast",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="hip",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="inner ear",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="joint",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="knee",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="lymph node",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="muscle",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="peritoneal lavage",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="retina",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="salivary gland",]$tissue_main="other"
sra_tissue[sra_tissue$tissue=="spleen",]$tissue_main="other"
ggplot(data=sra_tissue, aes(x=disease,y=samples,fill=tissue_main)) + geom_bar(position="stack",stat="identity") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
#scale_y_continuous(breaks=seq(0,9000,1000)) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
ggplot(data=subset(sra_tissue,tissue_main=="other"), aes(x=disease,y=samples,fill=tissue)) + geom_bar(position="stack",stat="identity") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
#scale_y_continuous(breaks=seq(0,9000,1000)) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,250,10)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
# Per-tissue totals; computed here for interactive inspection (result is not
# used by any visible downstream code in this chunk).
df2=sra_tissue%>%
group_by(tissue)%>%
summarise(samples=sum(samples))
#Figure S6----------organism------
# Sample counts per disease, stacked by organism.
sra_organism <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_organism.tsv")
sra_organism=as.data.frame(sra_organism)
sra_organism$organism=as.character(sra_organism$organism)
sra_organism$disease=as.character(sra_organism$disease)
sra_organism$samples=as.integer(sra_organism$samples)
# Collapse individual bacterial species into a single "bacteria" category.
sra_organism[sra_organism$organism=="klebsiella pneumoniae",]$organism="bacteria"
sra_organism[sra_organism$organism=="pseudomonas aeruginosa",]$organism="bacteria"
sra_organism[sra_organism$organism=="staphylococcus aureus",]$organism="bacteria"
sra_organism[sra_organism$organism=="staphylococcus pseudintermedius",]$organism="bacteria"
sra_organism[sra_organism$organism=="streptococcus pyogenes",]$organism="bacteria"
sra_organism[sra_organism$organism=="uncultured bacterium",]$organism="bacteria"
sra_organism[sra_organism$organism=="flavobacterium psychrophilum",]$organism="bacteria"
sra_organism[sra_organism$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_organism[sra_organism$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_organism[sra_organism$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# Total samples per disease, used for ordering and the bar labels.
df2=sra_organism%>%
group_by(disease)%>%
summarise(samples=sum(samples))
sra_organism$disease=factor(sra_organism$disease,levels=df2[order(df2$samples,decreasing = TRUE),]$disease)
ggplot(data=sra_organism, aes(x=disease,y=samples,fill=organism)) + geom_bar(position="stack",stat="identity") +
geom_text(data = df2, aes(x=disease,y=9000,label=samples,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S7--------phenotype--------
# Sample counts per disease, stacked by phenotype (case/control etc.).
sra_phenotype <- read.delim("/Users/sebastian/pubmed_rheuma_HTS/sra_phenotype.tsv")
sra_phenotype=as.data.frame(sra_phenotype)
sra_phenotype$phenotype=as.character(sra_phenotype$phenotype)
sra_phenotype$disease=as.character(sra_phenotype$disease)
sra_phenotype$samples=as.integer(sra_phenotype$samples)
sra_phenotype[sra_phenotype$disease=="MyoPolyDerma",]$disease="(poly/derma)myositis"
sra_phenotype[sra_phenotype$disease=="SysSclerosis",]$disease="Systemic Sclerosis"
sra_phenotype[sra_phenotype$disease=="Sjoegren",]$disease="Sjögren's Syndrome"
# Total samples per disease, used for ordering and the bar labels.
df2=sra_phenotype%>%
group_by(disease)%>%
summarise(samples=sum(samples))
sra_phenotype$disease=factor(sra_phenotype$disease,levels=df2[order(df2$samples,decreasing = TRUE),]$disease)
ggplot(data=sra_phenotype, aes(x=disease,y=samples,fill=phenotype)) + geom_bar(position="stack",stat="identity") +
geom_text(data = df2, aes(x=disease,y=9000,label=samples,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#------sra vs pubmed------
# Compare RNA-seq SLE publications found in PubMed vs. linked in SRA.
sra_pubmed <- read.delim("~/pubmed_rheuma_HTS/sra_pubmed_copy.tsv")
sra_pubmed=as.data.frame(sra_pubmed)
sra_pubmed$assay=as.character(sra_pubmed$assay)
sra_pubmed$disease=as.character(sra_pubmed$disease)
sra_pubmed$patient.information=as.character(sra_pubmed$patient.information)
SLE_pubmed=sra_pubmed[sra_pubmed$disease=="SLE",]
# RNA-based SLE entries from SRA (full rows, and just the PubMed IDs).
SLE_pubmed_rna_patient_info=SLE_pubmed[SLE_pubmed$assay %in% c("rna-seq","ncrna-seq","mirna-seq"),]
SLE_pubmed_rna=SLE_pubmed[SLE_pubmed$assay %in% c("rna-seq","ncrna-seq","mirna-seq"),]$pubmed
# RNA-based SLE publications from the curated PubMed table.
SLE_result_df=result_df[result_df$disease=="SLE",]
SLE_result_df_rna=SLE_result_df[SLE_result_df$assay %in% c("RNA-Seq","scRNA-Seq","Targeted DNA Seq,RNA-Seq","Epigenomics,RNA-Seq"),]$pmid
# Overlap and PubMed-only set (publications without an SRA-linked dataset).
intersect(SLE_result_df_rna,SLE_pubmed_rna)
diff_pubmed_result_df=setdiff(SLE_result_df_rna,SLE_pubmed_rna)
sra_pubmed_missing <- read.delim("~/pubmed_rheuma_HTS/data_availability_sle.txt")
# Sanity check: PubMed-only publications not covered by the manual
# data-availability screening file.
setdiff(diff_pubmed_result_df,sra_pubmed_missing$pmid)
#export results
write.csv2(SLE_pubmed_rna_patient_info,"~/pubmed_rheuma_HTS/SLE_pubmed_rna_patient_info.csv")
#Figure S12-----plot patient information---------------------------
# Counts of patient-information availability categories for SLE RNA studies.
df2=SLE_pubmed_rna_patient_info%>%
count(patient.information,name="number")
SLE_pubmed_rna_patient_info$patient.information=factor(SLE_pubmed_rna_patient_info$patient.information,levels=df2[order(df2$number,decreasing = TRUE),]$patient.information)
ggplot(data=SLE_pubmed_rna_patient_info, aes(x=patient.information)) + geom_bar() +
geom_text(data = df2, aes(x=patient.information,y=10,label=number),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
# NOTE(review): no fill aesthetic is mapped, so scale_fill_brewer() has no
# visible effect here — presumably left over from the other figures.
scale_fill_brewer(palette = "Paired") + scale_y_continuous(breaks=seq(0,15,5)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
#Figure S8-----plot raw data availability---------------------------
# Counts of raw-data availability categories for SLE publications missing
# from SRA (from the manual data_availability_sle.txt screening).
df2=sra_pubmed_missing%>%
#group_by(availability)%>%
count(availability,name="number")
sra_pubmed_missing$availability=factor(sra_pubmed_missing$availability,levels=df2[order(df2$number,decreasing = TRUE),]$availability)
ggplot(data=sra_pubmed_missing, aes(x=availability)) + geom_bar() +
geom_text(data = df2, aes(x=availability,y=35,label=number,angle=60),inherit.aes = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size=13, colour="black"),
axis.title.x = element_blank(), axis.text.y=element_text(colour="black", size = 13), axis.title.y=element_text(colour="black", size = 14)) +
theme(legend.title = element_blank()) +
# NOTE(review): no fill aesthetic is mapped, so scale_fill_brewer() has no
# visible effect here — presumably left over from the other figures.
scale_fill_brewer(palette = "Paired") + #scale_y_continuous(breaks=seq(0,9000,1000)) +
theme(axis.line = element_line(size=1, colour = "black"), panel.grid.major = element_line(colour = "#d3d3d3"),
panel.grid.minor = element_blank(), panel.background = element_blank())
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_functions4Fitting.R
\name{rmarkovchain}
\alias{rmarkovchain}
\title{Function to generate a sequence of states from homogeneous or non-homogeneous Markov chains.}
\usage{
rmarkovchain(n, object, what = "data.frame", useRCpp = TRUE,
parallel = FALSE, num.cores = NULL, ...)
}
\arguments{
\item{n}{Sample size}
\item{object}{Either a \code{markovchain} or a \code{markovchainList} object}
\item{what}{It specifies whether either a \code{data.frame} or a \code{matrix}
(each rows represent a simulation) or a \code{list} is returned.}
\item{useRCpp}{Boolean. Should the Rcpp fast implementation be used? Defaults to \code{TRUE}.}
\item{parallel}{Boolean. Should the parallel implementation be used? Defaults to \code{FALSE}.}
\item{num.cores}{Number of Cores to be used}
\item{...}{additional parameters passed to the internal sampler}
}
\value{
Character Vector, data.frame, list or matrix
}
\description{
Given a \code{markovchain} or \code{markovchainList} object, it simulates and returns a
sequence of states drawn according to the chain's underlying transition probabilities.
}
\details{
When a homogeneous process is assumed (\code{markovchain} object), a sequence of
size n is sampled. When a non-homogeneous process is assumed
(\code{markovchainList} object), n samples are taken, each spanning the process
from the beginning to the end of the non-homogeneous Markov process.
}
\note{
Check the type of input
}
\examples{
# define the markovchain object
statesNames <- c("a", "b", "c")
mcB <- new("markovchain", states = statesNames,
transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1),
nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames)))
# show the sequence
outs <- rmarkovchain(n = 100, object = mcB, what = "list")
#define markovchainList object
statesNames <- c("a", "b", "c")
mcA <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mcB <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mcC <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC))
# show the list of sequence
rmarkovchain(100, mclist, "list")
}
\references{
A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010
}
\seealso{
\code{\link{markovchainFit}}, \code{\link{markovchainSequence}}
}
\author{
Giorgio Spedicato
}
| /man/rmarkovchain.Rd | no_license | abelborges/markovchain | R | false | true | 2,779 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_functions4Fitting.R
\name{rmarkovchain}
\alias{rmarkovchain}
\title{Function to generate a sequence of states from homogeneous or non-homogeneous Markov chains.}
\usage{
rmarkovchain(n, object, what = "data.frame", useRCpp = TRUE,
parallel = FALSE, num.cores = NULL, ...)
}
\arguments{
\item{n}{Sample size}
\item{object}{Either a \code{markovchain} or a \code{markovchainList} object}
\item{what}{It specifies whether either a \code{data.frame} or a \code{matrix}
(each rows represent a simulation) or a \code{list} is returned.}
\item{useRCpp}{Boolean. Should RCpp fast implementation being used? Default is yes.}
\item{parallel}{Boolean. Should parallel implementation being used? Default is yes.}
\item{num.cores}{Number of Cores to be used}
\item{...}{additional parameters passed to the internal sampler}
}
\value{
Character Vector, data.frame, list or matrix
}
\description{
Given a \code{markovchain} or \code{markovchainList} object, it simulates and returns a
sequence of states drawn according to the chain's underlying transition probabilities.
}
\details{
When a homogeneous process is assumed (\code{markovchain} object), a sequence of
size n is sampled. When a non-homogeneous process is assumed
(\code{markovchainList} object), n samples are taken, each spanning the process
from the beginning to the end of the non-homogeneous Markov process.
}
\note{
Check the type of input
}
\examples{
# define the markovchain object
statesNames <- c("a", "b", "c")
mcB <- new("markovchain", states = statesNames,
transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1),
nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames)))
# show the sequence
outs <- rmarkovchain(n = 100, object = mcB, what = "list")
#define markovchainList object
statesNames <- c("a", "b", "c")
mcA <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mcB <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mcC <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC))
# show the list of sequence
rmarkovchain(100, mclist, "list")
}
\references{
A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010
}
\seealso{
\code{\link{markovchainFit}}, \code{\link{markovchainSequence}}
}
\author{
Giorgio Spedicato
}
|
library(shiny)
library(shinydashboard)
# UI definition: a shinydashboard with five tabs presenting survey
# text-mining results (bigram network, TF-IDF, sentiment analysis)
# and the output of a Naive Bayes vegetarian classifier.
shinyUI(fluidPage(
  dashboardPage(skin = "green",
    dashboardHeader(title = "Dashboard"),
    # Sidebar: one menu entry per tab; each tabName must match a
    # tabItem id inside dashboardBody below.
    dashboardSidebar(
      sidebarMenu(
        menuItem("Overview", tabName = "Overview", icon = icon("th")),
        menuItem("Lifestyle", tabName = "Lifestyle", icon = icon("home")),
        menuItem("Survey_Characteristics", tabName = "Survey_Characteristics", icon = icon("dashboard")),
        menuItem("Greenhouse_Gases", tabName = "Greenhouse_Gases", icon = icon("leaf")),
        menuItem("Naive_Bayes", tabName = "Naive_Bayes", icon = icon("car"))
      )
    ),
    dashboardBody(
      tabItems(
        # Overview: static text introducing the research questions.
        tabItem(tabName = "Overview",
          titlePanel(h1(strong("What you thought you knew..."))),
          p("Survey aims to anwer the following questions:"),
          strong(p("Does the age range influence the lifestyle of an individual?")),
          strong(p("What is the distinct characteristics of our participants?")),
          strong(p("Are vegetarians more environmentally conscious?")),
          p("Finally, a predictive model to say if the person is vegetarian or not")),
        # Lifestyle: bigram network plot, filtered by age group.
        tabItem(tabName = "Lifestyle",
          h2("Bigram: Lifestyle"),
          p("Group 1 - less than 30: Active, Healty but Partying"),
          p("Group 2 - between 30 & 40: Stay balance, Extremely healthy, Prefers Gluten-Free & GMO-Free"),
          p("Group 3 - more 40: Relaxation activities"),
          selectInput(inputId="Choice",label="Your option pls", choices = c("< 30", "30-40", ">40"),selected = NULL,
                      multiple = FALSE, selectize = TRUE, width = NULL, size = NULL),
          mainPanel(
            plotOutput("bigramnet"))),
        # Survey characteristics: TF-IDF plot.
        tabItem(tabName = "Survey_Characteristics",
          mainPanel(
            h2("TF-IDF: Survey Characteristics"),
            p("Our respondents are mainly young people aged from 25 till 30 striving to lead a healthy & balanced lifestyle while occasionally consuming both rice & also fried food"),
            plotOutput("tfidf")
          )
        ),
        # Greenhouse gases: sentiment analysis split by diet choice.
        # FIX: removed the trailing commas that followed splitLayout(...)
        # and the inner mainPanel(...) -- they produced empty arguments.
        tabItem(tabName = "Greenhouse_Gases",
          mainPanel(
            h2("Sentiment Analysis: Greenhouse gases and Meals"),
            p("Being vegetarian doesn't necessarily mean they are more aware of greenhouse gas emission")
          ),
          selectInput(inputId="Decision", label="Vegetarian?", choices = c("Yes","No"), selected = NULL, multiple = FALSE,
                      selectize = TRUE, width = NULL, size = NULL),
          mainPanel(
            splitLayout(cellWidths = c("55%", "55%"), plotOutput("vegYorN"), plotOutput("QQ6"))
          )
        ),
        # Naive Bayes: printed model output.
        tabItem(tabName = "Naive_Bayes",
          mainPanel(
            h2("Naive Bayes Model"),
            p("Likelihood of being a vegetarian or non-vegetarian by model with 83.3% accuracy"),
            verbatimTextOutput("nv"))
        )
      )))))
| /Dashboard (ui).R | no_license | Sheethal-crypto/Natural-Language-Processing-Dashboard | R | false | false | 3,893 | r | library(shiny)
library(shinydashboard)
# UI definition: a shinydashboard with five tabs presenting survey
# text-mining results (bigram network, TF-IDF, sentiment analysis)
# and the output of a Naive Bayes vegetarian classifier.
shinyUI(fluidPage(
  dashboardPage(skin = "green",
    dashboardHeader(title = "Dashboard"),
    # Sidebar: one menu entry per tab; each tabName must match a
    # tabItem id inside dashboardBody below.
    dashboardSidebar(
      sidebarMenu(
        menuItem("Overview", tabName = "Overview", icon = icon("th")),
        menuItem("Lifestyle", tabName = "Lifestyle", icon = icon("home")),
        menuItem("Survey_Characteristics", tabName = "Survey_Characteristics", icon = icon("dashboard")),
        menuItem("Greenhouse_Gases", tabName = "Greenhouse_Gases", icon = icon("leaf")),
        menuItem("Naive_Bayes", tabName = "Naive_Bayes", icon = icon("car"))
      )
    ),
    dashboardBody(
      tabItems(
        # Overview: static text introducing the research questions.
        tabItem(tabName = "Overview",
          titlePanel(h1(strong("What you thought you knew..."))),
          p("Survey aims to anwer the following questions:"),
          strong(p("Does the age range influence the lifestyle of an individual?")),
          strong(p("What is the distinct characteristics of our participants?")),
          strong(p("Are vegetarians more environmentally conscious?")),
          p("Finally, a predictive model to say if the person is vegetarian or not")),
        # Lifestyle: bigram network plot, filtered by age group.
        tabItem(tabName = "Lifestyle",
          h2("Bigram: Lifestyle"),
          p("Group 1 - less than 30: Active, Healty but Partying"),
          p("Group 2 - between 30 & 40: Stay balance, Extremely healthy, Prefers Gluten-Free & GMO-Free"),
          p("Group 3 - more 40: Relaxation activities"),
          selectInput(inputId="Choice",label="Your option pls", choices = c("< 30", "30-40", ">40"),selected = NULL,
                      multiple = FALSE, selectize = TRUE, width = NULL, size = NULL),
          mainPanel(
            plotOutput("bigramnet"))),
        # Survey characteristics: TF-IDF plot.
        tabItem(tabName = "Survey_Characteristics",
          mainPanel(
            h2("TF-IDF: Survey Characteristics"),
            p("Our respondents are mainly young people aged from 25 till 30 striving to lead a healthy & balanced lifestyle while occasionally consuming both rice & also fried food"),
            plotOutput("tfidf")
          )
        ),
        # Greenhouse gases: sentiment analysis split by diet choice.
        # FIX: removed the trailing commas that followed splitLayout(...)
        # and the inner mainPanel(...) -- they produced empty arguments.
        tabItem(tabName = "Greenhouse_Gases",
          mainPanel(
            h2("Sentiment Analysis: Greenhouse gases and Meals"),
            p("Being vegetarian doesn't necessarily mean they are more aware of greenhouse gas emission")
          ),
          selectInput(inputId="Decision", label="Vegetarian?", choices = c("Yes","No"), selected = NULL, multiple = FALSE,
                      selectize = TRUE, width = NULL, size = NULL),
          mainPanel(
            splitLayout(cellWidths = c("55%", "55%"), plotOutput("vegYorN"), plotOutput("QQ6"))
          )
        ),
        # Naive Bayes: printed model output.
        tabItem(tabName = "Naive_Bayes",
          mainPanel(
            h2("Naive Bayes Model"),
            p("Likelihood of being a vegetarian or non-vegetarian by model with 83.3% accuracy"),
            verbatimTextOutput("nv"))
        )
      )))))
|
\name{endosim}
\alias{endosim}
\docType{data}
\title{
Simulated endocrine data.
}
\description{
The \code{endosim} data set was simulated based on the data analyzed in Rodriguez-Alvarez et al. (2011a,b) and presented in Botana et al. (2007) and Tome et al. (2008). The aim of these studies was to use the Body Mass Index (BMI) to detect patients having a higher risk of cardiovascular
problems, ascertaining the possible effect of age and gender on the accuracy of this measure.
}
\usage{data(endosim)}
\format{
A data frame with 2840 observations on the following 4 variables.
\describe{
\item{\code{gender}}{patient's gender. Factor with \code{Male} and \code{Female} levels.}
\item{\code{age}}{patient's age.}
    \item{\code{idf_status}}{true disease status (presence/absence of two or more cardiovascular risk factors according to the International Diabetes Federation). Numerical vector (0=absence, 1=presence).}
\item{\code{bmi}}{patient's body mass index.}
}
}
\source{
Botana, M.A., Mato, J.A., Cadarso-Suarez, C., Tome, M.A., Perez-Fernandez, R., Fernandez-Mario, A., Rego-Iraeta, A., Solache, I. (2007). Overweight, obesity and central obesity prevalences in the region of Galicia in Northwest Spain. Obesity and Metabolism, 3, 106--115.
Tome, M.A., Botana, M.A., Cadarso-Suarez, C., Rego-Iraeta, A., Fernandez-Mario, A., Mato, J.A, Solache, I., Perez-Fernandez, R. (2008). Prevalence of metabolic syndrome in Galicia (NW Spain) on four alternative definitions and association with insulin resistance. Journal of Endocrinological Investigation, 32, 505--511.
}
\references{
Rodriguez-Alvarez, M.X., Roca-Pardinas, J. and Cadarso-Suarez, C. (2011a). ROC curve and covariates: extending induced methodology to the non-parametric framework. Statistics and Computing, 21(4), 483--499.
Rodriguez- Alvarez, M.X., Roca-Pardinas, J. and Cadarso-Suarez, C. (2011b). A new flexible direct ROC regression model - Application to the detection of cardiovascular risk factors by anthropometric measures. Computational Statistics and Data Analysis, 55(12), 3257--3270.
}
\examples{
data(endosim)
summary(endosim)
}
\keyword{datasets}
| /man/endosim.Rd | no_license | cran/npROCRegression | R | false | false | 2,191 | rd | \name{endosim}
\alias{endosim}
\docType{data}
\title{
Simulated endocrine data.
}
\description{
The \code{endosim} data set was simulated based on the data analyzed in Rodriguez-Alvarez et al. (2011a,b) and presented in Botana et al. (2007) and Tome et al. (2008). The aim of these studies was to use the Body Mass Index (BMI) to detect patients having a higher risk of cardiovascular
problems, ascertaining the possible effect of age and gender on the accuracy of this measure.
}
\usage{data(endosim)}
\format{
A data frame with 2840 observations on the following 4 variables.
\describe{
\item{\code{gender}}{patient's gender. Factor with \code{Male} and \code{Female} levels.}
\item{\code{age}}{patient's age.}
    \item{\code{idf_status}}{true disease status (presence/absence of two or more cardiovascular risk factors according to the International Diabetes Federation). Numerical vector (0=absence, 1=presence).}
\item{\code{bmi}}{patient's body mass index.}
}
}
\source{
Botana, M.A., Mato, J.A., Cadarso-Suarez, C., Tome, M.A., Perez-Fernandez, R., Fernandez-Mario, A., Rego-Iraeta, A., Solache, I. (2007). Overweight, obesity and central obesity prevalences in the region of Galicia in Northwest Spain. Obesity and Metabolism, 3, 106--115.
Tome, M.A., Botana, M.A., Cadarso-Suarez, C., Rego-Iraeta, A., Fernandez-Mario, A., Mato, J.A, Solache, I., Perez-Fernandez, R. (2008). Prevalence of metabolic syndrome in Galicia (NW Spain) on four alternative definitions and association with insulin resistance. Journal of Endocrinological Investigation, 32, 505--511.
}
\references{
Rodriguez-Alvarez, M.X., Roca-Pardinas, J. and Cadarso-Suarez, C. (2011a). ROC curve and covariates: extending induced methodology to the non-parametric framework. Statistics and Computing, 21(4), 483--499.
Rodriguez- Alvarez, M.X., Roca-Pardinas, J. and Cadarso-Suarez, C. (2011b). A new flexible direct ROC regression model - Application to the detection of cardiovascular risk factors by anthropometric measures. Computational Statistics and Data Analysis, 55(12), 3257--3270.
}
\examples{
data(endosim)
summary(endosim)
}
\keyword{datasets}
|
source("generateSubset.R")
##This method downloads/reads the file
# Load the (pre-filtered) household power consumption subset.
subset <- generateSubset()
# Open the PNG device; all four panels go into one 480x480 image.
png("plot4.png", width=480, height=480)
# Combine the Date and Time columns into a single POSIXlt timestamp.
datetime <- strptime(paste(subset$Date, subset$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Arrange the four plots in a 2x2 grid.
par(mfrow=c(2,2))
# Panel 1: global active power over time.
plot(datetime, subset$Global_active_power, type="l", xlab = "", ylab="Global Active Power(kilowatts)")
# Panel 2: voltage over time.
plot(datetime, as.numeric(subset$Voltage), type="l", xlab="datetime", ylab="Voltage")
# Panel 3: the three sub-metering series overlaid, with a legend.
plot(datetime, as.numeric(subset$Sub_metering_1), type="l", ylab="Energy sub metering", xlab ="")
lines(datetime, as.numeric(subset$Sub_metering_2), col='red')
lines(datetime, as.numeric(subset$Sub_metering_3), col='blue')
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
# Panel 4: global reactive power over time.
plot(datetime, as.numeric(subset$Global_reactive_power), type="l", ylab="Global_reactive_power")
dev.off() | /Project1/plot4.R | no_license | leeandrew1693/ExploratoryDataAnalysis | R | false | false | 879 | r | source("generateSubset.R")
##This method downloads/reads the file
# Load the (pre-filtered) household power consumption subset.
subset <- generateSubset()
# Open the PNG device; all four panels go into one 480x480 image.
png("plot4.png", width=480, height=480)
# Combine the Date and Time columns into a single POSIXlt timestamp.
datetime <- strptime(paste(subset$Date, subset$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Arrange the four plots in a 2x2 grid.
par(mfrow=c(2,2))
# Panel 1: global active power over time.
plot(datetime, subset$Global_active_power, type="l", xlab = "", ylab="Global Active Power(kilowatts)")
# Panel 2: voltage over time.
plot(datetime, as.numeric(subset$Voltage), type="l", xlab="datetime", ylab="Voltage")
# Panel 3: the three sub-metering series overlaid, with a legend.
plot(datetime, as.numeric(subset$Sub_metering_1), type="l", ylab="Energy sub metering", xlab ="")
lines(datetime, as.numeric(subset$Sub_metering_2), col='red')
lines(datetime, as.numeric(subset$Sub_metering_3), col='blue')
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
# Panel 4: global reactive power over time.
plot(datetime, as.numeric(subset$Global_reactive_power), type="l", ylab="Global_reactive_power")
dev.off() |
# dir.create("public")
# Build script: renders every .Rmd of the project into the public/
# directory tree and copies the shared assets (css, resources, logo)
# alongside, so the rendered HTML finds them via relative paths.
setwd(here::here())
# CREATION INDENTATION -----------------------
# Create the output directory tree (public/ itself is assumed to exist).
dir.create("public/archives")
dir.create("public/matinees2021")
dir.create("public/ateliers")
dir.create("public/archives/matinees2021")
# Copy the shared assets into every source and output directory.
lapply(c("public",
         "archives", "public/archives",
         "ateliers", "public/ateliers",
         "archives/matinees2021", "public/archives/matinees2021"), function(i){
  file.copy(here::here('resources'), here::here(i), recursive = TRUE)
  file.copy(here::here('css'), here::here(i), recursive = TRUE)
  file.copy(here::here('insert-logo.html'), here::here(i), recursive = TRUE)
})
# LATEST A LA RACINE ------------------
# Render the top-level .Rmd files and move the produced HTML into public/.
lapply(list.files(pattern = "*.Rmd", recursive = FALSE), function(flname){
  rmarkdown::render(flname)
  flname_html <- gsub(".Rmd", ".html", flname)
  file.copy(flname_html, paste0("public/", flname_html))
  file.remove(flname_html)
})
# ARCHIVES ------------------
setwd(here::here("archives"))
## ROOT =====
# Render archives/*.Rmd into public/archives/.
lapply(list.files(pattern = "*.Rmd", recursive = FALSE), function(flname){
  rmarkdown::render(flname)
  flname_html <- gsub(".Rmd", ".html", flname)
  file.copy(gsub(".Rmd", ".html", flname),
            paste0(here::here("public","archives", flname_html))
  )
  file.remove(flname_html)
})
## MATINEES =====
# Render archives/matinees2021/*.Rmd into public/archives/matinees2021/.
setwd(here::here("archives", "matinees2021"))
lapply(list.files(pattern = "*.Rmd", recursive = FALSE), function(flname){
  rmarkdown::render(flname)
  flname_html <- gsub(".Rmd", ".html", flname)
  file.copy(gsub(".Rmd", ".html", flname),
            paste0(here::here("public","archives","matinees2021", flname_html))
  )
  file.remove(flname_html)
})
# ATELIERS ------------------
# The ateliers have their own build script that renders in place;
# afterwards each produced HTML file is moved into public/ateliers/.
setwd(here::here())
source('ateliers/build.R')
setwd(here::here())
files <- list.files(pattern = "*.Rmd", full.names = FALSE, path = "ateliers")
lapply(files, function(flname){
  flname_html <- gsub(".Rmd", ".html", flname)
  file.copy(paste0("./ateliers/", flname_html), paste0("public/ateliers/", flname_html))
  file.remove(paste0("./ateliers/", flname_html))
})
| /build/build.R | permissive | ddotta/utilitr-presentation | R | false | false | 2,063 | r | # dir.create("public")
# Build script: renders every .Rmd of the project into the public/
# directory tree and copies the shared assets (css, resources, logo)
# alongside, so the rendered HTML finds them via relative paths.
setwd(here::here())
# CREATION INDENTATION -----------------------
# Create the output directory tree (public/ itself is assumed to exist).
dir.create("public/archives")
dir.create("public/matinees2021")
dir.create("public/ateliers")
dir.create("public/archives/matinees2021")
# Copy the shared assets into every source and output directory.
lapply(c("public",
         "archives", "public/archives",
         "ateliers", "public/ateliers",
         "archives/matinees2021", "public/archives/matinees2021"), function(i){
  file.copy(here::here('resources'), here::here(i), recursive = TRUE)
  file.copy(here::here('css'), here::here(i), recursive = TRUE)
  file.copy(here::here('insert-logo.html'), here::here(i), recursive = TRUE)
})
# LATEST A LA RACINE ------------------
# Render the top-level .Rmd files and move the produced HTML into public/.
lapply(list.files(pattern = "*.Rmd", recursive = FALSE), function(flname){
  rmarkdown::render(flname)
  flname_html <- gsub(".Rmd", ".html", flname)
  file.copy(flname_html, paste0("public/", flname_html))
  file.remove(flname_html)
})
# ARCHIVES ------------------
setwd(here::here("archives"))
## ROOT =====
# Render archives/*.Rmd into public/archives/.
lapply(list.files(pattern = "*.Rmd", recursive = FALSE), function(flname){
  rmarkdown::render(flname)
  flname_html <- gsub(".Rmd", ".html", flname)
  file.copy(gsub(".Rmd", ".html", flname),
            paste0(here::here("public","archives", flname_html))
  )
  file.remove(flname_html)
})
## MATINEES =====
# Render archives/matinees2021/*.Rmd into public/archives/matinees2021/.
setwd(here::here("archives", "matinees2021"))
lapply(list.files(pattern = "*.Rmd", recursive = FALSE), function(flname){
  rmarkdown::render(flname)
  flname_html <- gsub(".Rmd", ".html", flname)
  file.copy(gsub(".Rmd", ".html", flname),
            paste0(here::here("public","archives","matinees2021", flname_html))
  )
  file.remove(flname_html)
})
# ATELIERS ------------------
# The ateliers have their own build script that renders in place;
# afterwards each produced HTML file is moved into public/ateliers/.
setwd(here::here())
source('ateliers/build.R')
setwd(here::here())
files <- list.files(pattern = "*.Rmd", full.names = FALSE, path = "ateliers")
lapply(files, function(flname){
  flname_html <- gsub(".Rmd", ".html", flname)
  file.copy(paste0("./ateliers/", flname_html), paste0("public/ateliers/", flname_html))
  file.remove(paste0("./ateliers/", flname_html))
})
|
fsGLASSO <- function(mts, max.lag, rho, absolute = TRUE, show.progress = TRUE, localized = FALSE) {
  # Estimate the lagged dependency structure of a multivariate time
  # series by running the graphical LASSO (glasso) on pairwise-complete
  # correlation matrices.  Returns a (ncol(mts) * max.lag) x ncol(mts)
  # matrix of partial-precision links, named via fsNames().
  #
  # mts: multivariate time series (observations in rows).
  # max.lag: maximal lag to include.
  # rho: glasso regularization parameter.
  # absolute: return absolute values of the links?
  # show.progress: print progress (localized mode only)?
  # localized: fit one model per component instead of a single joint model.
  n.comp <- ncol(mts)
  if (localized) {
    # One glasso fit per target component, each on its own lagged design.
    res <- matrix(0, n.comp * max.lag, n.comp)
    for (comp in 1:n.comp) {
      design <- composeYX(mts, comp, max.lag)
      corr <- stats::cor(design, use = "pairwise.complete.obs")
      fit <- glasso::glasso(corr, rho = rho, penalize.diagonal = FALSE)
      # Row/column 1 of the precision matrix corresponds to the target;
      # its off-diagonal entries are the links to the lagged predictors.
      res[, comp] <- fit$wi[1, -1]
      if (show.progress) svMisc::progress(100 * comp / n.comp)
    }
    res <- fsNames(res, mts, max.lag)
  } else {
    # Single joint model: augment the series with its first max.lag lags
    # (each cbind drops one trailing row to keep the rows aligned).
    lagged <- mts
    for (l in 1:max.lag) {
      lagged <- cbind(lagged[-nrow(lagged), ], mts[-c(1:l), ])
    }
    corr <- stats::cor(lagged, use = "pairwise.complete.obs")
    fit <- glasso::glasso(corr, rho = rho, penalize.diagonal = FALSE)
    # Keep only the block linking the lagged predictors to the
    # contemporaneous components.
    res <- fsNames(fit$wi[-c(1:n.comp), 1:n.comp], mts, max.lag)
  }
  if (absolute) res <- abs(res)
  return(res)
}
| /R/fsGLASSO.R | no_license | cran/fsMTS | R | false | false | 872 | r | fsGLASSO <- function(mts, max.lag, rho, absolute = TRUE, show.progress = TRUE, localized = FALSE) {
  # Number of components of the multivariate series.
  k<-ncol(mts)
  if (localized){
    # Localized mode: one glasso fit per target component, each on its
    # own lagged design matrix built by composeYX().
    res<-matrix(0, k*max.lag, k)
    for (i in 1:k){
      dat <- composeYX(mts, i, max.lag)
      # Pairwise-complete correlations tolerate missing values.
      dat.cov<-stats::cor(dat, use="pairwise.complete.obs")
      gl<-glasso::glasso(dat.cov, rho=rho, penalize.diagonal=FALSE)
      # Row 1 of the precision matrix corresponds to the target; its
      # off-diagonal entries are the links to the lagged predictors.
      links<-gl$wi[1,-1]
      res[,i] <- links
      if (show.progress) svMisc::progress(100*i/k)
    }
    res <- fsNames(res, mts, max.lag)
  }else{
    # Joint mode: augment the series with its first max.lag lags (each
    # cbind drops one trailing row so the rows stay aligned).
    dat<-mts
    for (l in 1:max.lag){
      dat<-cbind(dat[-nrow(dat),], mts[-c(1:l),])
    }
    dat.cov<-stats::cor(dat, use="pairwise.complete.obs")
    gl<-glasso::glasso(dat.cov, rho=rho, penalize.diagonal=FALSE)
    # Keep only the block linking the lagged predictors to the
    # contemporaneous components.
    res<-gl$wi[-c(1:k),1:k]
    res <- fsNames(res, mts, max.lag)
  }
  if (absolute) res <- abs(res)
  return (res)
}
|
# Script: build a "dummy" LPJ land-use input in which the year-1860
# land-use pattern is repeated for every year of the 1700-2015 period.
rm(list=ls(all=TRUE))
gc()
#source("/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/landuse_modify.r")
#source("/home/mfader/inputs_longheader/inputs_Trendy/landuse_modify.r")
#source("/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/read.input.r")
#source("/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/map.r")
library("fields")
# Input land-use file (alternative paths kept below for reference).
landusefile<-"/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/cft1700_2015_HYDE_short_corrected_iformat.bin"
#landusefile<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_short_corrected_iformat.bin"
#landusefile<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_Crops2Grass_RfIr_short_corrected_iformat.clm"
#landusefile<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_Crops2Grass_allRf_short_corrected_iformat.clm"
# Output file to be written (1860 pattern replicated for all years).
newlanduse<-"/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/cft1700_2015_HYDE_short_corrected_iformat_1860_dummy.bin"
#newlanduse<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_short_corrected_iformat_1860_dummy.bin"
#newlanduse<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_Crops2Grass_RfIr_short_corrected_iformat_1860_dummy.bin"
#newlanduse<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_Crops2Grass_allRf_short_corrected_iformat_1860_dummy.bin"
read.input.header<-function(filename){
  # Parse the fixed-size (43-byte) header of an LPJ binary input file:
  # a 7-character magic name, seven 4-byte integers and two 4-byte
  # floats, all in native byte order.  Returns a one-row data.frame
  # with the fields name, version, order, firstyear, nyears, firstcell,
  # ncells, nbands, cellsize and scalar.
  con <- file(filename, "rb")
  # Guarantee the connection is closed even if a read fails.
  on.exit(close(con))
  hdr.name <- readChar(con, nchars = 7)
  ints <- readBin(con, integer(), n = 7, size = 4)
  nums <- readBin(con, numeric(), n = 2, size = 4)
  data.frame(name = hdr.name,
             version = ints[1], order = ints[2], firstyear = ints[3],
             nyears = ints[4], firstcell = ints[5], ncells = ints[6],
             nbands = ints[7],
             cellsize = nums[1], scalar = nums[2])
}
read.input.yearband<-function(filename,year,band, data.size){#year,band, start from 1
  # Read one band of one year from an LPJ binary input file.
  # The data section (after the 43-byte header) is laid out as
  # year -> cell -> band, one value of 'data.size' bytes each.
  #
  # filename: path to the LPJ input file.
  # year: calendar year to read (absolute year, not an index).
  # band: band number to extract, starting from 1.
  # data.size: size in bytes of one stored value (e.g. 2 for short).
  # Returns a 1-d integer array with one value per cell.
  fileHeader<-read.input.header(filename)
  data.year<-year-fileHeader$firstyear+1
  file.in <- file(filename,"rb")
  # Guarantee the connection is closed even if a read fails.
  on.exit(close(file.in))
  # Skip the 43-byte header plus all full years before the requested one,
  # read the whole year in one call, then extract the requested band.
  # Fixes in this revision: the header field was spelled
  # fileHeader$nband (it only worked through $ partial matching) and the
  # inter-value skip was hard-coded to 2 bytes instead of data.size.
  seek(file.in,
       where = 43 + data.size*(data.year-1)*fileHeader$nbands*fileHeader$ncells,
       origin = "start")
  year.values <- readBin(file.in, integer(),
                         n = fileHeader$nbands*fileHeader$ncells, size = data.size)
  band.values <- year.values[seq(from = band, by = fileHeader$nbands,
                                 length.out = fileHeader$ncells)]
  # Keep the original return shape: a 1-d array of length ncells.
  data.in <- array(band.values, dim = c(fileHeader$ncells))
  return(data.in)
}
# Read the header of the existing land-use input and keep its
# dimensions; the dummy file written below reuses them unchanged.
header<-read.input.header(landusefile)
firstyear<-header$firstyear
nyear<-header$nyears
npix<-header$ncells
nbands<-header$nbands
cellsize<-header$cellsize
scalar<-header$scalar
print(paste("version",header$version,"order",header$order,"firstyear",header$firstyear,"nyears",header$nyears,"firstcell",
header$firstcell,"ncells",header$ncells,"nbands",header$nbands,"cellsize",header$cellsize,"scalar",header$scalar,sep=" "))
# Extract the complete year 1860 (all bands, all cells); this single
# year is replicated for every year of the output file.
d1860_array<-array(NA,c(nbands,npix))
for(i in 1:nbands){
  d1860_array[i,]<-read.input.yearband(landusefile,data.size=2,year=1860,band=i)
  cat("\b\b\b\b\b\b\b\b\b\b\b",round(i/nbands*100),"%")  # simple progress display
}
cat("\n")
# Column-major flattening of the nbands x npix array yields the on-disk
# order (band varies fastest within each cell).
d1860<-as.integer(as.vector(d1860_array))
#--------
#writing LPJ output
#--------
outname<-paste(newlanduse,sep="")
cat("writing LPJ input",outname,"...")
# Write the 43-byte LPJ header followed by the 1860 snapshot repeated
# once per year (2-byte short integers).
output<-file(outname, "w+b")
writeChar("LPJLUSE",output,eos=NULL) #header name
writeBin(as.integer(2),output,size=4) #header version
writeBin(as.integer(1),output,size=4) #order
writeBin(as.integer(firstyear),output,size=4) #firstyear
writeBin(as.integer(nyear),output,size=4) #nyear
writeBin(as.integer(0),output,size=4) #firstcell
writeBin(as.integer(npix),output,size=4) #ncell
writeBin(as.integer(nbands),output,size=4) #nbands
writeBin(as.numeric(cellsize),output,size=4) #cellsize
writeBin(as.numeric(scalar),output,size=4) #scalar
for(i in 1:nyear){
  writeBin(d1860,output,size=2)
}
close(output)
cat("[done]\n")
# Sanity check: sample years of the new file must all equal the 1860
# data of the original file; warnings [1]-[5] flag any mismatch.
for(i in 1:32){
  may_old<-read.input.yearband(landusefile,data.size=2,year=1860,band=i)
  may_new<-read.input.yearband(newlanduse,data.size=2,year=1700,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [1]")
  may_new<-read.input.yearband(newlanduse,data.size=2,year=2015,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [2]")
  may_new<-read.input.yearband(newlanduse,data.size=2,year=1750,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [3]")
  may_new<-read.input.yearband(newlanduse,data.size=2,year=1900,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [4]")
  may_new<-read.input.yearband(newlanduse,data.size=2,year=2000,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [5]")
}
| /nela/landuse_modify.r | no_license | sinanshi/LPJU | R | false | false | 4,901 | r | rm(list=ls(all=TRUE))
# Script: build a "dummy" LPJ land-use input in which the year-1860
# land-use pattern is repeated for every year of the 1700-2015 period.
gc()
#source("/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/landuse_modify.r")
#source("/home/mfader/inputs_longheader/inputs_Trendy/landuse_modify.r")
#source("/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/read.input.r")
#source("/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/map.r")
library("fields")
# Input land-use file (alternative paths kept below for reference).
landusefile<-"/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/cft1700_2015_HYDE_short_corrected_iformat.bin"
#landusefile<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_short_corrected_iformat.bin"
#landusefile<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_Crops2Grass_RfIr_short_corrected_iformat.clm"
#landusefile<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_Crops2Grass_allRf_short_corrected_iformat.clm"
# Output file to be written (1860 pattern replicated for all years).
newlanduse<-"/home/mfader/_AndereProjekte/Trendy4/LUData/progsnew/_out/cft1700_2015_HYDE_short_corrected_iformat_1860_dummy.bin"
#newlanduse<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_short_corrected_iformat_1860_dummy.bin"
#newlanduse<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_Crops2Grass_RfIr_short_corrected_iformat_1860_dummy.bin"
#newlanduse<-"/home/mfader/inputs_longheader/inputs_Trendy/cft1700_2015_HYDE_Crops2Grass_allRf_short_corrected_iformat_1860_dummy.bin"
read.input.header<-function(filename){
  # Parse the fixed-size (43-byte) header of an LPJ binary input file:
  # a 7-character magic name, seven 4-byte integers and two 4-byte
  # floats, all in native byte order.  Returns a one-row data.frame
  # with the fields name, version, order, firstyear, nyears, firstcell,
  # ncells, nbands, cellsize and scalar.
  con <- file(filename, "rb")
  # Guarantee the connection is closed even if a read fails.
  on.exit(close(con))
  hdr.name <- readChar(con, nchars = 7)
  ints <- readBin(con, integer(), n = 7, size = 4)
  nums <- readBin(con, numeric(), n = 2, size = 4)
  data.frame(name = hdr.name,
             version = ints[1], order = ints[2], firstyear = ints[3],
             nyears = ints[4], firstcell = ints[5], ncells = ints[6],
             nbands = ints[7],
             cellsize = nums[1], scalar = nums[2])
}
read.input.yearband<-function(filename,year,band, data.size){#year,band, start from 1
  # Read one band of one year from an LPJ binary input file.
  # The data section (after the 43-byte header) is laid out as
  # year -> cell -> band, one value of 'data.size' bytes each.
  #
  # filename: path to the LPJ input file.
  # year: calendar year to read (absolute year, not an index).
  # band: band number to extract, starting from 1.
  # data.size: size in bytes of one stored value (e.g. 2 for short).
  # Returns a 1-d integer array with one value per cell.
  fileHeader<-read.input.header(filename)
  data.year<-year-fileHeader$firstyear+1
  file.in <- file(filename,"rb")
  # Guarantee the connection is closed even if a read fails.
  on.exit(close(file.in))
  # Skip the 43-byte header plus all full years before the requested one,
  # read the whole year in one call, then extract the requested band.
  # Fixes in this revision: the header field was spelled
  # fileHeader$nband (it only worked through $ partial matching) and the
  # inter-value skip was hard-coded to 2 bytes instead of data.size.
  seek(file.in,
       where = 43 + data.size*(data.year-1)*fileHeader$nbands*fileHeader$ncells,
       origin = "start")
  year.values <- readBin(file.in, integer(),
                         n = fileHeader$nbands*fileHeader$ncells, size = data.size)
  band.values <- year.values[seq(from = band, by = fileHeader$nbands,
                                 length.out = fileHeader$ncells)]
  # Keep the original return shape: a 1-d array of length ncells.
  data.in <- array(band.values, dim = c(fileHeader$ncells))
  return(data.in)
}
# Read the header of the existing land-use input and keep its
# dimensions; the dummy file written below reuses them unchanged.
header<-read.input.header(landusefile)
firstyear<-header$firstyear
nyear<-header$nyears
npix<-header$ncells
nbands<-header$nbands
cellsize<-header$cellsize
scalar<-header$scalar
print(paste("version",header$version,"order",header$order,"firstyear",header$firstyear,"nyears",header$nyears,"firstcell",
header$firstcell,"ncells",header$ncells,"nbands",header$nbands,"cellsize",header$cellsize,"scalar",header$scalar,sep=" "))
# Extract the complete year 1860 (all bands, all cells); this single
# year is replicated for every year of the output file.
d1860_array<-array(NA,c(nbands,npix))
for(i in 1:nbands){
  d1860_array[i,]<-read.input.yearband(landusefile,data.size=2,year=1860,band=i)
  cat("\b\b\b\b\b\b\b\b\b\b\b",round(i/nbands*100),"%")  # simple progress display
}
cat("\n")
# Column-major flattening of the nbands x npix array yields the on-disk
# order (band varies fastest within each cell).
d1860<-as.integer(as.vector(d1860_array))
#--------
#writing LPJ output
#--------
outname<-paste(newlanduse,sep="")
cat("writing LPJ input",outname,"...")
# Write the 43-byte LPJ header followed by the 1860 snapshot repeated
# once per year (2-byte short integers).
output<-file(outname, "w+b")
writeChar("LPJLUSE",output,eos=NULL) #header name
writeBin(as.integer(2),output,size=4) #header version
writeBin(as.integer(1),output,size=4) #order
writeBin(as.integer(firstyear),output,size=4) #firstyear
writeBin(as.integer(nyear),output,size=4) #nyear
writeBin(as.integer(0),output,size=4) #firstcell
writeBin(as.integer(npix),output,size=4) #ncell
writeBin(as.integer(nbands),output,size=4) #nbands
writeBin(as.numeric(cellsize),output,size=4) #cellsize
writeBin(as.numeric(scalar),output,size=4) #scalar
for(i in 1:nyear){
  writeBin(d1860,output,size=2)
}
close(output)
cat("[done]\n")
# Sanity check: sample years of the new file must all equal the 1860
# data of the original file; warnings [1]-[5] flag any mismatch.
for(i in 1:32){
  may_old<-read.input.yearband(landusefile,data.size=2,year=1860,band=i)
  may_new<-read.input.yearband(newlanduse,data.size=2,year=1700,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [1]")
  may_new<-read.input.yearband(newlanduse,data.size=2,year=2015,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [2]")
  may_new<-read.input.yearband(newlanduse,data.size=2,year=1750,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [3]")
  may_new<-read.input.yearband(newlanduse,data.size=2,year=1900,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [4]")
  may_new<-read.input.yearband(newlanduse,data.size=2,year=2000,band=i)
  if(!all((may_old-may_new)==0))
    cat("warning: [5]")
}
|
context("Does variable importance work?")
library(caret)
library(randomForest)
test_that("We can get variable importance in ensembles", {
  skip_on_cran()
  load(system.file("testdata/models.reg.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  load(system.file("testdata/models.class.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  set.seed(2239)
  ens.class <- caretEnsemble(models.class, iter = 100)
  # varImp struggles with the rf in our test suite, why?
  models.subset <- models.reg[2:4]
  class(models.subset) <- "caretList"
  ens.reg <- caretEnsemble(models.subset, iter = 100)
  # varImp should yield a data.frame under every argument combination,
  # for both the classification and the regression ensemble.
  for (ens in list(ens.class, ens.reg)) {
    expect_is(varImp(ens), "data.frame")
    expect_is(varImp(ens, weight = TRUE), "data.frame")
    expect_is(varImp(ens, scale = TRUE, weight = TRUE), "data.frame")
  }
})
test_that("We get warnings when scale is set to FALSE and weight is TRUE", {
  skip_on_cran()
  load(system.file("testdata/models.reg.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  load(system.file("testdata/models.class.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  set.seed(2239)
  ens.class <- caretEnsemble(models.class, iter = 100)
  # varImp struggles with the rf in our test suite, why?
  models.subset <- models.reg[2:4]
  class(models.subset) <- "caretList"
  ens.reg <- caretEnsemble(models.subset, iter = 100)
  # NOTE: the former bare gives_warning(...) calls were removed: outside
  # expect_that() they are no-ops -- lazy argument evaluation means the
  # varImp expression was never even run, so they tested nothing. The
  # expect_warning() checks below cover the same calls properly.
  expect_warning(varImp(ens.reg, scale = FALSE, weight = TRUE),
                 "Weighting of unscaled")
  expect_warning(varImp(ens.class, scale = FALSE, weight = TRUE),
                 "Weighting of unscaled")
  # Unscaled importance warns even with default weighting.
  expect_warning(varImp(ens.reg, scale = FALSE),
                 "Weighting of unscaled")
  expect_warning(varImp(ens.class, scale = FALSE),
                 "Weighting of unscaled")
})
test_that("We get the right dimensions back", {
  skip_on_cran()
  ncol1 <- 6
  ncol2 <- 4
  nrow1 <- 6
  nrow2 <- 6
  load(system.file("testdata/models.reg.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  load(system.file("testdata/models.class.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  set.seed(2239)
  ens.class <- caretEnsemble(models.class, iter = 100)
  # varImp struggles with the rf in our test suite, why?
  models.subset <- models.reg[2:4]
  class(models.subset) <- "caretList"
  ens.reg <- caretEnsemble(models.subset, iter = 100)
  # Expected importance-table shape per ensemble: weight = FALSE drops one
  # (the weighted) column; the row count is unaffected by either option.
  cases <- list(list(ens = ens.class, nc = ncol1, nr = nrow1),
                list(ens = ens.reg, nc = ncol2, nr = nrow2))
  for (case in cases) {
    expect_equal(ncol(varImp(case$ens)), case$nc)
    expect_equal(ncol(varImp(case$ens, weight = FALSE)), case$nc - 1)
    expect_equal(ncol(varImp(case$ens, weight = TRUE)), case$nc)
    expect_equal(nrow(varImp(case$ens)), case$nr)
    expect_equal(nrow(varImp(case$ens, weight = FALSE)), case$nr)
    expect_equal(nrow(varImp(case$ens, weight = TRUE)), case$nr)
  }
})
context("Do metric extraction functions work as expected")
test_that("Metric is used correctly", {
  skip_on_cran()
  load(system.file("testdata/models.reg.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  load(system.file("testdata/models.class.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  set.seed(2239)
  ens.class <- caretEnsemble(models.class, iter = 100)
  # varImp struggles with the rf in our test suite, why?
  models.subset <- models.reg[2:4]
  class(models.subset) <- "caretList"
  ens.reg <- caretEnsemble(models.subset, iter = 100)
  # Local aliases for the unexported metric helpers under test.
  getAUC <- caretEnsemble:::getAUC.train
  getRMSE <- caretEnsemble:::getRMSE.train
  getMetric <- caretEnsemble:::getMetric.train
  getMetricSD <- caretEnsemble:::getMetricSD.train
  # A classification model has no RMSE; a regression model has no AUC.
  expect_error(getRMSE(ens.class$models[[1]]))
  expect_error(getRMSE(ens.class$models[[3]]))
  expect_error(getMetric(ens.class$models[[3]], metric = "RMSE"))
  expect_error(getAUC(ens.reg$models[[1]]))
  expect_error(getAUC(ens.reg$models[[2]]))
  expect_error(getMetric(ens.reg$models[[2]], metric = "AUC"))
  # Known per-model AUCs; getMetric(metric = "AUC") must agree with getAUC.
  auc.expected <- c(0.9287978, 0.942959, 0.9185977, 0.9405823, 0.9250347)
  for (k in seq_along(auc.expected)) {
    mod <- ens.class$models[[k]]
    expect_equal(getAUC(mod), auc.expected[k], tol = 0.025)
    expect_equal(getAUC(mod), getMetric(mod, metric = "AUC"), tol = 0.025)
  }
  # Known per-model RMSEs for the raw regression fixtures; the generic
  # accessor must agree, and the wrong metric must error on every model.
  rmse.expected <- c(0.3334612, 0.324923, 0.324923, 0.3532128)
  for (k in seq_along(rmse.expected)) {
    expect_equal(getRMSE(models.reg[[k]]), rmse.expected[k], tol = 0.025)
    expect_equal(getRMSE(models.reg[[k]]),
                 getMetric(models.reg[[k]], metric = "RMSE"), tol = 0.025)
    expect_error(getMetric(models.reg[[k]], metric = "AUC"))
    expect_error(getMetric(models.class[[k]], metric = "RMSE"))
  }
  # With no metric supplied, getMetricSD picks one and says so.
  expect_message(getMetricSD(models.reg[[1]]))
  expect_message(getMetricSD(models.class[[1]]))
  # Known metric SDs; an explicit compatible metric must match the inferred
  # default, and an incompatible metric must error.
  sd.reg.expected <- c(0.05873828, 0.05517874, 0.05517874, 0.07023269)
  sd.class.expected <- c(0.0582078, 0.05196865, 0.06356099, 0.07360202)
  for (k in 1:4) {
    expect_error(getMetricSD(models.reg[[k]], metric = "AUC"))
    expect_error(getMetricSD(models.class[[k]], metric = "RMSE"))
    expect_equal(getMetricSD(models.reg[[k]]), sd.reg.expected[k], tol = 0.025)
    expect_equal(getMetricSD(models.reg[[k]]),
                 getMetricSD(models.reg[[k]], metric = "RMSE"))
    expect_equal(getMetricSD(models.class[[k]]), sd.class.expected[k], tol = 0.025)
    expect_equal(getMetricSD(models.class[[k]]),
                 getMetricSD(models.class[[k]], metric = "AUC"))
  }
})
context("Metrics in student examples")
test_that("metrics work for AUC in imbalanced example", {
  skip_on_cran()
  load(system.file("testdata/studentEns.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  # Known AUC means and SDs for the three student-ensemble members.
  auc.expected <- c(0.9340861, 0.873687, 0.8839286)
  sd.expected <- c(0.04611638, 0.03840437, 0.0144596)
  for (k in seq_along(auc.expected)) {
    mod <- studentEns$models[[k]]
    expect_equal(caretEnsemble:::getMetric.train(mod), auc.expected[k], tol = 0.025)
    # AUC should be the inferred default metric for these classifiers.
    expect_equal(caretEnsemble:::getMetric.train(mod),
                 caretEnsemble:::getMetric.train(mod, metric = "AUC"))
    expect_equal(caretEnsemble:::getMetricSD.train(mod), sd.expected[k], tol = 0.025)
    expect_equal(caretEnsemble:::getMetricSD.train(mod),
                 caretEnsemble:::getMetricSD.train(mod, metric = "AUC"))
  }
})
context("Testing caretEnsemble generics")
test_that("No errors are thrown by a generics for ensembles", {
skip_on_cran()
# Load the pre-fit caretList/ensemble fixtures shipped with the package.
load(system.file("testdata/studentEns.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.class.rda",
package="caretEnsemble", mustWork=TRUE))
set.seed(2239)
ens.class <- caretEnsemble(models.class, iter=100)
# varImp struggles with the rf in our test suite, why?
models.subset <- models.reg[2:4]
class(models.subset) <- "caretList"
ens.reg <- caretEnsemble(models.subset, iter=100)
# summary() output should mention the optimized metric for each ensemble type.
expect_output(summary(ens.class), "AUC")
expect_output(summary(ens.reg), "RMSE")
expect_output(summary(studentEns), "AUC")
# plot() on an ensemble returns a ggplot; on a component train model, a trellis.
expect_is(plot(ens.class), "ggplot")
expect_is(plot(ens.reg), "ggplot")
expect_is(plot(ens.reg$models[[2]]), "trellis")
tp <- plot(ens.class)
tp2 <- plot(ens.reg)
# Plot data rows are labeled by the ensemble weight names.
# NOTE(review): ens.reg is built from 3 models but only 2 rows are expected --
# presumably zero-weight models are dropped from the plot; confirm.
expect_equal(nrow(tp$data), 5)
expect_equal(nrow(tp2$data), 2)
expect_equal(tp$data$method, names(ens.class$weights))
expect_equal(tp2$data$method, names(ens.reg$weights))
# fortify() flattens an ensemble into a plottable data.frame.
fort1 <- fortify(ens.class)
fort2 <- fortify(ens.reg)
expect_is(fort1, "data.frame")
expect_is(fort2, "data.frame")
# NOTE(review): 150 rows / 10 cols presumably reflects the iris-sized training
# data behind the fixtures -- confirm against the testdata files.
expect_equal(nrow(fort1), 150)
expect_equal(nrow(fort2), 150)
expect_equal(ncol(fort1), 10)
expect_equal(ncol(fort2), 10)
expect_true(all(names(fort1) %in% names(fort2)))
# autoplot() needs an open graphics device; render to a throwaway png file.
test_plot_file <- "caretEnsemble_test_plots.png"
png(test_plot_file)
p1 <- autoplot(ens.class)
p2 <- autoplot(ens.reg)
p3 <- autoplot(ens.class, xvars=c("Petal.Length", "Petal.Width"))
p4 <- autoplot(ens.reg, xvars=c("Petal.Length", "Petal.Width"))
# autoplot is defined for ensembles only, not for individual train objects.
expect_error(autoplot(ens.reg$models[[1]]))
dev.off()
expect_true(file.exists(test_plot_file))
unlink(test_plot_file)
})
context("Residual extraction")
test_that("Residuals provided by residuals are proper for ensemble objects", {
skip_on_cran()
# Load fitted fixtures plus the raw X/Y data they were trained on.
load(system.file("testdata/studentEns.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.class.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/X.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/Y.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/X.class.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/Y.class.rda",
package="caretEnsemble", mustWork=TRUE))
set.seed(2239)
ens.class <- caretEnsemble(models.class, iter=100)
models.subset <- models.reg[2:4]
class(models.subset) <- "caretList"
ens.reg <- caretEnsemble(models.subset, iter=100)
residTest <- residuals(ens.class)
residTest2 <- residuals(ens.reg)
# Classification outcome coded numerically: "No" -> 0, other level -> 1.
obs1 <- ifelse(Y.class == "No", 0, 1)
obs2 <- Y.reg
predTest <- predict(ens.class)
predTest2 <- predict(ens.reg)
# Residuals must follow the observed-minus-predicted convention ...
expect_identical(residTest, obs1 - predTest)
expect_identical(residTest2, obs2 - predTest2)
# ... so the opposite sign must NOT match.
expect_false(identical(residTest2, predTest2 -obs2))
expect_false(identical(residTest, predTest -obs1))
# multiResiduals: long format, one row per component model per observation.
mr1 <- multiResiduals(ens.class)
mr2 <- multiResiduals(ens.reg)
expect_identical(names(mr1), names(mr2))
expect_identical(names(mr1), c("method", "id", "yhat", "resid", "y"))
expect_equal(nrow(mr1), 150 * length(ens.class$models))
expect_equal(nrow(mr2), 150 * length(ens.reg$models))
expect_equal(ncol(mr1), ncol(mr2))
# Sort both long frames the same way before element-wise comparisons.
mr1 <- mr1[order(mr1$method, mr1$id),]
mr2 <- mr2[order(mr2$method, mr2$id),]
# Compare against residuals() of the underlying train objects directly
# (attributes stripped; rounding dodges floating-point noise).
mr2.tmp1 <- residuals(ens.reg$models[[1]])
attributes(mr2.tmp1) <- NULL
mr2.tmp2 <- residuals(ens.reg$models[[2]])
expect_true(identical(round(mr2[mr2$method == "lm", "resid"], 5), round(mr2.tmp1, 5)))
expect_true(identical(round(mr2[mr2$method == "knn", "resid"], 5), round(mr2.tmp2, 5)))
#I think the factors are backward somewhere in here
#Also, caret doesn't yet support residuals for classification
# mr_class_wide <- as.data.frame(lapply(ens.class$models, residuals))
# names(mr_class_wide) <- lapply(ens.class$models, function(x) x$method)
# mr_class_long <- reshape(mr_class_wide, direction = "long", varying = names(mr_class_wide),
# v.names = "resid", timevar = "method", times = names(mr_class_wide))
# expect_equal(mr_class_long[order(mr_class_long$method, mr_class_long$id),"resid"], -1*mr1[order(mr1$method, mr1$id),"resid"])
# Rebuild the long format by hand with reshape(); it must match multiResiduals.
mr_reg_wide <- as.data.frame(lapply(ens.reg$models, residuals))
names(mr_reg_wide) <- lapply(ens.reg$models, function(x) x$method)
mr_reg_long <- reshape(mr_reg_wide, direction = "long", varying = names(mr_reg_wide),
v.names = "resid", timevar = "method", times = names(mr_reg_wide))
expect_equal(mr_reg_long[order(mr_reg_long$method, mr_reg_long$id),"resid"], mr2[order(mr2$method, mr2$id),"resid"])
# residuals() should still work (inferring the type) when $modelType is absent.
ens.class2 <- ens.class
ens.reg2 <- ens.reg
ens.class2$modelType <- ens.reg2$modelType <- NULL
expect_equal(residuals(ens.class2), residuals(ens.class))
expect_equal(residuals(ens.reg2), residuals(ens.reg))
})
context("Does prediction method work for classification")
test_that("We can ensemble models and handle missingness across predictors", {
skip_on_cran()
load(system.file("testdata/models.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.class.rda",
package="caretEnsemble", mustWork=TRUE))
set.seed(2239)
ens.class <- caretEnsemble(models.class, iter=100)
# varImp struggles with the rf in our test suite, why?
models.subset <- models.reg[2:4]
class(models.subset) <- "caretList"
ens.reg <- caretEnsemble(models.subset, iter=100)
# extractModRes summarises each component model's performance.
modres1 <- caretEnsemble:::extractModRes(ens.class)
modres2 <- caretEnsemble:::extractModRes(ens.reg)
# Column 2 must NOT be the best tuning-grid Accuracy/RMSE ...
expect_false(identical(modres1[1, 2], max(ens.class$models[[1]]$results$Accuracy)))
expect_false(identical(modres2[1, 2], max(ens.reg$models[[1]]$results$RMSE)))
expect_false(identical(modres1[2, 2], max(ens.class$models[[2]]$results$Accuracy)))
expect_false(identical(modres2[2, 2], max(ens.reg$models[[2]]$results$RMSE)))
expect_false(identical(modres1[3, 2], max(ens.class$models[[3]]$results$Accuracy)))
expect_false(identical(modres1[4, 2], max(ens.class$models[[4]]$results$Accuracy)))
expect_false(identical(modres1[5, 2], max(ens.class$models[[5]]$results$Accuracy)))
# ... it must be the per-model training AUC.
expect_identical(modres1[1, 2], caretEnsemble:::getAUC.train(ens.class$models[[1]]))
expect_identical(modres1[2, 2], caretEnsemble:::getAUC.train(ens.class$models[[2]]))
expect_identical(modres1[3, 2], caretEnsemble:::getAUC.train(ens.class$models[[3]]))
expect_identical(modres1[4, 2], caretEnsemble:::getAUC.train(ens.class$models[[4]]))
expect_identical(modres1[5, 2], caretEnsemble:::getAUC.train(ens.class$models[[5]]))
# Column 3 is the metric SD at the best tuning parameters.
expect_identical(modres1[1, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[1]], "AUC", which = "best"))
expect_identical(modres1[2, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[2]], "AUC", which = "best"))
expect_identical(modres1[3, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[3]], "AUC", which = "best"))
expect_identical(modres1[4, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[4]], "AUC", which = "best"))
expect_equal(modres1[5, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[5]], "AUC", which = "best"))
# For model 2 which = "all" coincides with "best", for model 3 it must differ.
# NOTE(review): presumably model 2 has a single-row tuning grid -- confirm.
expect_identical(modres1[2, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[2]], "AUC", which = "all"))
expect_false(identical(modres1[3, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[3]],
"AUC", which = "all")))
expect_identical(modres2[1, 3], caretEnsemble:::getMetricSD.train(ens.reg$models[[1]], "RMSE", which = "best"))
expect_identical(modres2[2, 3], caretEnsemble:::getMetricSD.train(ens.reg$models[[2]], "RMSE", which = "best"))
expect_identical(modres2[1, 3], caretEnsemble:::getMetricSD.train(ens.reg$models[[1]], "RMSE", which = "all"))
expect_false(identical(modres2[2, 3], caretEnsemble:::getMetricSD.train(ens.reg$models[[2]],
"RMSE", which = "all")))
# extractModFrame: combined frame is wider than any single model's training
# data but keeps the same number of rows.
modF <- caretEnsemble:::extractModFrame(ens.class)
modF2 <- caretEnsemble:::extractModFrame(ens.reg)
expect_true(ncol(modF) > ncol(ens.class$models[[2]]$trainingData))
expect_true(ncol(modF2) > ncol(ens.reg$models[[1]]$trainingData))
expect_true(nrow(modF) == nrow(ens.class$models[[2]]$trainingData))
expect_true(nrow(modF2) == nrow(ens.reg$models[[1]]$trainingData))
})
context("Does prediction method work for regression")
test_that("We can ensemble models and handle missingness across predictors", {
  skip_on_cran()
  # Pull in all regression and classification fixtures in one pass.
  for (fixture in c("models.reg", "X.reg", "Y.reg",
                    "models.class", "X.class", "Y.class")) {
    load(system.file(sprintf("testdata/%s.rda", fixture),
                     package = "caretEnsemble", mustWork = TRUE))
  }
  ens.reg <- caretEnsemble(models.reg, iter = 1000)
  models.class2 <- models.class[c(2:5)]
  class(models.class2) <- "caretList"
  ens.class <- caretEnsemble(models.class2, iter = 1000)
  # Sprinkle NAs down the diagonal of some new data; predicting on
  # incomplete rows must fail loudly under every option combination.
  newDat <- ens.class$models[[4]]$trainingData
  for (j in 2:4) {
    newDat[j, j] <- NA
  }
  newDat <- newDat[1:10, ]
  expect_error(predict(ens.class, newdata = newDat, return_weights = TRUE, se = FALSE, keepNA = TRUE))
  expect_error(predict(ens.reg, newdata = newDat, return_weights = TRUE, se = TRUE, keepNA = TRUE))
  expect_error(predict(ens.reg, newdata = newDat, return_weights = FALSE, se = FALSE, keepNA = TRUE))
  expect_error(predict(ens.reg, newdata = newDat, return_weights = FALSE, se = TRUE, keepNA = TRUE))
})
#Reg tests
test_that("Prediction options are respected in regression and classification", {
skip_on_cran()
load(system.file("testdata/models.reg.rda", package="caretEnsemble", mustWork=TRUE))
ens.reg <- caretEnsemble(models.reg, iter=1000)
# Exercise every combination of the three logical prediction options.
tests <- expand.grid(keepNA=0:1, se=0:1, return_weights=0:1)
tests <- data.frame(lapply(tests, as.logical))
for(i in 1:nrow(tests)){
p <- predict(
ens.reg,
keepNA=tests[i,"keepNA"],
se=tests[i,"se"],
return_weights=tests[i,"return_weights"]
)
# se=TRUE yields a data.frame; otherwise a plain numeric vector.
if(tests[i,"se"]){
expect_is(p, "data.frame")
preds <- p
} else{
expect_is(p, "numeric")
preds <- p
}
# Model weights come back as a matrix attribute only when requested.
if(tests[i,"return_weights"]){
expect_is(attr(preds, which = "weights"), "matrix")
} else{
expect_null(attr(preds, which = "weights"))
}
}
#Class tests
load(system.file("testdata/models.class.rda", package="caretEnsemble", mustWork=TRUE))
ens.class <- caretEnsemble(models.class, iter=1000)
tests <- expand.grid(keepNA=0:1, se=0:1, return_weights=0:1)
tests <- data.frame(lapply(tests, as.logical))
for(i in 1:nrow(tests)){
p <- predict(
ens.class,
keepNA=tests[i,"keepNA"],
se=tests[i,"se"],
return_weights=tests[i,"return_weights"]
)
if(tests[i,"se"]){
expect_is(p, "data.frame")
preds <- p
} else{
expect_is(p, "numeric")
preds <- p
}
# keepNA is deprecated: every use must raise the deprecation warning.
if(tests[i, "keepNA"]){
expect_warning(predict(
ens.class,
keepNA=tests[i,"keepNA"],
se=tests[i,"se"],
return_weights=tests[i,"return_weights"]
), "argument keepNA is deprecated; missing data cannot be safely handled.")
}
if(tests[i,"return_weights"]){
expect_is(attr(preds, which = "weights"), "matrix")
} else{
expect_null(attr(preds, which = "weights"))
}
}
})
| /tests/testthat/test-ensembleMethods.R | permissive | npp97-field/caretEnsemble | R | false | false | 24,088 | r |
context("Does variable importance work?")
library(caret)
library(randomForest)
test_that("We can get variable importance in ensembles", {
  skip_on_cran()
  load(system.file("testdata/models.reg.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  load(system.file("testdata/models.class.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  set.seed(2239)
  ens.class <- caretEnsemble(models.class, iter = 100)
  # varImp struggles with the rf in our test suite, why?
  models.subset <- models.reg[2:4]
  class(models.subset) <- "caretList"
  ens.reg <- caretEnsemble(models.subset, iter = 100)
  # varImp should yield a data.frame under every argument combination,
  # for both the classification and the regression ensemble.
  for (ens in list(ens.class, ens.reg)) {
    expect_is(varImp(ens), "data.frame")
    expect_is(varImp(ens, weight = TRUE), "data.frame")
    expect_is(varImp(ens, scale = TRUE, weight = TRUE), "data.frame")
  }
})
test_that("We get warnings when scale is set to FALSE and weight is TRUE", {
  skip_on_cran()
  load(system.file("testdata/models.reg.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  load(system.file("testdata/models.class.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  set.seed(2239)
  ens.class <- caretEnsemble(models.class, iter = 100)
  # varImp struggles with the rf in our test suite, why?
  models.subset <- models.reg[2:4]
  class(models.subset) <- "caretList"
  ens.reg <- caretEnsemble(models.subset, iter = 100)
  # NOTE: the former bare gives_warning(...) calls were removed: outside
  # expect_that() they are no-ops -- lazy argument evaluation means the
  # varImp expression was never even run, so they tested nothing. The
  # expect_warning() checks below cover the same calls properly.
  expect_warning(varImp(ens.reg, scale = FALSE, weight = TRUE),
                 "Weighting of unscaled")
  expect_warning(varImp(ens.class, scale = FALSE, weight = TRUE),
                 "Weighting of unscaled")
  # Unscaled importance warns even with default weighting.
  expect_warning(varImp(ens.reg, scale = FALSE),
                 "Weighting of unscaled")
  expect_warning(varImp(ens.class, scale = FALSE),
                 "Weighting of unscaled")
})
test_that("We get the right dimensions back", {
  skip_on_cran()
  ncol1 <- 6
  ncol2 <- 4
  nrow1 <- 6
  nrow2 <- 6
  load(system.file("testdata/models.reg.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  load(system.file("testdata/models.class.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  set.seed(2239)
  ens.class <- caretEnsemble(models.class, iter = 100)
  # varImp struggles with the rf in our test suite, why?
  models.subset <- models.reg[2:4]
  class(models.subset) <- "caretList"
  ens.reg <- caretEnsemble(models.subset, iter = 100)
  # Expected importance-table shape per ensemble: weight = FALSE drops one
  # (the weighted) column; the row count is unaffected by either option.
  cases <- list(list(ens = ens.class, nc = ncol1, nr = nrow1),
                list(ens = ens.reg, nc = ncol2, nr = nrow2))
  for (case in cases) {
    expect_equal(ncol(varImp(case$ens)), case$nc)
    expect_equal(ncol(varImp(case$ens, weight = FALSE)), case$nc - 1)
    expect_equal(ncol(varImp(case$ens, weight = TRUE)), case$nc)
    expect_equal(nrow(varImp(case$ens)), case$nr)
    expect_equal(nrow(varImp(case$ens, weight = FALSE)), case$nr)
    expect_equal(nrow(varImp(case$ens, weight = TRUE)), case$nr)
  }
})
context("Do metric extraction functions work as expected")
test_that("Metric is used correctly", {
  skip_on_cran()
  load(system.file("testdata/models.reg.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  load(system.file("testdata/models.class.rda",
                   package = "caretEnsemble", mustWork = TRUE))
  set.seed(2239)
  ens.class <- caretEnsemble(models.class, iter = 100)
  # varImp struggles with the rf in our test suite, why?
  models.subset <- models.reg[2:4]
  class(models.subset) <- "caretList"
  ens.reg <- caretEnsemble(models.subset, iter = 100)
  # Local aliases for the unexported metric helpers under test.
  getAUC <- caretEnsemble:::getAUC.train
  getRMSE <- caretEnsemble:::getRMSE.train
  getMetric <- caretEnsemble:::getMetric.train
  getMetricSD <- caretEnsemble:::getMetricSD.train
  # A classification model has no RMSE; a regression model has no AUC.
  expect_error(getRMSE(ens.class$models[[1]]))
  expect_error(getRMSE(ens.class$models[[3]]))
  expect_error(getMetric(ens.class$models[[3]], metric = "RMSE"))
  expect_error(getAUC(ens.reg$models[[1]]))
  expect_error(getAUC(ens.reg$models[[2]]))
  expect_error(getMetric(ens.reg$models[[2]], metric = "AUC"))
  # Known per-model AUCs; getMetric(metric = "AUC") must agree with getAUC.
  auc.expected <- c(0.9287978, 0.942959, 0.9185977, 0.9405823, 0.9250347)
  for (k in seq_along(auc.expected)) {
    mod <- ens.class$models[[k]]
    expect_equal(getAUC(mod), auc.expected[k], tol = 0.025)
    expect_equal(getAUC(mod), getMetric(mod, metric = "AUC"), tol = 0.025)
  }
  # Known per-model RMSEs for the raw regression fixtures; the generic
  # accessor must agree, and the wrong metric must error on every model.
  rmse.expected <- c(0.3334612, 0.324923, 0.324923, 0.3532128)
  for (k in seq_along(rmse.expected)) {
    expect_equal(getRMSE(models.reg[[k]]), rmse.expected[k], tol = 0.025)
    expect_equal(getRMSE(models.reg[[k]]),
                 getMetric(models.reg[[k]], metric = "RMSE"), tol = 0.025)
    expect_error(getMetric(models.reg[[k]], metric = "AUC"))
    expect_error(getMetric(models.class[[k]], metric = "RMSE"))
  }
  # With no metric supplied, getMetricSD picks one and says so.
  expect_message(getMetricSD(models.reg[[1]]))
  expect_message(getMetricSD(models.class[[1]]))
  # Known metric SDs; an explicit compatible metric must match the inferred
  # default, and an incompatible metric must error.
  sd.reg.expected <- c(0.05873828, 0.05517874, 0.05517874, 0.07023269)
  sd.class.expected <- c(0.0582078, 0.05196865, 0.06356099, 0.07360202)
  for (k in 1:4) {
    expect_error(getMetricSD(models.reg[[k]], metric = "AUC"))
    expect_error(getMetricSD(models.class[[k]], metric = "RMSE"))
    expect_equal(getMetricSD(models.reg[[k]]), sd.reg.expected[k], tol = 0.025)
    expect_equal(getMetricSD(models.reg[[k]]),
                 getMetricSD(models.reg[[k]], metric = "RMSE"))
    expect_equal(getMetricSD(models.class[[k]]), sd.class.expected[k], tol = 0.025)
    expect_equal(getMetricSD(models.class[[k]]),
                 getMetricSD(models.class[[k]], metric = "AUC"))
  }
})
context("Metrics in student examples")
test_that("metrics work for AUC in imbalanced example", {
skip_on_cran()
load(system.file("testdata/studentEns.rda",
package="caretEnsemble", mustWork=TRUE))
# Expected point estimates and SDs for the three component models of the
# imbalanced student ensemble, in model order.
expected.auc <- c(0.9340861, 0.873687, 0.8839286)
expected.sd <- c(0.04611638, 0.03840437, 0.0144596)
for (i in seq_along(expected.auc)) {
m <- studentEns$models[[i]]
expect_equal(caretEnsemble:::getMetric.train(m), expected.auc[i], tol = 0.025)
expect_equal(caretEnsemble:::getMetricSD.train(m), expected.sd[i], tol = 0.025)
# For classification models the default metric is AUC, so passing the
# metric explicitly must agree with the default.
expect_equal(caretEnsemble:::getMetric.train(m),
caretEnsemble:::getMetric.train(m, metric = "AUC"))
expect_equal(caretEnsemble:::getMetricSD.train(m),
caretEnsemble:::getMetricSD.train(m, metric = "AUC"))
}
})
context("Testing caretEnsemble generics")
test_that("No errors are thrown by a generics for ensembles", {
# Exercises the generic methods (summary, plot, fortify, autoplot) on both a
# classification and a regression ensemble built from stored test fixtures.
skip_on_cran()
load(system.file("testdata/studentEns.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.class.rda",
package="caretEnsemble", mustWork=TRUE))
set.seed(2239)
ens.class <- caretEnsemble(models.class, iter=100)
# varImp struggles with the rf in our test suite, why?
models.subset <- models.reg[2:4]
class(models.subset) <- "caretList"
ens.reg <- caretEnsemble(models.subset, iter=100)
# summary() output must mention the metric used to build the ensemble.
expect_output(summary(ens.class), "AUC")
expect_output(summary(ens.reg), "RMSE")
expect_output(summary(studentEns), "AUC")
expect_is(plot(ens.class), "ggplot")
expect_is(plot(ens.reg), "ggplot")
expect_is(plot(ens.reg$models[[2]]), "trellis")
tp <- plot(ens.class)
tp2 <- plot(ens.reg)
# Row counts and method labels of the plot data come from the fixtures.
expect_equal(nrow(tp$data), 5)
expect_equal(nrow(tp2$data), 2)
expect_equal(tp$data$method, names(ens.class$weights))
expect_equal(tp2$data$method, names(ens.reg$weights))
fort1 <- fortify(ens.class)
fort2 <- fortify(ens.reg)
expect_is(fort1, "data.frame")
expect_is(fort2, "data.frame")
expect_equal(nrow(fort1), 150)
expect_equal(nrow(fort2), 150)
expect_equal(ncol(fort1), 10)
expect_equal(ncol(fort2), 10)
expect_true(all(names(fort1) %in% names(fort2)))
# autoplot() draws to the active device, so open a temporary png file and
# remove it after checking that plotting succeeded.
test_plot_file <- "caretEnsemble_test_plots.png"
png(test_plot_file)
p1 <- autoplot(ens.class)
p2 <- autoplot(ens.reg)
p3 <- autoplot(ens.class, xvars=c("Petal.Length", "Petal.Width"))
p4 <- autoplot(ens.reg, xvars=c("Petal.Length", "Petal.Width"))
# autoplot is defined for ensembles, not for individual train objects.
expect_error(autoplot(ens.reg$models[[1]]))
dev.off()
expect_true(file.exists(test_plot_file))
unlink(test_plot_file)
})
context("Residual extraction")
test_that("Residuals provided by residuals are proper for ensemble objects", {
# Checks that residuals() for an ensemble equals observed - predicted (with
# the documented sign), and that multiResiduals() returns per-model
# residuals in long format that match each component model's residuals().
skip_on_cran()
load(system.file("testdata/studentEns.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.class.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/X.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/Y.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/X.class.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/Y.class.rda",
package="caretEnsemble", mustWork=TRUE))
set.seed(2239)
ens.class <- caretEnsemble(models.class, iter=100)
models.subset <- models.reg[2:4]
class(models.subset) <- "caretList"
ens.reg <- caretEnsemble(models.subset, iter=100)
residTest <- residuals(ens.class)
residTest2 <- residuals(ens.reg)
# For classification, residuals are computed against the 0/1 encoding of
# the outcome ("No" -> 0, anything else -> 1).
obs1 <- ifelse(Y.class == "No", 0, 1)
obs2 <- Y.reg
predTest <- predict(ens.class)
predTest2 <- predict(ens.reg)
# Residual sign convention is observed - predicted, not the reverse.
expect_identical(residTest, obs1 - predTest)
expect_identical(residTest2, obs2 - predTest2)
expect_false(identical(residTest2, predTest2 -obs2))
expect_false(identical(residTest, predTest -obs1))
mr1 <- multiResiduals(ens.class)
mr2 <- multiResiduals(ens.reg)
expect_identical(names(mr1), names(mr2))
expect_identical(names(mr1), c("method", "id", "yhat", "resid", "y"))
# One row per observation per component model.
expect_equal(nrow(mr1), 150 * length(ens.class$models))
expect_equal(nrow(mr2), 150 * length(ens.reg$models))
expect_equal(ncol(mr1), ncol(mr2))
mr1 <- mr1[order(mr1$method, mr1$id),]
mr2 <- mr2[order(mr2$method, mr2$id),]
mr2.tmp1 <- residuals(ens.reg$models[[1]])
attributes(mr2.tmp1) <- NULL
mr2.tmp2 <- residuals(ens.reg$models[[2]])
# Per-method slices of the long data must match each model's residuals().
expect_true(identical(round(mr2[mr2$method == "lm", "resid"], 5), round(mr2.tmp1, 5)))
expect_true(identical(round(mr2[mr2$method == "knn", "resid"], 5), round(mr2.tmp2, 5)))
#I think the factors are backward somewhere in here
#Also, caret doesn't yet support residuals for classification
# mr_class_wide <- as.data.frame(lapply(ens.class$models, residuals))
# names(mr_class_wide) <- lapply(ens.class$models, function(x) x$method)
# mr_class_long <- reshape(mr_class_wide, direction = "long", varying = names(mr_class_wide),
# v.names = "resid", timevar = "method", times = names(mr_class_wide))
# expect_equal(mr_class_long[order(mr_class_long$method, mr_class_long$id),"resid"], -1*mr1[order(mr1$method, mr1$id),"resid"])
mr_reg_wide <- as.data.frame(lapply(ens.reg$models, residuals))
names(mr_reg_wide) <- lapply(ens.reg$models, function(x) x$method)
mr_reg_long <- reshape(mr_reg_wide, direction = "long", varying = names(mr_reg_wide),
v.names = "resid", timevar = "method", times = names(mr_reg_wide))
expect_equal(mr_reg_long[order(mr_reg_long$method, mr_reg_long$id),"resid"], mr2[order(mr2$method, mr2$id),"resid"])
# residuals() should also work when the ensemble's modelType is missing.
ens.class2 <- ens.class
ens.reg2 <- ens.reg
ens.class2$modelType <- ens.reg2$modelType <- NULL
expect_equal(residuals(ens.class2), residuals(ens.class))
expect_equal(residuals(ens.reg2), residuals(ens.reg))
})
context("Does prediction method work for classification")
test_that("We can ensemble models and handle missingness across predictors", {
# Verifies extractModRes/extractModFrame: the reported per-model metric is
# the resample (AUC/RMSE) summary, not the raw max of the results table.
skip_on_cran()
load(system.file("testdata/models.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.class.rda",
package="caretEnsemble", mustWork=TRUE))
set.seed(2239)
ens.class <- caretEnsemble(models.class, iter=100)
models.subset <- models.reg[2:4]
class(models.subset) <- "caretList"
ens.reg <- caretEnsemble(models.subset, iter=100)
modres1 <- caretEnsemble:::extractModRes(ens.class)
modres2 <- caretEnsemble:::extractModRes(ens.reg)
# Column 2 must NOT simply be the maximum of the Accuracy/RMSE columns...
expect_false(identical(modres1[1, 2], max(ens.class$models[[1]]$results$Accuracy)))
expect_false(identical(modres2[1, 2], max(ens.reg$models[[1]]$results$RMSE)))
expect_false(identical(modres1[2, 2], max(ens.class$models[[2]]$results$Accuracy)))
expect_false(identical(modres2[2, 2], max(ens.reg$models[[2]]$results$RMSE)))
expect_false(identical(modres1[3, 2], max(ens.class$models[[3]]$results$Accuracy)))
expect_false(identical(modres1[4, 2], max(ens.class$models[[4]]$results$Accuracy)))
expect_false(identical(modres1[5, 2], max(ens.class$models[[5]]$results$Accuracy)))
# ...it must be the AUC computed by getAUC.train for each component model.
expect_identical(modres1[1, 2], caretEnsemble:::getAUC.train(ens.class$models[[1]]))
expect_identical(modres1[2, 2], caretEnsemble:::getAUC.train(ens.class$models[[2]]))
expect_identical(modres1[3, 2], caretEnsemble:::getAUC.train(ens.class$models[[3]]))
expect_identical(modres1[4, 2], caretEnsemble:::getAUC.train(ens.class$models[[4]]))
expect_identical(modres1[5, 2], caretEnsemble:::getAUC.train(ens.class$models[[5]]))
# Column 3 is the metric SD at the best tuning parameters ("best"); for
# some models this differs from the SD across all tuning values ("all").
expect_identical(modres1[1, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[1]], "AUC", which = "best"))
expect_identical(modres1[2, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[2]], "AUC", which = "best"))
expect_identical(modres1[3, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[3]], "AUC", which = "best"))
expect_identical(modres1[4, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[4]], "AUC", which = "best"))
expect_equal(modres1[5, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[5]], "AUC", which = "best"))
expect_identical(modres1[2, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[2]], "AUC", which = "all"))
expect_false(identical(modres1[3, 3], caretEnsemble:::getMetricSD.train(ens.class$models[[3]],
"AUC", which = "all")))
expect_identical(modres2[1, 3], caretEnsemble:::getMetricSD.train(ens.reg$models[[1]], "RMSE", which = "best"))
expect_identical(modres2[2, 3], caretEnsemble:::getMetricSD.train(ens.reg$models[[2]], "RMSE", which = "best"))
expect_identical(modres2[1, 3], caretEnsemble:::getMetricSD.train(ens.reg$models[[1]], "RMSE", which = "all"))
expect_false(identical(modres2[2, 3], caretEnsemble:::getMetricSD.train(ens.reg$models[[2]],
"RMSE", which = "all")))
# extractModFrame builds the combined model frame: wider than any single
# model's trainingData but with the same number of rows.
modF <- caretEnsemble:::extractModFrame(ens.class)
modF2 <- caretEnsemble:::extractModFrame(ens.reg)
expect_true(ncol(modF) > ncol(ens.class$models[[2]]$trainingData))
expect_true(ncol(modF2) > ncol(ens.reg$models[[1]]$trainingData))
expect_true(nrow(modF) == nrow(ens.class$models[[2]]$trainingData))
expect_true(nrow(modF2) == nrow(ens.reg$models[[1]]$trainingData))
})
context("Does prediction method work for regression")
test_that("We can ensemble models and handle missingness across predictors", {
# Prediction on data containing NAs must raise an error for every
# combination of the se/return_weights/keepNA options exercised below.
skip_on_cran()
load(system.file("testdata/models.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/X.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/Y.reg.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/models.class.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/X.class.rda",
package="caretEnsemble", mustWork=TRUE))
load(system.file("testdata/Y.class.rda",
package="caretEnsemble", mustWork=TRUE))
ens.reg <- caretEnsemble(models.reg, iter=1000)
models.class2 <- models.class[c(2:5)]
class(models.class2) <- "caretList"
ens.class <- caretEnsemble(models.class2, iter=1000)
# Inject NAs into a few scattered cells of a small slice of training data.
newDat <- ens.class$models[[4]]$trainingData
newDat[2, 2] <- NA
newDat[3, 3] <- NA
newDat[4, 4] <- NA
newDat <- newDat[1:10, ]
expect_error(predict(ens.class, newdata = newDat, return_weights=TRUE, se=FALSE, keepNA=TRUE))
expect_error(predict(ens.reg, newdata = newDat, return_weights=TRUE, se=TRUE, keepNA=TRUE))
expect_error(predict(ens.reg, newdata = newDat, return_weights=FALSE, se=FALSE, keepNA=TRUE))
expect_error(predict(ens.reg, newdata = newDat, return_weights=FALSE, se=TRUE, keepNA=TRUE))
})
#Reg tests
test_that("Prediction options are respected in regression and classification", {
# Sweeps all 8 combinations of keepNA/se/return_weights and checks the
# return type (data.frame when se=TRUE, numeric otherwise) and the
# presence/absence of the "weights" attribute.
skip_on_cran()
load(system.file("testdata/models.reg.rda", package="caretEnsemble", mustWork=TRUE))
ens.reg <- caretEnsemble(models.reg, iter=1000)
tests <- expand.grid(keepNA=0:1, se=0:1, return_weights=0:1)
tests <- data.frame(lapply(tests, as.logical))
for(i in 1:nrow(tests)){
p <- predict(
ens.reg,
keepNA=tests[i,"keepNA"],
se=tests[i,"se"],
return_weights=tests[i,"return_weights"]
)
if(tests[i,"se"]){
expect_is(p, "data.frame")
preds <- p
} else{
expect_is(p, "numeric")
preds <- p
}
if(tests[i,"return_weights"]){
expect_is(attr(preds, which = "weights"), "matrix")
} else{
expect_null(attr(preds, which = "weights"))
}
}
#Class tests
load(system.file("testdata/models.class.rda", package="caretEnsemble", mustWork=TRUE))
ens.class <- caretEnsemble(models.class, iter=1000)
tests <- expand.grid(keepNA=0:1, se=0:1, return_weights=0:1)
tests <- data.frame(lapply(tests, as.logical))
for(i in 1:nrow(tests)){
p <- predict(
ens.class,
keepNA=tests[i,"keepNA"],
se=tests[i,"se"],
return_weights=tests[i,"return_weights"]
)
if(tests[i,"se"]){
expect_is(p, "data.frame")
preds <- p
} else{
expect_is(p, "numeric")
preds <- p
}
# keepNA is deprecated; using it must emit the deprecation warning.
if(tests[i, "keepNA"]){
expect_warning(predict(
ens.class,
keepNA=tests[i,"keepNA"],
se=tests[i,"se"],
return_weights=tests[i,"return_weights"]
), "argument keepNA is deprecated; missing data cannot be safely handled.")
}
if(tests[i,"return_weights"]){
expect_is(attr(preds, which = "weights"), "matrix")
} else{
expect_null(attr(preds, which = "weights"))
}
}
})
|
gllimIID1D = function(tapp,yapp,iR,in_K,in_r=NULL,maxiter=100,Lw=0,cstr=NULL,verb=0,in_theta=NULL){
# Adapted by F. Forbes from standard GLLiM
# Remark: hybrid case not done
# MaximizationIID Faster than first (old) version
# %%%%%%%% General EM Algorithm for Gaussian Locally Linear Mapping %%%%%%%%%
# %%% Author: Antoine Deleforge (April 2013) - antoine.deleforge@inria.fr %%%
# % Description: Compute maximum likelihood parameters theta and posterior
# % probabilities r=p(z_n=k|x_n,y_n;theta) of a gllim model with constraints
# % cstr using N associated observations t and y.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%% Input %%%%
# %- t (LtxN) % Training latent variables
# %- y (DxN) % Training observed variables
# DR x N , R iid replications (R=iR but R is already used)
# iR = number of replications
# %- in_K (int) % Initial number of components
# % <Optional>
# %- Lw (int) % Dimensionality of hidden components (default 0)
# %- maxiter (int) % Maximum number of iterations (default 100)
# %- in_theta (struct) % Initial parameters (default NULL)
# % | same structure as output theta
# %- in_r (NxK) % Initial assignments (default NULL)
# %- cstr (struct) % Constraints on parameters theta (default NULL,'')
# % - cstr$ct % fixed value (LtxK) or ''=uncons.
# % - cstr$cw % fixed value (LwxK) or ''=fixed to 0
# % - cstr$Gammat % fixed value (LtxLtxK) or ''=uncons.
# % | or {'','d','i'}{'','*','v'} [1]
# % - cstr$Gammaw % fixed value (LwxLwxK) or ''=fixed to I
# % - cstr$pi % fixed value (1xK) or ''=uncons. or '*'=equal
# % - cstr$A % fixed value (DxL) or ''=uncons.
# % - cstr$b % fixed value (DxK) or ''=uncons.
# % - cstr$Sigma % fixed value (DxDxK) or ''=uncons.
# % | or {'','d','i'}{'','*'} [1]
# %- verb {0,1,2} % Verbosity (default 1)
# %%%% Output %%%%
# %- theta (struct) % Estimated parameters (L=Lt+Lw)
# % - theta.c (LxK) % Gaussian means of X
# % - theta.Gamma (LxLxK) % Gaussian covariances of X
# % - theta.pi (1xK) % Gaussian weights of X
# % - theta.A (DxLxK) % Affine transformation matrices
# % - theta.b (DxK) % Affine transformation vectors
# % - theta.Sigma (DxDxK) % Error covariances
# %- r (NxK) % Posterior probabilities p(z_n=k|x_n,y_n;theta)
# %%% [1] 'd'=diag., 'i'=iso., '*'=equal for all k, 'v'=equal det. for all k
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % ======================Input Parameters Retrieval=========================
# To do later, but perhaps not essential for now?
# [Lw, maxiter, in_theta, in_r, cstr, verb] = ...
# process_options(varargin,'Lw',0,'maxiter',100,'in_theta',NULL,...
# 'in_r',NULL,'cstr',struct(),'verb',1);
# % ==========================Default Constraints============================
# Fill in any constraint fields the caller did not supply; the default
# Sigma constraint is isotropic ('i').
if(! "ct" %in% names(cstr)) cstr$ct=NULL;
if(! "cw" %in% names(cstr)) cstr$cw=NULL;
if(! "Gammat" %in% names(cstr)) cstr$Gammat=NULL;
if(! "Gammaw" %in% names(cstr)) cstr$Gammaw=NULL;
if(! "pi" %in% names(cstr)) cstr$pi=NULL;
if(! "A" %in% names(cstr)) cstr$A=NULL;
if(! "b" %in% names(cstr)) cstr$b=NULL;
if(! "Sigma" %in% names(cstr)) cstr$Sigma="i";
if (ncol(tapp) != ncol(yapp)) {stop("Observations must be in columns and variables in rows")}
# iid adaptation
D= nrow(yapp)/iR # should be D= 1 in this case
N = ncol(yapp)
# yapp is a DR x N matrix turned into an array (D,N,R): replicate i
# occupies rows (1+D*(i-1)):(D*i) of yapp.
yappR<-array(0,c(D,N,iR))
for (i in 1:iR){
yappR[,,i]<-yapp[(1+D*(i-1)):(D*i),]
}
# When D=1, yappR[,,1] drops to a vector; t() restores a 1 x N matrix.
yapp1<-t(yappR[,,1]) # for emgmIID and init
ExpectationZIID = function(tapp,yapp,yappR,iR,th,verb){
  # E-Z step of the EM algorithm for GLLiM with iR i.i.d. replicates of y.
  #
  # Args:
  #   tapp  : Lt x N matrix of latent/training variables.
  #   yapp  : (D*iR) x N matrix of stacked replicated observations.
  #   yappR : D x N x iR array, the unstacked replicates of yapp.
  #   iR    : number of i.i.d. replications.
  #   th    : current parameter list (pi, c, Gamma, A, b, Sigma).
  #   verb  : verbosity level (0-3).
  #
  # Returns a list with:
  #   r  : N x K' matrix of posteriors p(z_n=k | t_n, y_n; th), empty
  #        clusters removed (K' <= K).
  #   LL : scalar log-likelihood of the data under th.
  #   ec : logical length-K vector, FALSE where a cluster was empty.
  if(verb>=1) print(' EZ');
  if(verb>=3) print(' k=');
  D= nrow(yapp)/iR
  N = ncol(yapp)
  K=length(th$pi);
  Lt = nrow(tapp)
  L = nrow(th$c)
  Lw=L-Lt;
  # When D=1, yappR[,,1] drops to a vector; t() restores a 1 x N matrix
  # (used only for the emgmIID re-initialisation below).
  yapp1<-t(yappR[,,1])
  logr=matrix(NaN,N,K);
  for (k in 1:K){
    if(verb>=3) print(k);
    muyk=th$b[,k,drop=FALSE]; #% Dx1 (here 1 x 1)
    covyk= th$Sigma[,,k]; #% DxD (here 1 x 1)
    if(Lt>0)
    {# drop=FALSE keeps the matrix format when L == 1
     if (L==1) {Atk=th$A[,1:Lt,k,drop=FALSE];} else {Atk=th$A[,1:Lt,k]} #% DxLt
     muyk= sweep(Atk%*%tapp,1,muyk,"+");#% DxN
    }
    # (Hybrid case Lw > 0 is intentionally not handled in this IID variant.)
    logr[,k] = log(th$pi[k]); # recycled over the N observations
    # IID change: the y-density factorises over replicates, so the
    # log-densities of all iR replicates are summed.
    # BUG FIX: the original loop was `for (i in i:iR)`, which picked up a
    # stale `i` (= iR) from the enclosing scope and therefore summed only
    # the LAST replicate instead of all of them.
    sumRpdf<-0
    for (i in seq_len(iR)){sumRpdf<-sumRpdf+loggausspdf(t(yappR[,,i]),muyk,covyk)}
    logr[,k] = logr[,k] + sumRpdf ;
    # The t-density contribution does not depend on the replications.
    if (Lt>0)
      logr[,k] = logr[,k]+ loggausspdf(tapp,th$c[1:Lt,k,drop=FALSE],th$Gamma[1:Lt,1:Lt,k]);
  } # end for on k
  lognormr=logsumexp(logr,2);
  LL=sum(lognormr);
  r=exp(sweep(logr,1, lognormr,"-"));
  # % remove empty clusters (zero or non-finite total responsibility)
  ec=rep(TRUE,K); #% false if component k is empty.
  for (k in 1:K){
    if(sum(r[,k])==0 || !is.finite(sum(r[,k])))
    {ec[k]=FALSE;
     if(verb>=1) {print(paste(' WARNING: CLASS ',k,' HAS BEEN REMOVED'));}
    }
  } # end for on k
  if (sum(ec)==0)
  {# All clusters collapsed: re-initialise responsibilities with a GMM on
   # the joint (t, y) data; the first replicate is used for this.
   print('REINIT! ');
   r = emgmIID(rbind(tapp,yapp1), K, 2, verb)$R;
   ec=rep(TRUE,ncol(r));}
  else
  {# drop=FALSE keeps r a matrix when a single cluster survives.
   r=r[,ec,drop=FALSE];}
  return(list(r=r,LL=LL,ec=ec))
}
# not used
#ExpectationW=function(tapp,yapp,th,verb){
# if(verb>=1) print(' EW');
# if(verb>=3) print(' k=');
# D = nrow(yapp) ; N=ncol(yapp)
# K=length(th$pi);
# Lt = nrow(tapp);
# L = nrow(th$c)
# Lw=L-Lt;
# if(Lw==0)
# {muw=NULL;
# Sw=NULL;}
# Sw=array(0,dim=c(Lw,Lw,K));
# muw=array(0,dim=c(Lw,N,K));
# for (k in 1:K){
# if(verb>=3) print(k)
# Atk=th$A[,1:Lt,k]; #%DxLt
# Sigmak=th$Sigma[,,k]; #%DxD
# if (Lw==0)
# {Awk = NULL ; Gammawk=NULL ;cwk =NULL;invSwk=NULL} else {Awk=th$A[,(Lt+1):L,k]; Gammawk=th$Gamma[(Lt+1):L,(Lt+1):L,k];cwk=th$c[(Lt+1):L,k];invSwk=diag(Lw)+tcrossprod(Gammawk,Awk) %*% solve(Sigmak)%*%Awk;} #%DxLw # gerer le cas ou Lw=0 Matlab le fait tout seul
# if (!is.null(tapp))
# {Atkt=Atk%*%tapp;}
# else
# {Atkt=0;}
# if (Lw==0) {muw=NULL;Sw=NULL;} else {
# #invSwk\bsxfun(@plus,Gammawk*Awk'/Sigmak*bsxfun(@minus,y-Atkt,th.b(:,k)),cwk)
# muw[,,k]= solve(invSwk,sweep(Gammawk %*% t(Awk) %*% solve(Sigmak) %*% sweep(yapp-Atkt,1,th$b[,k],"-"),1,cwk,"+")); #%LwxN
# Sw[,,k]=solve(invSwk,Gammawk);}
# }
#return(list(muw=muw,Sw=Sw))
#}
# end not used
MaximizationIID = function(tapp,yapp,yappR, iR, r,muw,Sw,cstr,verb){
# M step of the EM algorithm for GLLiM with iR i.i.d. replicates of y.
#
# Args:
#   tapp  : Lt x N matrix of latent/training variables.
#   yapp  : (D*iR) x N matrix of stacked replicated observations.
#   yappR : D x N x iR array, the unstacked replicates of yapp.
#   iR    : number of replications.
#   r     : N x K matrix of posterior responsibilities.
#   muw, Sw : hidden-component posterior moments (NULL when Lw = 0,
#             which is the only case exercised by this IID variant).
#   cstr  : constraint list (see gllimIID1D header).
#   verb  : verbosity level (0-3).
#
# Returns the updated parameter list th (c, Gamma, pi, A, b, Sigma).
# Strategy: pi_k, c_k, Gamma_k do not depend on the replicates and are
# computed once; A_k, b_k, Sigma_k are computed per replicate and averaged.
# yapp is a DR x N matrix turned into an array yappR (D,N,R)
if(verb>=1) print(' M');
if(verb>=3) print(' k=');
K = ncol(r);
D = nrow(yapp)/iR;
N=ncol(yapp)
Lt = nrow(tapp)
Lw = ifelse(is.null(muw),0,nrow(muw))
L=Lt+Lw;
th = list()
th$c=matrix(NaN,nrow=L,ncol=K)
th$Gamma=array(0,dim=c(L,L,K));
#if(Lw>0)
# {th$c[(Lt+1):L,]=cstr$cw; #% LwxK
# th$Gamma[(Lt+1):L,(Lt+1):L,]=cstr$Gammaw;} #% LwxLwxK}
th$pi=rep(NaN,K);
th$A=array(NaN,dim=c(D,L,K));
th$b=matrix(NaN,nrow=D,ncol=K);
th$Sigma= array(NaN,dim=c(D,D,K));
# IID case special here: accumulators for the per-replicate estimates of
# A, b, Sigma, averaged over replicates after the irep loop.
#sumth = list()
sumthA=array(0,dim=c(D,L,K));
sumthb=matrix(0,nrow=D,ncol=K);
sumthSigma= array(0,dim=c(D,D,K));
# for each replication we can apply the usual gllim and then take the
# mean of the eastimated for bk,Sigmak. the ck, Gammak param do not change
# Attention Ak requires some care, also a mean in the end...
# xbar_k and ybar_k can be computed before, they do not vary with the replication
rk_bar=rep(0,K);
# rk = matrix(0,N,K)
yk_bar<-matrix(0,D,K)
xk_bar<-matrix(0,Lt,K)
for (k in 1:K){
rk=r[,k]; #% 1xN
rk_bar[k]=sum(rk); #% 1x1
yk_bartemp<-0
# trick to be improve to deal with case D=1
# yk_bar is the responsibility-weighted mean of y, averaged over replicates
for (iirep in 1:iR){yk_bartemp<-yk_bartemp + rowSums(sweep(t(yappR[,,iirep]),2,rk,"*"))/rk_bar[k]}
yk_bar[,k]<-yk_bartemp/iR
#% Dx1
xk_bar[,k]<-rowSums(sweep(tapp,2,rk,"*"))/rk_bar[k] # L x 1
### M steps that do not depend on iR, ie pi_k, ck, Gammak
if(verb>=3) print(k);
# % Posteriors' sums
##rk=r[,k]; #% 1xN
##rk_bar[k]=sum(rk); #% 1x1
#debut if
if(Lt>0)
{
if(verb>=3) {print('c');}
#% Compute optimal mean ctk
if(is.null(cstr$ct))
{th$c[1:Lt,k]=rowSums(sweep(tapp,2,rk,"*"))/rk_bar[k];}# % Ltx1
else {th$c[1:Lt,k]=cstr$ct[,k];}
#% Compute optimal covariance matrix Gammatk
if(verb>=3) {print('Gt');}
diffGamma= sweep(sweep(tapp,1,th$c[1:Lt,k],"-"),2,sqrt(rk),"*"); #% LtxN
if( is.null(cstr$Gammat) || (length(cstr$Gammat)==1 & cstr$Gammat=='*')) # | ou ||?
# %%%% Full Gammat
# need trick D=1 with transpose?
{th$Gamma[1:Lt,1:Lt,k]=tcrossprod(diffGamma)/rk_bar[k]; #% DxD
}#th$Gamma[1:Lt,1:Lt,k]=th$Gamma[1:Lt,1:Lt,k];
else
{
if( !is.character(cstr$Gammat))
#%%%% Fixed Gammat
{th$Gamma[1:Lt,1:Lt,k]=cstr$Gammat[,,k]; }
else
{
if(cstr$Gammat[1]=='d' | cstr$Gammat[1]=='i')
#% Diagonal terms
{gamma2=rowSums(diffGamma^2)/rk_bar[k]; #%Ltx1
if(cstr$Gammat[1]=='d')
#%%% Diagonal Gamma
{th$Gamma[1:Lt,1:Lt,k]=diag(gamma2);} #% LtxLt
else
#%%% Isotropic Gamma
{th$Gamma[1:Lt,1:Lt,k]=mean(gamma2)*diag(Lt);} #% LtxLt
}
else
{if(cstr$Gammat[1]=='v')
#%%%% Full Gamma
{th$Gamma[1:Lt,1:Lt,k]=tcrossprod(diffGamma)/rk_bar[k];} #% LtxLt
else {# cstr$Gammat,
stop(' ERROR: invalid constraint on Gamma.'); }
}
}
}
} # fin if
# % Compute optimal weight pik
th$pi[k]=rk_bar[k]/N; #% 1x1
}
### M steps that depend on the yappR
for (irep in 1:iR){
#trick for D=1
yappi<-t(yappR[,,irep])
# not used but kept for the else # here x = tapp
# NOTE(review): the Lw>0 branch below reads muw[,,k]/Sw[,,k] with `k` left
# over from the previous loop (= K); it is dead code while Lw = 0, but
# would need fixing if the hybrid case were ever enabled.
if(Lw>0)
{x=rbind(tapp,muw[,,k]); #% LxN
Skx=rbind(cbind(matrix(0,Lt,Lt),matrix(0,Lt,Lw)),cbind(matrix(0,Lw,Lt),Sw[,,k])); }#% LxL
else
{x=tapp; #% LtxN
Skx=matrix(0,Lt,Lt);} #%LtxLt
# end if else
for (k in 1:K){
rk=r[,k]
if(verb>=3) {print('A');}
# if else
if(is.null(cstr$b))
{# % Compute weighted means of y and x
#### These means are changing in the iid case
#### yk_bar is now the mean over replications (iR)
#### This part has been factored out for speed...
if(L==0)
{xk_bar[,k]=NULL;}
}
else
#
{yk_bar[,k]=cstr$b[,k];
xk_bar[,k]=rep(0,L);
th$b[,k]=cstr$b[,k];
} # end if else
#% Compute weighted, mean centered y and x
weights=sqrt(rk); #% 1xN
# trick D=1 not here?
y_stark=sweep(yappi,1,t(yk_bar[,k]),"-"); #% DxN #col or row?
y_stark= sweep(y_stark,2,weights,"*"); #% DxN #col or row?
if(L>0)
{ x_stark=sweep(tapp,1,xk_bar[,k],"-"); #% LxN
x_stark= sweep(x_stark,2,weights,"*"); #% LxN
}
else
{x_stark=NULL;}
#% Robustly compute optimal transformation matrix Ak
# Cascade of solvers: exact solve when well conditioned, pseudo-inverse
# (ginv) otherwise.
#warning off MATLAB:nearlySingularMatrix;
if(!all(Skx==0))
{if(N>=L & det(Skx+tcrossprod(x_stark)) >10^(-8))
{th$A[,,k]=tcrossprod(y_stark,x_stark) %*% qr.solve(Skx+tcrossprod(x_stark));} #% DxL
else
{th$A[,,k]=tcrossprod(y_stark,x_stark) %*% ginv(Skx+tcrossprod(x_stark));} #%DxL
}
else
{if(!all(x_stark==0))
{if(N>=L & det(tcrossprod(x_stark))>10^(-8))
{th$A[,,k]=tcrossprod(y_stark,x_stark) %*% qr.solve(tcrossprod(x_stark));} #% DxL
else
{if(N<L && det(crossprod(x_stark))>10^(-8))
{th$A[,,k]=y_stark %*% solve(crossprod(x_stark)) %*% t(x_stark);} #% DxL
else
{if(verb>=3) print('p')
# trick D=1
th$A[,,k]=t(y_stark %*% ginv(x_stark));} #% DxL
}}
else
{#% Correspond to null variance in cluster k or L=0:
if(verb>=1 & L>0) print('null var\n');
th$A[,,k]=0; # % DxL
}
}
# end else
if(verb>=3)print('b');
# % Intermediate variable wk=y-Ak*x
if(L>0)
# trick D=1 not needed here?
{wk=yappi-th$A[,,k]%*%x;} #% DxN
else
{wk=yappi;}
#% Compute optimal transformation vector bk
if(is.null(cstr$b))
th$b[,k]=t(rowSums(sweep(wk,2,rk,"*")))/rk_bar[k]; #% Dx1
if(verb>=3) print('S');
#% Compute optimal covariance matrix Sigmak
# if(Lw>0)
# { Awk=th$A[,(Lt+1):L,k];
# Swk=Sw[,,k];
# ASAwk=Awk%*%tcrossprod(Swk,Awk);}
#else
ASAwk=0;
# trick D=1 not here
diffSigma=sweep(sweep(wk,1,t(th$b[,k]),"-"),2,sqrt(rk),"*"); #%DxN
# NOTE(review): this %in% test assumes cstr$Sigma is a character scalar;
# a fixed numeric Sigma would instead be caught by the branch below.
if (cstr$Sigma %in% c("","*"))
{#%%%% Full Sigma
# trick D=1 not here?
th$Sigma[,,k]=tcrossprod(diffSigma)/rk_bar[k]; #% DxD
th$Sigma[,,k]=th$Sigma[,,k]+ASAwk; }
else
{
if(!is.character(cstr$Sigma))
#%%%% Fixed Sigma
{th$Sigma=cstr$Sigma;}
else {
if(cstr$Sigma[1]=='d' || cstr$Sigma[1]=='i')
#% Diagonal terms
# trick D=1
{sigma2=t(rowSums(diffSigma^2)/rk_bar[k]); #%Dx1
if(cstr$Sigma[1]=='d')
{#%%% Diagonal Sigma
# trick D=1
th$Sigma[,,k]=t(diag(sigma2,ncol=D,nrow=D)); #% DxD
if (is.null(dim(ASAwk))) {th$Sigma[,,k]=th$Sigma[,,k] + diag(ASAwk,ncol=D,nrow=D)}
else {th$Sigma[,,k]=th$Sigma[,,k]+diag(diag(ASAwk));}
}
else
{#%%% Isotropic Sigma
th$Sigma[,,k]=mean(sigma2)*diag(D); #% DxD
if (is.null(dim(ASAwk))) {th$Sigma[,,k]=th$Sigma[,,k]+sum(diag(ASAwk,ncol=D,nrow=D))/D*diag(D);}
else {th$Sigma[,,k]=th$Sigma[,,k]+sum(diag(ASAwk))/D*diag(D);}
}
}
else { cstr$Sigma ;
stop(' ERROR: invalid constraint on Sigma.');}
}
}
#% Avoid numerical problems on covariances:
# A small ridge (1e-8 I) keeps Gamma and Sigma invertible.
if(verb>=3) print('n');
if(! is.finite(sum(th$Gamma[1:Lt,1:Lt,k]))) {th$Gamma[1:Lt,1:Lt,k]=0;}
th$Gamma[1:Lt,1:Lt,k]=th$Gamma[1:Lt,1:Lt,k]+1e-8*diag(Lt);
if(! is.finite(sum(th$Sigma[,,k]))) {th$Sigma[,,k]=0;}
th$Sigma[,,k]=th$Sigma[,,k]+1e-8*diag(D);
if(verb>=3) print(',');
} # end for on k
if(verb>=3) print('end');
if (cstr$Sigma=="*")
{#%%% Equality constraint on Sigma
th$Sigma=sweep(th$Sigma ,3,rk_bar,"*");
th$Sigma=array(apply(th$Sigma,c(1,2),mean),dim=c(D,D,K))
}
if( !is.null(cstr$Gammat) && cstr$Gammat=='v')
{#%%% Equal volume constraint on Gamma
detG=rep(0,K);
for (k in 1:K){
if (D==1) {detG[k]=th$Gamma[1:Lt,1:Lt,k]}
else {detG[k]=det(th$Gamma[1:Lt,1:Lt,k]);} #% 1x1
th$Gamma[1:Lt,1:Lt,k] = th$Gamma[1:Lt,1:Lt,k] / detG[k]
}
th$Gamma[1:Lt,1:Lt,]=sum(detG^(1/Lt)*th$pi)*th$Gamma[1:Lt,1:Lt,];
}
if(is.character(cstr$Gammat) && !is.null(cstr$Gammat) && cstr$Gammat[length(cstr$Gammat)]=='*')
{#%%% Equality constraint on Gammat
for (k in 1:K){
th$Gamma[1:Lt,1:Lt,k]=th$Gamma[1:Lt,1:Lt,k]%*%diag(rk_bar);
th$Gamma[1:Lt,1:Lt,k]=matrix(1,Lt,Lt) * sum(th$Gamma[1:Lt,1:Lt,k])/N;
}
}
if( ! is.character(cstr$pi) || is.null(cstr$pi))
{if(! is.null(cstr$pi)) {th$pi=cstr$pi;}} else {
if (!is.null(cstr$pi) && cstr$pi[1]=='*')
{th$pi=1/K*rep(1,K);} else {stop(' ERROR: invalid constraint on pi.');}
}
# Accumulate the per-replicate estimates of Sigma, A, b.
sumthSigma <- sumthSigma + th$Sigma
sumthA <- sumthA + th$A
sumthb <- sumthb + th$b
} # end for on irep
# Final IID estimates: average over the iR replicates.
th$A<-sumthA/iR
th$b<-sumthb/iR
th$Sigma<-sumthSigma/iR
#print(th$A[,,1])
return(th)
} # end function Maximization
remove_empty_clusters= function(th1,cstr1,ec){
  # Drop the parameters (and matching fixed constraints) of clusters flagged
  # as empty by the E step.
  #
  # Args:
  #   th1   : parameter list (c LxK, Gamma LxLxK, pi 1xK, A DxLxK, b DxK,
  #           Sigma DxDxK).
  #   cstr1 : constraint list; fixed numeric constraints are subset, string
  #           constraints ('d', 'i', '*', ...) are left untouched.
  #   ec    : logical length-K vector, FALSE for clusters to remove.
  #
  # Returns list(th=..., cstr=...) with only the surviving clusters.
  th=th1;
  cstr=cstr1;
  if(sum(ec) != length(ec))
  {# BUG FIX: all matrix/array subsets below use drop=FALSE; without it,
   # keeping exactly one cluster collapsed th$c to a vector and th$Gamma /
   # th$A / th$Sigma to matrices, breaking downstream [.,.,k] indexing.
   if( !is.null(cstr$ct) && !is.character(cstr$ct))
     cstr$ct=cstr$ct[,ec,drop=FALSE];
   if(!is.null(cstr$cw) && !is.character(cstr$cw))
     cstr$cw=cstr$cw[,ec,drop=FALSE];
   if(!is.null(cstr$Gammat) && !is.character(cstr$Gammat))
     cstr$Gammat=cstr$Gammat[,,ec,drop=FALSE];
   if(!is.null(cstr$Gammaw) && !is.character(cstr$Gammaw))
     cstr$Gammaw=cstr$Gammaw[,,ec,drop=FALSE];
   if(!is.null(cstr$pi) && !is.character(cstr$pi))
     # A fixed pi may be stored as a 1xK matrix or a plain vector.
     {if (is.matrix(cstr$pi)) {cstr$pi=cstr$pi[,ec,drop=FALSE];} else {cstr$pi=cstr$pi[ec];}}
   if(!is.null(cstr$A) && !is.character(cstr$A))
     cstr$A=cstr$A[,,ec,drop=FALSE];
   if(!is.null(cstr$b) && !is.character(cstr$b))
     cstr$b=cstr$b[,ec,drop=FALSE];
   if(!is.null(cstr$Sigma) && !is.character(cstr$Sigma))
     cstr$Sigma=cstr$Sigma[,,ec,drop=FALSE];
   th$c=th$c[,ec,drop=FALSE];
   th$Gamma=th$Gamma[,,ec,drop=FALSE];
   th$pi=th$pi[ec];
   th$A=th$A[,,ec,drop=FALSE];
   th$b=th$b[,ec,drop=FALSE];
   th$Sigma=th$Sigma[,,ec,drop=FALSE];
  }
  return(list(th=th,cstr=cstr))
}
# % ==========================EM initialization==============================
Lt=nrow(tapp)
L=Lt+Lw;
D = nrow(yapp)/iR ; N = ncol(yapp);
if(verb>=1) {print('EM Initializations');}
if(!is.null(in_theta)) {
  # Warm start: use the caller-supplied parameters, run one E-Z step to get
  # the implied responsibilities, and prune clusters that are already empty.
  theta=in_theta;
  K=length(theta$pi);
  if(is.null(cstr$cw))
    {cstr$cw=matrix(0,L,K);} # Default value for cw
  if(is.null(cstr$Gammaw))
    { cstr$Gammaw=array(diag(Lw),dim=c(Lw,Lw,K));} # Default value for Gammaw
  tmp = ExpectationZIID(tapp,yapp,yappR,iR,theta,verb);
  r = tmp$r ;
  ec = tmp$ec ;
  tmp = remove_empty_clusters(theta,cstr,ec);
  # BUG FIX: remove_empty_clusters() returns list(th=..., cstr=...); the
  # original read `tmp$theta`, which is NULL, so the (possibly pruned)
  # parameters were silently discarded on this warm-start path.  Compare
  # the correct `tmp$th` used inside the EM loop below.
  theta = tmp$th ;
  cstr = tmp$cstr ;
  #tmp = ExpectationW(tapp,yapp,theta,verb);
  #muw = tmp$muw
  #Sw = tmp$Sw
  if(verb>=1) print("");
} else {if(is.null(in_r)){ # commented out in the original code: why?
# Cold start: initialise posteriors with a GMM (K-means + EM) on the joint
# (t, y) data; the first replicate yapp1 is used for the y part.
# % Initialise posteriors with K-means + GMM on joint observed data
# % [~,C] = kmeans([t;y]',in_K);
# % [~, ~, ~, r] = emgm([t;y], C', 3, verb);
# % [~, ~, ~, r] = emgm([t;y], in_K, 3, verb);
r = emgmIID(rbind(tapp,yapp1), in_K, 1000, verb=verb)$R;
# % [~,cluster_idx]=max(r,NULL,2);
# % fig=figure;clf(fig);
# % scatter(t(1,:),t(2,:),200,cluster_idx','filled');
# % weight=model.weight;
# % K=length(weight);
# % mutt=model.mu(1:Lt,:);
# % Sigmatt=model.Sigma[1:Lt,1:Lt,];
# % normr=zeros(N,1);
# % r=zeros(N,K);
# % for k=1:K
# % r[,k]=weight[k]*mvnpdf(t',mutt[,k]',reshape(Sigmatt[,,k],Lt,Lt));
# % normr=normr+reshape(r[,k],N,1);
# % end
# % r=sweep(@rdivide,r,normr);
# % fig=figure;clf(fig);
# % [~,classes]=max(r,NULL,2); % Nx1
# % scatter(y(1,:),y(2,:),200,classes','filled');
} else {r=in_r$R;} # caller-supplied initial assignments
# No hidden components in this IID variant, so no W moments are needed.
if(Lw==0) {Sw=NULL; muw=NULL;}
#else {
# % Start by running an M-step without hidden variables (partial
# % theta), deduce Awk by local weighted PCA on residuals (complete
# % theta), deduce r, muw and Sw from E-steps on complete theta.
# theta = MaximizationIID(tapp,yapp,R,r,NULL,NULL,cstr,verb);
# #print(colMeans(theta$A)) OK no problem here : error is fater
# K=length(theta$pi);
# if(is.null(cstr$cw))
# {cstr$cw=matrix(0,Lw,K);}
# theta$c=rbind(theta$c,cstr$cw[,1:K]);
#Gammaf=array(0,dim=c(L,L,K));
# Gammaf[1:Lt,1:Lt,]=theta$Gamma;
# if(is.null(cstr$Gammaw))
# {cstr$Gammaw=array(diag(Lw),dim=c(Lw,Lw,K));}
#Gammaf[(Lt+1):L,(Lt+1):L,]=cstr$Gammaw[,,1:K]; #%LwxLwxK
# theta$Gamma=Gammaf;
# % Initialize Awk with local weighted PCAs on residuals:
# Aw=array(0,dim=c(D,Lw,K));
# for (k in 1:K)
# {rk_bar=sum(r[,k]);
# bk=theta$b[,k];
# w=sweep(yapp,1,bk,"-"); #%DxN
# if(Lt>0)
# {Ak=theta$A[,,k];
# w=w-Ak%*%tapp;}
# w=sweep(w,2,sqrt(r[,k]/rk_bar),"*"); #%DxN
# C=tcrossprod(w); #% Residual weighted covariance matrix
# tmp = eigen(C) ##svd?
# U = tmp$vectors[,1:Lw]
# Lambda = tmp$values[1:Lw] #% Weighted residual PCA U:DxLw
#% The residual variance is the discarded eigenvalues' mean
# sigma2k=(sum(diag(C))-sum(Lambda))/(D-Lw); #scalar
#print(sigma2k) #OK here
# theta$Sigma[,,k]=sigma2k * diag(D);
# Aw[,,k]=U%*%sqrt(diag(Lambda,ncol=length(Lambda),nrow=length(Lambda))-sigma2k*diag(Lw));}
#theta$A=abind(theta$A,Aw,along=2); #%DxLxK
#tmp = ExpectationZIID(tapp,yapp,R,theta,verb);
# r =tmp$r ;
# ec=tmp$ec;
# tmp = remove_empty_clusters(theta,cstr,ec);
# theta = tmp$th ;
# cstr = tmp$cstr ;
# tmp = ExpectationW(tapp,yapp,theta,verb);
# muw = tmp$muw ;
# Sw = tmp$Sw ;
# if(verb>=1) print("");
# } # end else Lw
}
# %===============================EM Iterations==============================
if(verb>=1) print(' Running EM');
LL = rep(-Inf,maxiter);
iter = 0;
converged= FALSE;
while ( !converged & iter<maxiter)
{iter = iter + 1;
if(verb>=1) print(paste(' Iteration ',iter,sep=""));
# % =====================MAXIMIZATION STEP===========================
theta = MaximizationIID(tapp,yapp,yappR,iR,r,muw,Sw,cstr,verb);
# % =====================EXPECTATION STEPS===========================
tmp = ExpectationZIID(tapp,yapp,yappR,iR,theta,verb);
r =tmp$r ;
LL[iter] =tmp$LL;
if (verb>=1) {print(LL[iter]);}
ec=tmp$ec
tmp = remove_empty_clusters(theta,cstr,ec);
theta = tmp$th
cstr = tmp$cstr
# tmp = ExpectationW(tapp,yapp,theta,verb);
# muw=tmp$muw
# Sw=tmp$Sw
if(iter>=3)
{deltaLL_total=max(LL[1:iter])-min(LL[1:iter]);
deltaLL=LL[iter]-LL[iter-1];
converged=(deltaLL <= (0.001*deltaLL_total));
}
if(verb>=1) print("");
}
# %%% Final log-likelihood %%%%
LLf=LL[iter];
# % =============================Final plots===============================
if(verb>=1) print(paste('Converged in ',iter,' iterations',sep=""));
# if(verb>=2)
# { plot(LL);
# cluster_idx=max.col(r);
# # % for d=1:D/2
# # % fig=figure;clf(fig);
# # % scatter(y(2*d-1,:),y(2*d,:),200,cluster_idx');
# # % end
# # % fig=figure;clf(fig);
# # % scatter(t(1,:),t(2,:),200,cluster_idx','filled');
# }
#}
theta$r = r
theta$LLf=LLf
theta$LL = LL[1:iter]
return(theta)
} | /Rfunctions/gllimIID1D.R | no_license | Trung-TinNGUYEN/GLLiM-ABC-v0 | R | false | false | 24,443 | r | gllimIID1D = function(tapp,yapp,iR,in_K,in_r=NULL,maxiter=100,Lw=0,cstr=NULL,verb=0,in_theta=NULL){
# Adapted by F. Forbes from standard GLLiM
# Remark: the hybrid case (Lw > 0) is not implemented
# MaximizationIID is faster than the first (old) version
# %%%%%%%% General EM Algorithm for Gaussian Locally Linear Mapping %%%%%%%%%
# %%% Author: Antoine Deleforge (April 2013) - antoine.deleforge@inria.fr %%%
# % Description: Compute maximum likelihood parameters theta and posterior
# % probabilities r=p(z_n=k|x_n,y_n;theta) of a gllim model with constraints
# % cstr using N associated observations t and y.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%% Input %%%%
# %- t (LtxN) % Training latent variables
# %- y (DxN) % Training observed variables
# DR x N , R iid replications (R=iR but R is already used)
# iR = number of replications
# %- in_K (int) % Initial number of components
# % <Optional>
# %- Lw (int) % Dimensionality of hidden components (default 0)
# %- maxiter (int) % Maximum number of iterations (default 100)
# %- in_theta (struct) % Initial parameters (default NULL)
# % | same structure as output theta
# %- in_r (NxK) % Initial assignments (default NULL)
# %- cstr (struct) % Constraints on parameters theta (default NULL,'')
# % - cstr$ct % fixed value (LtxK) or ''=uncons.
# % - cstr$cw % fixed value (LwxK) or ''=fixed to 0
# % - cstr$Gammat % fixed value (LtxLtxK) or ''=uncons.
# % | or {'','d','i'}{'','*','v'} [1]
# % - cstr$Gammaw % fixed value (LwxLwxK) or ''=fixed to I
# % - cstr$pi % fixed value (1xK) or ''=uncons. or '*'=equal
# % - cstr$A % fixed value (DxL) or ''=uncons.
# % - cstr$b % fixed value (DxK) or ''=uncons.
# % - cstr$Sigma % fixed value (DxDxK) or ''=uncons.
# % | or {'','d','i'}{'','*'} [1]
# %- verb {0,1,2} % Verbosity (default 1)
# %%%% Output %%%%
# %- theta (struct) % Estimated parameters (L=Lt+Lw)
# % - theta.c (LxK) % Gaussian means of X
# % - theta.Gamma (LxLxK) % Gaussian covariances of X
# % - theta.pi (1xK) % Gaussian weights of X
# % - theta.A (DxLxK) % Affine transformation matrices
# % - theta.b (DxK) % Affine transformation vectors
# % - theta.Sigma (DxDxK) % Error covariances
# %- r (NxK) % Posterior probabilities p(z_n=k|x_n,y_n;theta)
# %%% [1] 'd'=diag., 'i'=iso., '*'=equal for all k, 'v'=equal det. for all k
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# % ======================Input Parameters Retrieval=========================
# A faire plus tard mais pas forcement indispensable maintenant?
# [Lw, maxiter, in_theta, in_r, cstr, verb] = ...
# process_options(varargin,'Lw',0,'maxiter',100,'in_theta',NULL,...
# 'in_r',NULL,'cstr',struct(),'verb',1);
# % ==========================Default Constraints============================
# Fill in default constraints for any field the caller did not supply:
# NULL means unconstrained, except Sigma which defaults to isotropic ('i').
if(! "ct" %in% names(cstr)) cstr$ct=NULL;
if(! "cw" %in% names(cstr)) cstr$cw=NULL;
if(! "Gammat" %in% names(cstr)) cstr$Gammat=NULL;
if(! "Gammaw" %in% names(cstr)) cstr$Gammaw=NULL;
if(! "pi" %in% names(cstr)) cstr$pi=NULL;
if(! "A" %in% names(cstr)) cstr$A=NULL;
if(! "b" %in% names(cstr)) cstr$b=NULL;
if(! "Sigma" %in% names(cstr)) cstr$Sigma="i";
if (ncol(tapp) != ncol(yapp)) {stop("Observations must be in columns and variables in rows")}
# iid adaptation
D= nrow(yapp)/iR # per-replicate observation dimension (should be D=1 in this 1D variant)
N = ncol(yapp)
# yapp is a DR x N matrix turned into an array (D,N,R); slice i holds replicate i
yappR<-array(0,c(D,N,iR))
for (i in 1:iR){
yappR[,,i]<-yapp[(1+D*(i-1)):(D*i),]
}
# D=1 needs the transpose not to lose the matrix format
yapp1<-t(yappR[,,1]) # first replicate only, used for emgmIID and init
ExpectationZIID = function(tapp,yapp,yappR,iR,th,verb){
# E-step on Z for the IID-replicate model: computes the posterior
# responsibilities r[n,k] = p(z_n = k | t_n, y_n1..y_nR; theta), the
# observed-data log-likelihood LL, and a logical vector ec flagging
# non-empty components.
#
# Args:
#   tapp  (LtxN)   training latent variables
#   yapp  (DRxN)   stacked observations (R iid replicates)
#   yappR (D,N,R)  the same observations as a 3D array, one slice per replicate
#   iR    (int)    number of iid replicates R
#   th    (list)   current parameters (c, Gamma, pi, A, b, Sigma)
#   verb           verbosity level
# Returns: list(r = NxK' posterior matrix (empty components removed),
#               LL = scalar log-likelihood, ec = length-K logical,
#               FALSE for components that were removed)
if(verb>=1) print(' EZ');
if(verb>=3) print(' k=');
D= nrow(yapp)/iR
N = ncol(yapp)
K=length(th$pi);
Lt = nrow(tapp)
L = nrow(th$c)
Lw=L-Lt;
# transpose keeps the matrix format when D=1
yapp1<-t(yappR[,,1]) # first replicate, used only for emgmIID re-initialization
logr=matrix(NaN,N,K);
for (k in 1:K){
if(verb>=3) print(k);
muyk=th$b[,k,drop=FALSE]; # Dx1 (drop=FALSE keeps the matrix format when D=1)
covyk= th$Sigma[,,k]; # DxD
if(Lt>0)
{if (L==1) {Atk=th$A[,1:Lt,k,drop=FALSE];} else {Atk=th$A[,1:Lt,k]} # DxLt
muyk= sweep(Atk%*%tapp,1,muyk,"+"); # DxN
}
# hybrid (Lw>0) case intentionally not handled here
logr[,k] = log(th$pi[k]); # column k of logr, length N
# IID: replicate log-densities add up (replicates are conditionally independent given z)
sumRpdf<-0
# BUGFIX: the original read `for (i in i:iR)`, picking up a stale `i`
# from the enclosing scope (equal to iR after the setup loop), so only
# part of the replicates was summed.
for (i in 1:iR){sumRpdf<-sumRpdf+loggausspdf(t(yappR[,,i]),muyk,covyk)}
logr[,k] = logr[,k] + sumRpdf ;
# prior term on t, unchanged from standard GLLiM
if (Lt>0)
logr[,k] = logr[,k]+ loggausspdf(tapp,th$c[1:Lt,k,drop=FALSE],th$Gamma[1:Lt,1:Lt,k]);
} # end for on k
lognormr=logsumexp(logr,2);
LL=sum(lognormr);
r=exp(sweep(logr,1, lognormr,"-"));
# Flag empty clusters: component k is empty when its posterior mass is zero or non-finite.
ec=rep(TRUE,K); # false if component k is empty.
for (k in 1:K){
if(sum(r[,k])==0 | !is.finite(sum(r[,k])))
{ec[k]=FALSE;
if(verb>=1) {print(paste(' WARNING: CLASS ',k,' HAS BEEN REMOVED'));}
}
} # end for on k
if (sum(ec)==0)
{print('REINIT! ');
# All components collapsed: re-initialize posteriors with a GMM on the
# joint (t, first replicate of y) data.
r = emgmIID(rbind(tapp,yapp1), K, 2, verb)$R;
ec=rep(TRUE,ncol(r));} else {r=r[,ec,drop=FALSE];} # drop=FALSE keeps r NxK' even when one cluster survives
return(list(r=r,LL=LL,ec=ec))
}
# not used
#ExpectationW=function(tapp,yapp,th,verb){
# if(verb>=1) print(' EW');
# if(verb>=3) print(' k=');
# D = nrow(yapp) ; N=ncol(yapp)
# K=length(th$pi);
# Lt = nrow(tapp);
# L = nrow(th$c)
# Lw=L-Lt;
# if(Lw==0)
# {muw=NULL;
# Sw=NULL;}
# Sw=array(0,dim=c(Lw,Lw,K));
# muw=array(0,dim=c(Lw,N,K));
# for (k in 1:K){
# if(verb>=3) print(k)
# Atk=th$A[,1:Lt,k]; #%DxLt
# Sigmak=th$Sigma[,,k]; #%DxD
# if (Lw==0)
# {Awk = NULL ; Gammawk=NULL ;cwk =NULL;invSwk=NULL} else {Awk=th$A[,(Lt+1):L,k]; Gammawk=th$Gamma[(Lt+1):L,(Lt+1):L,k];cwk=th$c[(Lt+1):L,k];invSwk=diag(Lw)+tcrossprod(Gammawk,Awk) %*% solve(Sigmak)%*%Awk;} #%DxLw # gerer le cas ou Lw=0 Matlab le fait tout seul
# if (!is.null(tapp))
# {Atkt=Atk%*%tapp;}
# else
# {Atkt=0;}
# if (Lw==0) {muw=NULL;Sw=NULL;} else {
# #invSwk\bsxfun(@plus,Gammawk*Awk'/Sigmak*bsxfun(@minus,y-Atkt,th.b(:,k)),cwk)
# muw[,,k]= solve(invSwk,sweep(Gammawk %*% t(Awk) %*% solve(Sigmak) %*% sweep(yapp-Atkt,1,th$b[,k],"-"),1,cwk,"+")); #%LwxN
# Sw[,,k]=solve(invSwk,Gammawk);}
# }
#return(list(muw=muw,Sw=Sw))
#}
# end not used
MaximizationIID = function(tapp,yapp,yappR, iR, r,muw,Sw,cstr,verb){
# M-step for the IID-replicate GLLiM model.
# pi_k, c_k and Gamma_k do not depend on the replicate and are computed
# once; the per-replicate updates for A_k, b_k and Sigma_k are computed
# inside the replicate loop and averaged over the iR replicates at the end.
#
# Args:
#   tapp  (LtxN)   training latent variables
#   yapp  (DRxN)   stacked observations
#   yappR (D,N,R)  same observations as a 3D array (one slice per replicate)
#   iR    (int)    number of iid replicates R
#   r     (NxK)    posterior responsibilities from the E-step
#   muw,Sw         hidden-variable posteriors (NULL when Lw==0; hybrid case unused)
#   cstr  (list)   constraints on the parameters (see file header)
#   verb           verbosity level
# Returns: th, the updated parameter list (c, Gamma, pi, A, b, Sigma).
# yapp is a DR x N matrix turned into an array yappR (D,N,R)
if(verb>=1) print(' M');
if(verb>=3) print(' k=');
K = ncol(r);
D = nrow(yapp)/iR;
N=ncol(yapp)
Lt = nrow(tapp)
Lw = ifelse(is.null(muw),0,nrow(muw))
L=Lt+Lw;
# Allocate the output parameter containers.
th = list()
th$c=matrix(NaN,nrow=L,ncol=K)
th$Gamma=array(0,dim=c(L,L,K));
#if(Lw>0)
# {th$c[(Lt+1):L,]=cstr$cw; #% LwxK
# th$Gamma[(Lt+1):L,(Lt+1):L,]=cstr$Gammaw;} #% LwxLwxK}
th$pi=rep(NaN,K);
th$A=array(NaN,dim=c(D,L,K));
th$b=matrix(NaN,nrow=D,ncol=K);
th$Sigma= array(NaN,dim=c(D,D,K));
# IID case special here: accumulators for the per-replicate estimates of
# A, b, Sigma, averaged over replicates after the replicate loop.
#sumth = list()
sumthA=array(0,dim=c(D,L,K));
sumthb=matrix(0,nrow=D,ncol=K);
sumthSigma= array(0,dim=c(D,D,K));
# For each replication we can apply the usual GLLiM M-step and then take the
# mean of the estimates for bk, Sigmak; the ck, Gammak parameters do not change.
# Attention: Ak requires some care, also a mean in the end...
# xbar_k and ybar_k can be computed before, they do not vary with the replication.
rk_bar=rep(0,K);
# rk = matrix(0,N,K)
yk_bar<-matrix(0,D,K)
xk_bar<-matrix(0,Lt,K)
# ---- First pass over clusters: replicate-independent statistics ----
for (k in 1:K){
rk=r[,k]; #% 1xN
rk_bar[k]=sum(rk); #% 1x1  total responsibility mass of cluster k
yk_bartemp<-0
# NOTE(review): t(yappR[,,iirep]) only yields a DxN matrix when D==1
# (for D==1 the slice drops to a length-N vector and t() restores 1xN);
# confirm before reusing this for D>1.
for (iirep in 1:iR){yk_bartemp<-yk_bartemp + rowSums(sweep(t(yappR[,,iirep]),2,rk,"*"))/rk_bar[k]}
yk_bar[,k]<-yk_bartemp/iR
#% Dx1  weighted mean of y, averaged over replicates
xk_bar[,k]<-rowSums(sweep(tapp,2,rk,"*"))/rk_bar[k] # L x 1 weighted mean of t
### M steps that do not depend on iR, ie pi_k, ck, Gammak
if(verb>=3) print(k);
# % Posteriors' sums
##rk=r[,k]; #% 1xN
##rk_bar[k]=sum(rk); #% 1x1
# begin if (Lt>0)
if(Lt>0)
{
if(verb>=3) {print('c');}
#% Compute optimal mean ctk
if(is.null(cstr$ct))
{th$c[1:Lt,k]=rowSums(sweep(tapp,2,rk,"*"))/rk_bar[k];}# % Ltx1
else {th$c[1:Lt,k]=cstr$ct[,k];}
#% Compute optimal covariance matrix Gammatk
if(verb>=3) {print('Gt');}
diffGamma= sweep(sweep(tapp,1,th$c[1:Lt,k],"-"),2,sqrt(rk),"*"); #% LtxN responsibility-weighted centered t
if( is.null(cstr$Gammat) || (length(cstr$Gammat)==1 & cstr$Gammat=='*')) # NOTE: '|' vs '||'?
# %%%% Full Gammat
# need trick D=1 with transpose?
{th$Gamma[1:Lt,1:Lt,k]=tcrossprod(diffGamma)/rk_bar[k]; #% DxD
}#th$Gamma[1:Lt,1:Lt,k]=th$Gamma[1:Lt,1:Lt,k];
else
{
if( !is.character(cstr$Gammat))
#%%%% Fixed Gammat
{th$Gamma[1:Lt,1:Lt,k]=cstr$Gammat[,,k]; }
else
{
if(cstr$Gammat[1]=='d' | cstr$Gammat[1]=='i')
#% Diagonal terms
{gamma2=rowSums(diffGamma^2)/rk_bar[k]; #%Ltx1
if(cstr$Gammat[1]=='d')
#%%% Diagonal Gamma
{th$Gamma[1:Lt,1:Lt,k]=diag(gamma2);} #% LtxLt
else
#%%% Isotropic Gamma
{th$Gamma[1:Lt,1:Lt,k]=mean(gamma2)*diag(Lt);} #% LtxLt
}
else
{if(cstr$Gammat[1]=='v')
#%%%% Full Gamma (equal-volume constraint applied after the k loop)
{th$Gamma[1:Lt,1:Lt,k]=tcrossprod(diffGamma)/rk_bar[k];} #% LtxLt
else {# cstr$Gammat,
stop(' ERROR: invalid constraint on Gamma.'); }
}
}
}
} # end if (Lt>0)
# % Compute optimal weight pik
th$pi[k]=rk_bar[k]/N; #% 1x1
}
### M steps that depend on the yappR (one pass per replicate, results averaged)
for (irep in 1:iR){
# transpose trick for D=1 (see note above)
yappi<-t(yappR[,,irep])
# not used but kept for the else # here x = tapp
if(Lw>0)
{x=rbind(tapp,muw[,,k]); #% LxN
Skx=rbind(cbind(matrix(0,Lt,Lt),matrix(0,Lt,Lw)),cbind(matrix(0,Lw,Lt),Sw[,,k])); }#% LxL
else
{x=tapp; #% LtxN
Skx=matrix(0,Lt,Lt);} #%LtxLt
# end if else
for (k in 1:K){
rk=r[,k]
if(verb>=3) {print('A');}
# if else
if(is.null(cstr$b))
{# % Compute weighted means of y and x
#### These means are changing in the iid case
#### yk_bar is now the mean over replications (iR)
#### This part has been factored out for speed (computed in the first k loop)
if(L==0)
{xk_bar[,k]=NULL;}
}
else
# b is fixed by constraint: center on cstr$b instead
{yk_bar[,k]=cstr$b[,k];
xk_bar[,k]=rep(0,L);
th$b[,k]=cstr$b[,k];
} # end if else
#% Compute weighted, mean centered y and x
weights=sqrt(rk); #% 1xN
# trick D=1 not here?
y_stark=sweep(yappi,1,t(yk_bar[,k]),"-"); #% DxN #col or row?
y_stark= sweep(y_stark,2,weights,"*"); #% DxN #col or row?
if(L>0)
{ x_stark=sweep(tapp,1,xk_bar[,k],"-"); #% LxN
x_stark= sweep(x_stark,2,weights,"*"); #% LxN
}
else
{x_stark=NULL;}
#% Robustly compute optimal transformation matrix Ak:
# use qr.solve when the normal-equations matrix is well conditioned,
# fall back to the pseudo-inverse (ginv) otherwise.
#warning off MATLAB:nearlySingularMatrix;
if(!all(Skx==0))
{if(N>=L & det(Skx+tcrossprod(x_stark)) >10^(-8))
{th$A[,,k]=tcrossprod(y_stark,x_stark) %*% qr.solve(Skx+tcrossprod(x_stark));} #% DxL
else
{th$A[,,k]=tcrossprod(y_stark,x_stark) %*% ginv(Skx+tcrossprod(x_stark));} #%DxL
}
else
{if(!all(x_stark==0))
{if(N>=L & det(tcrossprod(x_stark))>10^(-8))
{th$A[,,k]=tcrossprod(y_stark,x_stark) %*% qr.solve(tcrossprod(x_stark));} #% DxL
else
{if(N<L && det(crossprod(x_stark))>10^(-8))
{th$A[,,k]=y_stark %*% solve(crossprod(x_stark)) %*% t(x_stark);} #% DxL
else
{if(verb>=3) print('p')
# trick D=1
th$A[,,k]=t(y_stark %*% ginv(x_stark));} #% DxL
}}
else
{#% Correspond to null variance in cluster k or L=0:
if(verb>=1 & L>0) print('null var\n');
th$A[,,k]=0; # % DxL
}
}
# end else
if(verb>=3)print('b');
# % Intermediate variable wk=y-Ak*x (residual after the affine map)
if(L>0)
# trick D=1 not needed here?
{wk=yappi-th$A[,,k]%*%x;} #% DxN
else
{wk=yappi;}
#% Compute optimal transformation vector bk (responsibility-weighted residual mean)
if(is.null(cstr$b))
th$b[,k]=t(rowSums(sweep(wk,2,rk,"*")))/rk_bar[k]; #% Dx1
if(verb>=3) print('S');
#% Compute optimal covariance matrix Sigmak
# if(Lw>0)
# { Awk=th$A[,(Lt+1):L,k];
# Swk=Sw[,,k];
# ASAwk=Awk%*%tcrossprod(Swk,Awk);}
#else
ASAwk=0;
# trick D=1 not here
diffSigma=sweep(sweep(wk,1,t(th$b[,k]),"-"),2,sqrt(rk),"*"); #%DxN
if (cstr$Sigma %in% c("","*"))
{#%%%% Full Sigma ('*' equality constraint applied after the k loop)
# trick D=1 not here?
th$Sigma[,,k]=tcrossprod(diffSigma)/rk_bar[k]; #% DxD
th$Sigma[,,k]=th$Sigma[,,k]+ASAwk; }
else
{
if(!is.character(cstr$Sigma))
#%%%% Fixed Sigma
{th$Sigma=cstr$Sigma;}
else {
if(cstr$Sigma[1]=='d' || cstr$Sigma[1]=='i')
#% Diagonal terms
# trick D=1
{sigma2=t(rowSums(diffSigma^2)/rk_bar[k]); #%Dx1
if(cstr$Sigma[1]=='d')
{#%%% Diagonal Sigma
# trick D=1
th$Sigma[,,k]=t(diag(sigma2,ncol=D,nrow=D)); #% DxD
if (is.null(dim(ASAwk))) {th$Sigma[,,k]=th$Sigma[,,k] + diag(ASAwk,ncol=D,nrow=D)}
else {th$Sigma[,,k]=th$Sigma[,,k]+diag(diag(ASAwk));}
}
else
{#%%% Isotropic Sigma
th$Sigma[,,k]=mean(sigma2)*diag(D); #% DxD
if (is.null(dim(ASAwk))) {th$Sigma[,,k]=th$Sigma[,,k]+sum(diag(ASAwk,ncol=D,nrow=D))/D*diag(D);}
else {th$Sigma[,,k]=th$Sigma[,,k]+sum(diag(ASAwk))/D*diag(D);}
}
}
else { cstr$Sigma ;
stop(' ERROR: invalid constraint on Sigma.');}
}
}
#% Avoid numerical problems on covariances: zero out non-finite values
#% and add a small diagonal jitter.
# NOTE(review): this runs once per replicate pass, so the 1e-8 jitter is
# added iR times in total — presumably harmless; confirm intended.
if(verb>=3) print('n');
if(! is.finite(sum(th$Gamma[1:Lt,1:Lt,k]))) {th$Gamma[1:Lt,1:Lt,k]=0;}
th$Gamma[1:Lt,1:Lt,k]=th$Gamma[1:Lt,1:Lt,k]+1e-8*diag(Lt);
if(! is.finite(sum(th$Sigma[,,k]))) {th$Sigma[,,k]=0;}
th$Sigma[,,k]=th$Sigma[,,k]+1e-8*diag(D);
if(verb>=3) print(',');
} # end for on k
if(verb>=3) print('end');
if (cstr$Sigma=="*")
{#%%% Equality constraint on Sigma: responsibility-weighted average over clusters
th$Sigma=sweep(th$Sigma ,3,rk_bar,"*");
th$Sigma=array(apply(th$Sigma,c(1,2),mean),dim=c(D,D,K))
}
if( !is.null(cstr$Gammat) && cstr$Gammat=='v')
{#%%% Equal volume constraint on Gamma: normalize each Gammak by its
#%%% determinant, then rescale all by the pi-weighted mean volume.
detG=rep(0,K);
for (k in 1:K){
if (D==1) {detG[k]=th$Gamma[1:Lt,1:Lt,k]}
else {detG[k]=det(th$Gamma[1:Lt,1:Lt,k]);} #% 1x1
th$Gamma[1:Lt,1:Lt,k] = th$Gamma[1:Lt,1:Lt,k] / detG[k]
}
th$Gamma[1:Lt,1:Lt,]=sum(detG^(1/Lt)*th$pi)*th$Gamma[1:Lt,1:Lt,];
}
if(is.character(cstr$Gammat) && !is.null(cstr$Gammat) && cstr$Gammat[length(cstr$Gammat)]=='*')
{#%%% Equality constraint on Gammat
for (k in 1:K){
th$Gamma[1:Lt,1:Lt,k]=th$Gamma[1:Lt,1:Lt,k]%*%diag(rk_bar);
th$Gamma[1:Lt,1:Lt,k]=matrix(1,Lt,Lt) * sum(th$Gamma[1:Lt,1:Lt,k])/N;
}
}
# Constraint on pi: fixed numeric value, or '*' = all weights equal.
if( ! is.character(cstr$pi) || is.null(cstr$pi))
{if(! is.null(cstr$pi)) {th$pi=cstr$pi;}} else {
if (!is.null(cstr$pi) && cstr$pi[1]=='*')
{th$pi=1/K*rep(1,K);} else {stop(' ERROR: invalid constraint on pi.');}
}
# Accumulate the per-replicate estimates for the final average.
sumthSigma <- sumthSigma + th$Sigma
sumthA <- sumthA + th$A
sumthb <- sumthb + th$b
} # end for on irep
# Average the replicate-dependent parameters over the iR replicates.
th$A<-sumthA/iR
th$b<-sumthb/iR
th$Sigma<-sumthSigma/iR
#print(th$A[,,1])
return(th)
} # end function Maximization
remove_empty_clusters= function(th1,cstr1,ec){
# Drop mixture components flagged as empty from both the parameter set
# and the constraint set.
#
# Args:
#   th1   (list) model parameters: c (LxK), Gamma (LxLxK), pi (length K),
#         A (DxLxK), b (DxK), Sigma (DxDxK)
#   cstr1 (list) constraints; numeric constraint fields are subset the
#         same way, character codes (e.g. 'd','i','*') are left untouched
#   ec    (length-K logical) ec[k] is FALSE when component k is empty
# Returns: list(th=..., cstr=...) restricted to the sum(ec) kept components.
#
# All subsetting uses drop=FALSE so matrices/arrays keep their dimensions
# even when a single component survives: the EM code indexes
# th$Gamma[1:Lt,1:Lt,k], th$Sigma[,,k], th$b[,k], etc., and would break
# on a silently dropped dimension.
th=th1;
cstr=cstr1;
if(sum(ec) != length(ec))
{if( !is.null(cstr$ct) && !is.character(cstr$ct))
cstr$ct=cstr$ct[,ec,drop=FALSE];
if(!is.null(cstr$cw) && !is.character(cstr$cw))
cstr$cw=cstr$cw[,ec,drop=FALSE];
if(!is.null(cstr$Gammat) && !is.character(cstr$Gammat))
cstr$Gammat=cstr$Gammat[,,ec,drop=FALSE];
if(!is.null(cstr$Gammaw) && !is.character(cstr$Gammaw))
cstr$Gammaw=cstr$Gammaw[,,ec,drop=FALSE];
if(!is.null(cstr$pi) && !is.character(cstr$pi))
cstr$pi=cstr$pi[,ec,drop=FALSE];
if(!is.null(cstr$A) && !is.character(cstr$A))
cstr$A=cstr$A[,,ec,drop=FALSE];
if(!is.null(cstr$b) && !is.character(cstr$b))
cstr$b=cstr$b[,ec,drop=FALSE];
if(!is.null(cstr$Sigma) && !is.character(cstr$Sigma))
cstr$Sigma=cstr$Sigma[,,ec,drop=FALSE];
th$c=th$c[,ec,drop=FALSE];
th$Gamma=th$Gamma[,,ec,drop=FALSE];
th$pi=th$pi[ec];
th$A=th$A[,,ec,drop=FALSE];
th$b=th$b[,ec,drop=FALSE];
th$Sigma=th$Sigma[,,ec,drop=FALSE];
}
return(list(th=th,cstr=cstr))
}
# % ==========================EM initialization==============================
# Initialize the EM: either from user-supplied parameters (in_theta),
# from user-supplied posteriors (in_r$R), or by fitting a GMM (emgmIID)
# on the joint (t, first replicate of y) data.
Lt=nrow(tapp)
L=Lt+Lw;
D = nrow(yapp)/iR ; N = ncol(yapp);
if(verb>=1) {print('EM Initializations');}
if(!is.null(in_theta)) {
theta=in_theta;
K=length(theta$pi);
if(is.null(cstr$cw))
{cstr$cw=matrix(0,L,K);} # % Default value for cw
if(is.null(cstr$Gammaw))
{ cstr$Gammaw=array(diag(Lw),dim=c(Lw,Lw,K));} #% Default value for Gammaw
tmp = ExpectationZIID(tapp,yapp,yappR,iR,theta,verb);
r = tmp$r ;
ec = tmp$ec ;
tmp = remove_empty_clusters(theta,cstr,ec);
# BUGFIX: remove_empty_clusters returns the parameters under $th, not
# $theta; the original `theta = tmp$theta` silently set theta to NULL
# on this code path (compare the EM loop below, which uses tmp$th).
theta = tmp$th ;
cstr = tmp$cstr ;
#tmp = ExpectationW(tapp,yapp,theta,verb);
#muw = tmp$muw
#Sw = tmp$Sw
if(verb>=1) print("");
} else {if(is.null(in_r)){ # commented out in the original code: why?
# % Initialise posteriors with K-means + GMM on joint observed data
# % [~,C] = kmeans([t;y]',in_K);
# % [~, ~, ~, r] = emgm([t;y], C', 3, verb);
# % [~, ~, ~, r] = emgm([t;y], in_K, 3, verb);
r = emgmIID(rbind(tapp,yapp1), in_K, 1000, verb=verb)$R;
# % [~,cluster_idx]=max(r,NULL,2);
# % fig=figure;clf(fig);
# % scatter(t(1,:),t(2,:),200,cluster_idx','filled');
# % weight=model.weight;
# % K=length(weight);
# % mutt=model.mu(1:Lt,:);
# % Sigmatt=model.Sigma[1:Lt,1:Lt,];
# % normr=zeros(N,1);
# % r=zeros(N,K);
# % for k=1:K
# % r[,k]=weight[k]*mvnpdf(t',mutt[,k]',reshape(Sigmatt[,,k],Lt,Lt));
# % normr=normr+reshape(r[,k],N,1);
# % end
# % r=sweep(@rdivide,r,normr);
# % fig=figure;clf(fig);
# % [~,classes]=max(r,NULL,2); % Nx1
# % scatter(y(1,:),y(2,:),200,classes','filled');
} else {r=in_r$R;}
if(Lw==0) {Sw=NULL; muw=NULL;}
#else {
# % Start by running an M-step without hidden variables (partial
# % theta), deduce Awk by local weighted PCA on residuals (complete
# % theta), deduce r, muw and Sw from E-steps on complete theta.
# theta = MaximizationIID(tapp,yapp,R,r,NULL,NULL,cstr,verb);
# #print(colMeans(theta$A)) OK no problem here : error is fater
# K=length(theta$pi);
# if(is.null(cstr$cw))
# {cstr$cw=matrix(0,Lw,K);}
# theta$c=rbind(theta$c,cstr$cw[,1:K]);
#Gammaf=array(0,dim=c(L,L,K));
# Gammaf[1:Lt,1:Lt,]=theta$Gamma;
# if(is.null(cstr$Gammaw))
# {cstr$Gammaw=array(diag(Lw),dim=c(Lw,Lw,K));}
#Gammaf[(Lt+1):L,(Lt+1):L,]=cstr$Gammaw[,,1:K]; #%LwxLwxK
# theta$Gamma=Gammaf;
# % Initialize Awk with local weighted PCAs on residuals:
# Aw=array(0,dim=c(D,Lw,K));
# for (k in 1:K)
# {rk_bar=sum(r[,k]);
# bk=theta$b[,k];
# w=sweep(yapp,1,bk,"-"); #%DxN
# if(Lt>0)
# {Ak=theta$A[,,k];
# w=w-Ak%*%tapp;}
# w=sweep(w,2,sqrt(r[,k]/rk_bar),"*"); #%DxN
# C=tcrossprod(w); #% Residual weighted covariance matrix
# tmp = eigen(C) ##svd?
# U = tmp$vectors[,1:Lw]
# Lambda = tmp$values[1:Lw] #% Weighted residual PCA U:DxLw
#% The residual variance is the discarded eigenvalues' mean
# sigma2k=(sum(diag(C))-sum(Lambda))/(D-Lw); #scalar
#print(sigma2k) #OK here
# theta$Sigma[,,k]=sigma2k * diag(D);
# Aw[,,k]=U%*%sqrt(diag(Lambda,ncol=length(Lambda),nrow=length(Lambda))-sigma2k*diag(Lw));}
#theta$A=abind(theta$A,Aw,along=2); #%DxLxK
#tmp = ExpectationZIID(tapp,yapp,R,theta,verb);
# r =tmp$r ;
# ec=tmp$ec;
# tmp = remove_empty_clusters(theta,cstr,ec);
# theta = tmp$th ;
# cstr = tmp$cstr ;
# tmp = ExpectationW(tapp,yapp,theta,verb);
# muw = tmp$muw ;
# Sw = tmp$Sw ;
# if(verb>=1) print("");
# } # end else Lw
}
# %===============================EM Iterations==============================
# Alternate M-step / E-step until the log-likelihood improvement is small
# relative to the total improvement so far, or maxiter is reached.
if(verb>=1) print(' Running EM');
LL = rep(-Inf,maxiter);
iter = 0;
converged= FALSE;
# NOTE(review): both operands are scalars here, so '&' behaves like '&&';
# '&&' would be the idiomatic choice.
while ( !converged & iter<maxiter)
{iter = iter + 1;
if(verb>=1) print(paste(' Iteration ',iter,sep=""));
# % =====================MAXIMIZATION STEP===========================
theta = MaximizationIID(tapp,yapp,yappR,iR,r,muw,Sw,cstr,verb);
# % =====================EXPECTATION STEPS===========================
tmp = ExpectationZIID(tapp,yapp,yappR,iR,theta,verb);
r =tmp$r ;
LL[iter] =tmp$LL;
if (verb>=1) {print(LL[iter]);}
ec=tmp$ec
tmp = remove_empty_clusters(theta,cstr,ec);
theta = tmp$th
cstr = tmp$cstr
# tmp = ExpectationW(tapp,yapp,theta,verb);
# muw=tmp$muw
# Sw=tmp$Sw
# Convergence test (from iteration 3 onward): stop when the last
# log-likelihood increment is at most 0.1% of the total spread seen so far.
if(iter>=3)
{deltaLL_total=max(LL[1:iter])-min(LL[1:iter]);
deltaLL=LL[iter]-LL[iter-1];
converged=(deltaLL <= (0.001*deltaLL_total));
}
if(verb>=1) print("");
}
# %%% Final log-likelihood %%%%
LLf=LL[iter];
# % =============================Final plots===============================
if(verb>=1) print(paste('Converged in ',iter,' iterations',sep=""));
# if(verb>=2)
# { plot(LL);
# cluster_idx=max.col(r);
# # % for d=1:D/2
# # % fig=figure;clf(fig);
# # % scatter(y(2*d-1,:),y(2*d,:),200,cluster_idx');
# # % end
# # % fig=figure;clf(fig);
# # % scatter(t(1,:),t(2,:),200,cluster_idx','filled');
# }
#}
# Attach the final posteriors and log-likelihood trace to the returned parameters.
theta$r = r
theta$LLf=LLf
theta$LL = LL[1:iter]
return(theta)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.